# HG changeset patch
# User Doug Simon
# Date 1371631556 -7200
# Node ID 836a62f43af94d54cfce1c37e712b57f6d5d1cc6
# Parent  e0fb8a2136502c8ee67dc8c61e9a0f8f9abe5048
# Parent  5d65c078cd0ac455aa5e58a09844c7acce54b487
Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/

diff -r e0fb8a213650 -r 836a62f43af9 .hgtags
--- a/.hgtags	Tue Jun 18 14:23:29 2013 -0700
+++ b/.hgtags	Wed Jun 19 10:45:56 2013 +0200
@@ -333,3 +333,21 @@
 42fe530cd478744a4d12a0cbf803f0fc804bab1a jdk8-b85
 09b0d3e9ba6cdf7da07d4010d2d1df14596f6864 hs25-b27
 6d88a566d369f6a1f86912cad7d0912686b2fda1 hs25-b28
+86db4847f195c0ecceea646431f1ff22d56282e8 jdk8-b86
+d4c2667846607042370760e23f64c3ab9350e60d jdk8-b87
+01d5f04e64dc2d64625b2db2056f5ed4de918a45 hs25-b29
+c4af77d2045476c56fbf3f914b336bb1b7cd18af hs25-b30
+8482058e74bc8c1a890e6f3be3eff192dba6ce67 jdk8-b88
+4ec91349972255650f97bedfd07e6423e02428cf hs25-b31
+9c1fe0b419b40a9ecdd1653cc9af1b6d67a12c46 jdk8-b89
+69494caf57908ba2c8efa9eaaa472b4d1875588a hs25-b32
+1ae0472ff3a0117b5b019d380ad59fface2fde14 jdk8-b90
+b19517cecc2e91636d7c16ba2f35e3d3dc628099 hs25-b33
+7cbdf0e3725c0c56a2ff7540fc70b6d4b5890d04 jdk8-b91
+38da9f4f67096745f851318d792d6468aa1f6cf8 hs25-b34
+092018493d3bbeb1c24278fd8c40ff3d76e1fed7 jdk8-b92
+573d86d412cd9d3df7912194c1a540be50e9544e jdk8-b93
+b786c04b7be15194febe88dc1f0c9443e737a84b hs25-b35
+3c78a14da19d26d6937af5f98b97e2a21c653b04 hs25-b36
+1beed1f6f9edefe47ba8ed1355fbd3e7606b8288 jdk8-b94
+69689078dff8b21e6df30870464f5d736eebdf72 hs25-b37
diff -r e0fb8a213650 -r 836a62f43af9 agent/doc/c2replay.html
--- a/agent/doc/c2replay.html	Tue Jun 18 14:23:29 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-C2 Replay
-
-C2 compiler replay
-
-The C2 compiler replay is a function to repeat the compiling process from a crashed java process in compiled method
-This function only exists in debug version of VM
-
-Usage
-
-First, use SA to attach to the core file, if suceeded, do
-       clhsdb>dumpreplaydata <address> | -a | <thread_id> [> replay.txt]
-       create file replay.txt, address is address of Method, or nmethod(CodeBlob)
-       clhsdb>buildreplayjars [all | boot | app]
-       create files:
-         all:
-           app.jar, boot.jar
-         boot:
-           boot.jar
-         app:
-           app.jar
-       exit SA now.
-Second, use the obtained replay text file, replay.txt and jar files, app.jar and boot.jar, using debug version of java
-       java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=<datafile> -XX:+ReplayCompiles ....
-       This will replay the compiling process.
-
-       With ReplayCompiles, the replay will recompile all the methods in app.jar, and in boot.jar to emulate the process in java app.
-
-notes:
-       1) Most time, we don't need the boot.jar which is the classes loaded from JDK. It will be only modified when an agent(JVMDI) is running and modifies the classes.
-       2) If encounter error as "<flag>" not found, that means the SA is using a VMStructs which is different from the one with corefile. In this case, SA has a utility tool vmstructsdump which is located at agent/src/os/<os>/proc/<os_platform>
-
-       Use this tool to dump VM type library:
-       vmstructsdump libjvm.so > <type_name>.db
-
-       set env SA_TYPEDB=<type_name>.db (refer different shell for set envs)
diff -r e0fb8a213650 -r 836a62f43af9 agent/doc/cireplay.html
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/doc/cireplay.html	Wed Jun 19 10:45:56 2013 +0200
@@ -0,0 +1,41 @@
+Replay
+
+Compiler replay
+
+The compiler replay is a function to repeat the compiling process from a crashed java process in compiled method
+This function only exists in debug version of VM
+
+Usage
+
+First, use SA to attach to the core file. If that succeeds, do:
+       hsdb> dumpreplaydata <address> | -a | <thread_id> [> replay.txt]
+       create file replay.txt; address is the address of a Method or an nmethod (CodeBlob)
+       hsdb> buildreplayjars [all | boot | app]
+       create files:
+         all:
+           app.jar, boot.jar
+         boot:
+           boot.jar
+         app:
+           app.jar
+       exit SA now.
+Second, use the obtained replay text file replay.txt and the jar files app.jar and boot.jar with a debug version of java:
+       java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=<datafile> -XX:+ReplayCompiles ....
+       This will replay the compiling process.
+
+       With ReplayCompiles, the replay will recompile all the methods in app.jar and boot.jar to emulate the process in the java app.
+
+notes:
+       1) Most of the time, we don't need boot.jar, which contains the classes loaded from the JDK. They are only modified when an agent (JVMDI) is running and modifies the classes.
+       2) If you encounter an error such as "<flag>" not found, the SA is using a VMStructs that differs from the one in the core file. In this case, SA provides a utility tool vmstructsdump, located at agent/src/os/<os>/proc/<os_platform>
+
+       Use this tool to dump the VM type library:
+       vmstructsdump libjvm.so > <type_name>.db
+
+       set env SA_TYPEDB=<type_name>.db (see your shell's documentation for setting environment variables)
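The second step of the usage above is a plain java invocation and is easy to script. Below is a minimal sketch, not part of this changeset, that launches the replay from Java using only standard JDK APIs; it assumes replay.txt, app.jar and boot.jar were produced by the SA steps above and that java on the PATH is a debug build.

```java
import java.io.IOException;

public class ReplayLauncher {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Assumes replay.txt, app.jar and boot.jar are in the working
        // directory and "java" resolves to a debug build of the VM.
        ProcessBuilder pb = new ProcessBuilder(
                "java",
                "-Xbootclasspath/p:boot.jar",
                "-cp", "app.jar",
                "-XX:ReplayDataFile=replay.txt",
                "-XX:+ReplayCompiles");
        pb.inheritIO();                     // stream replay output to this console
        int exit = pb.start().waitFor();    // the VM exits when the replay completes
        System.out.println("replay VM exited with status " + exit);
    }
}
```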
diff -r e0fb8a213650 -r 836a62f43af9 agent/doc/clhsdb.html
--- a/agent/doc/clhsdb.html	Tue Jun 18 14:23:29 2013 -0700
+++ b/agent/doc/clhsdb.html	Wed Jun 19 10:45:56 2013 +0200
@@ -15,7 +15,7 @@
 There is also JavaScript based SA command line interface called jsdb. But, CLHSDB
 supports Unix shell-like (or dbx/gdb-like) command line interface with
-support for output redirection/appending (familiar >, >>), command history and so on.
+support for output redirection/appending (familiar >, >>), command history and so on.
 Each CLHSDB command can have zero or more arguments and optionally end with
 output redirection (or append) to a file. Commands may be stored in a file and
 run using source command. help command prints usage message for all supported
 commands (or a specific command)
@@ -49,7 +49,7 @@
   dumpheap [ file ]    dump heap in hprof binary format
   dumpideal -a | id    dump ideal graph like debug flag -XX:+PrintIdeal
   dumpilt -a | id      dump inline tree for C2 compilation
-  dumpreplaydata <address> | -a | <thread_id> [>replay.txt]   dump replay data into a file
+  dumpreplaydata <address> | -a | <thread_id> [>replay.txt]   dump replay data into a file
   echo [ true | false ]   turn on/off command echo mode
   examine [ address/count ] | [ address,address]   show contents of memory from given address
   field [ type [ name fieldtype isStatic offset address ] ]   print info about a field of HotSpot type
@@ -96,11 +96,11 @@
 
 JavaScript integration
 
-Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set
+Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set
 by implementing more commands in a JavaScript file and by loading it by jsload command. jseval
 command may be used to evaluate arbitrary JavaScript expression from a string. Any JavaScript
 function may be exposed as a CLHSDB command by registering it using JavaScript registerCommand
-function. This function accepts command name, usage and name of the JavaScript implementation function
+function. This function accepts command name, usage and name of the JavaScript implementation function
 as arguments.
@@ -127,11 +127,11 @@
 
-C2 Compilation Replay
+Compilation Replay
 
 When a java process crashes in compiled method, usually a core file is saved.
-The C2 replay function can reproduce the compiling process in the core.
-c2replay.html
+The replay function can reproduce the compiling process in the core.
+cireplay.html
diff -r e0fb8a213650 -r 836a62f43af9 agent/src/os/bsd/MacosxDebuggerLocal.m
--- a/agent/src/os/bsd/MacosxDebuggerLocal.m	Tue Jun 18 14:23:29 2013 -0700
+++ b/agent/src/os/bsd/MacosxDebuggerLocal.m	Wed Jun 19 10:45:56 2013 +0200
@@ -204,7 +204,7 @@
   jstring objectName, jstring symbolName)
 {
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  if (ph->core != NULL) {
+  if (ph != NULL && ph->core != NULL) {
     return lookupByNameIncore(env, ph, this_obj, objectName, symbolName);
   }
 
@@ -238,10 +238,13 @@
   const char* sym = NULL;
 
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
-  if (sym == NULL) return 0;
-  return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
+  if (ph != NULL && ph->core != NULL) {
+    sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
+    if (sym == NULL) return 0;
+    return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
                           (*env)->NewStringUTF(env, sym), (jlong)offset);
+  }
+  return 0;
 }
 
 /** called from Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_readBytesFromProcess0 */
@@ -279,7 +282,7 @@
   jbyteArray array;
 
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  if (ph->core != NULL) {
+  if (ph != NULL && ph->core != NULL) {
     return readBytesFromCore(env, ph, this_obj, addr, numBytes);
   }
 
@@ -394,9 +397,9 @@
 /* For core file only, called from
  * Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_getThreadIntegerRegisterSet0
  */
-jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, long lwp_id) {
+jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, long lwp_id, struct ps_prochandle* ph) {
   if (!_threads_filled)  {
-    if (!fill_java_threads(env, this_obj, get_proc_handle(env, this_obj))) {
+    if (!fill_java_threads(env, this_obj, ph)) {
       throw_new_debugger_exception(env, "Failed to fill in threads");
       return 0;
     } else {
@@ -409,7 +412,6 @@
   jlongArray array;
   jlong *regs;
 
-  struct ps_prochandle* ph = get_proc_handle(env, this_obj);
   if (get_lwp_regs(ph, lwp_id, &gregs) != true) {
     THROW_NEW_DEBUGGER_EXCEPTION_("get_thread_regs failed for a lwp", 0);
   }
@@ -521,8 +523,8 @@
   print_debug("getThreadRegisterSet0 called\n");
 
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  if (ph->core != NULL) {
-    return getThreadIntegerRegisterSetFromCore(env, this_obj, thread_id);
+  if (ph != NULL && ph->core != NULL) {
+    return getThreadIntegerRegisterSetFromCore(env, this_obj, thread_id, ph);
   }
 
   kern_return_t result;
@@ -705,8 +707,8 @@
   task_t gTask = 0;
   result = task_for_pid(mach_task_self(), jpid, &gTask);
   if (result != KERN_SUCCESS) {
-    print_error("attach: task_for_pid(%d) failed (%d)\n", (int)jpid, result);
-    THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process");
+    print_error("attach: task_for_pid(%d) failed: '%s' (%d)\n", (int)jpid, mach_error_string(result), result);
+    THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process. Could be caused by an incorrect pid or lack of privileges.");
   }
 
   putTask(env, this_obj, gTask);
diff -r e0fb8a213650 -r 836a62f43af9 agent/src/os/bsd/ps_core.c
--- a/agent/src/os/bsd/ps_core.c	Tue Jun 18 14:23:29 2013 -0700
+++ b/agent/src/os/bsd/ps_core.c	Wed Jun 19 10:45:56 2013 +0200
@@ -199,10 +199,10 @@
 //---------------------------------------------------------------
 // Part of the class sharing workaround:
 //
-// With class sharing, pages are mapped from classes[_g].jsa file.
+// With class sharing, pages are mapped from classes.jsa file.
 // The read-only class sharing pages are mapped as MAP_SHARED,
 // PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes[_g].jsa.
+// With this workaround, these pages are read from classes.jsa.
 
 // FIXME: !HACK ALERT!
 // The format of sharing achive file header is needed to read shared heap
@@ -298,14 +298,12 @@
   lib_info* lib = ph->libs;
   while (lib != NULL) {
     // we are iterating over shared objects from the core dump. look for
-    // libjvm[_g].so.
+    // libjvm.so.
     const char *jvm_name = 0;
 #ifdef __APPLE__
-    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.dylib")) != 0)
+    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0)
 #else
-    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0)
+    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0)
 #endif // __APPLE__
     {
       char classes_jsa[PATH_MAX];
@@ -389,7 +387,7 @@
   }
   ph->core->classes_jsa_fd = fd;
-  // add read-only maps from classes[_g].jsa to the list of maps
+  // add read-only maps from classes.jsa to the list of maps
   for (m = 0; m < NUM_SHARED_MAPS; m++) {
     if (header._space[m]._read_only) {
       base = (uintptr_t) header._space[m]._base;
diff -r e0fb8a213650 -r 836a62f43af9 agent/src/os/linux/ps_core.c
--- a/agent/src/os/linux/ps_core.c	Tue Jun 18 14:23:29 2013 -0700
+++ b/agent/src/os/linux/ps_core.c	Wed Jun 19 10:45:56 2013 +0200
@@ -195,10 +195,10 @@
 //---------------------------------------------------------------
 // Part of the class sharing workaround:
 //
-// With class sharing, pages are mapped from classes[_g].jsa file.
+// With class sharing, pages are mapped from classes.jsa file.
 // The read-only class sharing pages are mapped as MAP_SHARED,
 // PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes[_g].jsa.
+// With this workaround, these pages are read from classes.jsa.
 
 // FIXME: !HACK ALERT!
 // The format of sharing achive file header is needed to read shared heap
@@ -284,10 +284,9 @@
   lib_info* lib = ph->libs;
   while (lib != NULL) {
     // we are iterating over shared objects from the core dump. look for
-    // libjvm[_g].so.
+    // libjvm.so.
     const char *jvm_name = 0;
-    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0) {
+    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) {
       char classes_jsa[PATH_MAX];
       struct FileMapHeader header;
       size_t n = 0;
@@ -371,7 +370,7 @@
   }
   ph->core->classes_jsa_fd = fd;
-  // add read-only maps from classes[_g].jsa to the list of maps
+  // add read-only maps from classes.jsa to the list of maps
   for (m = 0; m < NUM_SHARED_MAPS; m++) {
     if (header._space[m]._read_only) {
       base = (uintptr_t) header._space[m]._base;
diff -r e0fb8a213650 -r 836a62f43af9 agent/src/os/solaris/proc/saproc.cpp
--- a/agent/src/os/solaris/proc/saproc.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/agent/src/os/solaris/proc/saproc.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -589,8 +589,7 @@
   JNIEnv* env = dbg->env;
   jobject this_obj = dbg->this_obj;
   const char* jvm_name = 0;
-  if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL ||
-      (jvm_name = strstr(obj_name, "libjvm_g.so")) != NULL) {
+  if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL) {
     jvm_name = obj_name;
   } else {
     return 0;
@@ -598,7 +597,7 @@
   struct ps_prochandle* ph = (struct ps_prochandle*) env->GetLongField(this_obj, p_ps_prochandle_ID);
 
-  // initialize classes[_g].jsa file descriptor field.
+  // initialize classes.jsa file descriptor field.
   dbg->env->SetIntField(this_obj, classes_jsa_fd_ID, -1);
 
   // check whether class sharing is on by reading variable "UseSharedSpaces"
@@ -641,7 +640,7 @@
 
   print_debug("looking for %s\n", classes_jsa);
-  // open the classes[_g].jsa
+  // open the classes.jsa
   int fd = libsaproc_open(classes_jsa, O_RDONLY);
   if (fd < 0) {
     char errMsg[ERR_MSG_SIZE];
@@ -651,7 +650,7 @@
     print_debug("opened shared archive file %s\n", classes_jsa);
   }
-  // parse classes[_g].jsa
+  // parse classes.jsa
   struct FileMapHeader* pheader = (struct FileMapHeader*) malloc(sizeof(struct FileMapHeader));
   if (pheader == NULL) {
     close(fd);
@@ -798,8 +797,8 @@
   if (! isProcess) {
     /*
     * With class sharing, shared perm. gen heap is allocated in with MAP_SHARED|PROT_READ.
-    * These pages are mapped from the file "classes[_g].jsa". MAP_SHARED pages are not dumped
-    * in Solaris core.To read shared heap pages, we have to read classes[_g].jsa file.
+    * These pages are mapped from the file "classes.jsa". MAP_SHARED pages are not dumped
+    * in Solaris core.To read shared heap pages, we have to read classes.jsa file.
    */
    Pobject_iter(ph, init_classsharing_workaround, &dbg);
    exception = env->ExceptionOccurred();
diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Tue Jun 18 14:23:29 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Wed Jun 19 10:45:56 2013 +0200
@@ -24,20 +24,29 @@
 
 package sun.jvm.hotspot;
 
-import java.io.PrintStream;
-import java.net.*;
-import java.rmi.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.bsd.*;
-import sun.jvm.hotspot.debugger.proc.*;
-import sun.jvm.hotspot.debugger.remote.*;
-import sun.jvm.hotspot.debugger.windbg.*;
-import sun.jvm.hotspot.debugger.linux.*;
-import sun.jvm.hotspot.memory.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
+import java.rmi.RemoteException;
+
+import sun.jvm.hotspot.debugger.Debugger;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.JVMDebugger;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.MachineDescriptionAMD64;
+import sun.jvm.hotspot.debugger.MachineDescriptionIA64;
+import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86;
+import sun.jvm.hotspot.debugger.MachineDescriptionSPARC32Bit;
+import sun.jvm.hotspot.debugger.MachineDescriptionSPARC64Bit;
+import sun.jvm.hotspot.debugger.NoSuchSymbolException;
+import sun.jvm.hotspot.debugger.bsd.BsdDebuggerLocal;
+import sun.jvm.hotspot.debugger.linux.LinuxDebuggerLocal;
+import sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal;
+import sun.jvm.hotspot.debugger.remote.RemoteDebugger;
+import sun.jvm.hotspot.debugger.remote.RemoteDebuggerClient;
+import sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer;
+import sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.utilities.PlatformInfo;
+import sun.jvm.hotspot.utilities.UnsupportedPlatformException;
 
 /** This class wraps much of the basic functionality and is the
  * highest-level factory for VM data structures. It makes it simple
@@ -475,7 +484,7 @@
     }
 
     private void setupJVMLibNamesSolaris() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so", "gamma_g" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }
 
     //
@@ -507,7 +516,7 @@
     }
 
     private void setupJVMLibNamesWin32() {
-        jvmLibNames = new String[] { "jvm.dll", "jvm_g.dll" };
+        jvmLibNames = new String[] { "jvm.dll" };
     }
 
     //
@@ -547,7 +556,7 @@
     }
 
     private void setupJVMLibNamesLinux() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }
 
     //
@@ -572,7 +581,7 @@
     }
 
     private void setupJVMLibNamesBsd() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }
 
     //
@@ -595,7 +604,7 @@
     }
 
     private void setupJVMLibNamesDarwin() {
-        jvmLibNames = new String[] { "libjvm.dylib", "libjvm_g.dylib" };
+        jvmLibNames = new String[] { "libjvm.dylib" };
     }
 
     /** Convenience routine which should be called by per-platform
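The setupJVMLibNames* hunks above leave a single candidate library name per platform now that the debug-suffixed `_g` binaries no longer exist. A minimal sketch of the resulting tables, assuming a hypothetical print loop for illustration; only the array contents are taken from the hunks above, the class itself is not SA code.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class JvmLibNames {
    public static void main(String[] args) {
        // One candidate library per platform after this patch; before the
        // change each list also carried a "_g" debug variant (and Solaris
        // additionally listed "gamma_g").
        Map<String, String[]> jvmLibNames = new LinkedHashMap<>();
        jvmLibNames.put("linux/solaris/bsd", new String[] { "libjvm.so" });
        jvmLibNames.put("darwin",            new String[] { "libjvm.dylib" });
        jvmLibNames.put("win32",             new String[] { "jvm.dll" });

        jvmLibNames.forEach((os, libs) ->
                System.out.println(os + " -> probes " + String.join(", ", libs)));
    }
}
```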
diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/LinuxVtblAccess.java
--- a/agent/src/share/classes/sun/jvm/hotspot/LinuxVtblAccess.java	Tue Jun 18 14:23:29 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/LinuxVtblAccess.java	Wed Jun 19 10:45:56 2013 +0200
@@ -24,9 +24,9 @@
 
 package sun.jvm.hotspot;
 
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.types.basic.*;
+import sun.jvm.hotspot.debugger.SymbolLookup;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.basic.BasicVtblAccess;
 
 public class LinuxVtblAccess extends BasicVtblAccess {
   private String vt;
@@ -35,8 +35,7 @@
                          String[] dllNames) {
     super(symbolLookup, dllNames);
 
-    if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null ||
-        symbolLookup.lookup("libjvm_g.so", "__vt_10JavaThread") != null) {
+    if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null) {
       // old C++ ABI
       vt = "__vt_";
     } else {
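The next two hunks change what the SA writes into replay data: ciEnv appends the task's compilation level to the `compile` record, and ciMethod switches from the removed per-Method counter fields to the new MethodCounters-backed accessors. A hedged sketch of the extended record, with hypothetical values; only the concatenation format mirrors the ciEnv hunk below.

```java
public class CompileRecord {
    public static void main(String[] args) {
        // Hypothetical example values; the output format mirrors the
        // out.println(...) call in the ciEnv.java hunk below.
        String holder    = "java/lang/String";
        String name      = "hashCode";
        String signature = "()I";
        int entryBci     = -1;   // -1 marks a normal (non-OSR) compilation
        int compLevel    = 4;    // the newly recorded compilation level

        System.out.println("compile " + holder + " " + name + " " +
                           signature + " " + entryBci + " " + compLevel);
        // prints: compile java/lang/String hashCode ()I -1 4
    }
}
```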
diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java
--- a/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java	Tue Jun 18 14:23:29 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java	Wed Jun 19 10:45:56 2013 +0200
@@ -93,10 +93,11 @@
     CompileTask task = task();
     Method method = task.method();
     int entryBci = task.osrBci();
+    int compLevel = task.compLevel();
     Klass holder = method.getMethodHolder();
     out.println("compile " + holder.getName().asString() + " " +
                 OopUtilities.escapeString(method.getName().asString()) + " " +
                 method.getSignature().asString() + " " +
-                entryBci);
+                entryBci + " " + compLevel);
   }
 }
diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/ci/ciMethod.java
--- a/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethod.java	Tue Jun 18 14:23:29 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethod.java	Wed Jun 19 10:45:56 2013 +0200
@@ -97,8 +97,8 @@
                 holder.getName().asString() + " " +
                 OopUtilities.escapeString(method.getName().asString()) + " " +
                 method.getSignature().asString() + " " +
-                method.getInvocationCounter() + " " +
-                method.getBackedgeCounter() + " " +
+                method.getInvocationCount() + " " +
+                method.getBackedgeCount() + " " +
                 interpreterInvocationCount() + " " +
                 interpreterThrowoutCount() + " " +
                 instructionsSize());
diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java
---
a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java Wed Jun 19 10:45:56 2013 +0200 @@ -78,6 +78,8 @@ current sweep traversal index. */ private static CIntegerField stackTraversalMarkField; + private static CIntegerField compLevelField; + static { VM.registerVMInitializedObserver(new Observer() { public void update(Observable o, Object data) { @@ -113,7 +115,7 @@ osrEntryPointField = type.getAddressField("_osr_entry_point"); lockCountField = type.getJIntField("_lock_count"); stackTraversalMarkField = type.getCIntegerField("_stack_traversal_mark"); - + compLevelField = type.getCIntegerField("_comp_level"); pcDescSize = db.lookupType("PcDesc").getSize(); } @@ -530,7 +532,7 @@ out.println("compile " + holder.getName().asString() + " " + OopUtilities.escapeString(method.getName().asString()) + " " + method.getSignature().asString() + " " + - getEntryBCI()); + getEntryBCI() + " " + getCompLevel()); } @@ -551,4 +553,5 @@ private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); } private int getNulChkTableOffset() { return (int) nulChkTableOffsetField .getValue(addr); } private int getNMethodEndOffset() { return (int) nmethodEndOffsetField .getValue(addr); } + private int getCompLevel() { return (int) compLevelField .getValue(addr); } } diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java --- a/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java Wed Jun 19 10:45:56 2013 +0200 @@ -46,10 +46,12 @@ Type type = db.lookupType("CompileTask"); methodField = type.getAddressField("_method"); osrBciField = new CIntField(type.getCIntegerField("_osr_bci"), 0); + compLevelField = new CIntField(type.getCIntegerField("_comp_level"), 0); } private static AddressField methodField; private static CIntField osrBciField; + private static CIntField compLevelField; public CompileTask(Address addr) { super(addr); @@ -63,4 +65,8 @@ public int osrBci() { return (int)osrBciField.getValue(getAddress()); } + + public int compLevel() { + return (int)compLevelField.getValue(getAddress()); + } } diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java Wed Jun 19 10:45:56 2013 +0200 @@ -24,17 +24,28 @@ package sun.jvm.hotspot.debugger.bsd; -import java.io.*; -import java.net.*; -import java.util.*; -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.x86.*; -import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.utilities.*; +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.debugger.DebuggerBase; +import sun.jvm.hotspot.debugger.DebuggerException; +import sun.jvm.hotspot.debugger.DebuggerUtilities; +import sun.jvm.hotspot.debugger.MachineDescription; +import sun.jvm.hotspot.debugger.NotInHeapException; +import sun.jvm.hotspot.debugger.OopHandle; +import sun.jvm.hotspot.debugger.ReadResult; +import sun.jvm.hotspot.debugger.ThreadProxy; +import sun.jvm.hotspot.debugger.UnalignedAddressException; +import sun.jvm.hotspot.debugger.UnmappedAddressException; +import 
sun.jvm.hotspot.debugger.cdbg.CDebugger; +import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol; +import sun.jvm.hotspot.debugger.cdbg.LoadObject; +import sun.jvm.hotspot.runtime.JavaThread; +import sun.jvm.hotspot.runtime.Threads; import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.Threads; -import sun.jvm.hotspot.runtime.JavaThread; -import java.lang.reflect.*; +import sun.jvm.hotspot.utilities.PlatformInfo; /**

An implementation of the JVMDebugger interface. The basic debug facilities are implemented through ptrace interface in the JNI code @@ -246,10 +257,8 @@ /* called from attach methods */ private void findABIVersion() throws DebuggerException { String libjvmName = isDarwin ? "libjvm.dylib" : "libjvm.so"; - String libjvm_gName = isDarwin? "libjvm_g.dylib" : "libjvm_g.so"; String javaThreadVt = isDarwin ? "_vt_10JavaThread" : "__vt_10JavaThread"; - if (lookupByName0(libjvmName, javaThreadVt) != 0 || - lookupByName0(libjvm_gName, javaThreadVt) != 0) { + if (lookupByName0(libjvmName, javaThreadVt) != 0) { // old C++ ABI useGCC32ABI = false; } else { diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/debugger/cdbg/basic/amd64/AMD64CFrame.java --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/cdbg/basic/amd64/AMD64CFrame.java Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.cdbg.basic.amd64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.amd64.*; -import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.debugger.cdbg.basic.*; - -/** Basic AMD64 frame functionality providing sender() functionality. 
*/ - -public class AMD64CFrame extends BasicCFrame { - private Address rbp; - private Address pc; - - private static final int ADDRESS_SIZE = 8; - - /** Constructor for topmost frame */ - public AMD64CFrame(CDebugger dbg, Address rbp, Address pc) { - super(dbg); - this.rbp = rbp; - this.pc = pc; - } - - public CFrame sender(ThreadProxy thread) { - AMD64ThreadContext context = (AMD64ThreadContext) thread.getContext(); - Address rsp = context.getRegisterAsAddress(AMD64ThreadContext.RSP); - - if ( (rbp == null) || rbp.lessThan(rsp) ) { - return null; - } - - Address nextRBP = rbp.getAddressAt( 0 * ADDRESS_SIZE); - if (nextRBP == null) { - return null; - } - Address nextPC = rbp.getAddressAt( 1 * ADDRESS_SIZE); - if (nextPC == null) { - return null; - } - return new AMD64CFrame(dbg(), nextRBP, nextPC); - } - - public Address pc() { - return pc; - } - - public Address localVariableBase() { - return rbp; - } -} diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/debugger/cdbg/basic/x86/X86CFrame.java --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/cdbg/basic/x86/X86CFrame.java Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.cdbg.basic.x86; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.x86.*; -import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.debugger.cdbg.basic.*; - -/** Basic X86 frame functionality providing sender() functionality. 
*/ - -public class X86CFrame extends BasicCFrame { - private Address ebp; - private Address pc; - - private static final int ADDRESS_SIZE = 4; - - /** Constructor for topmost frame */ - public X86CFrame(CDebugger dbg, Address ebp, Address pc) { - super(dbg); - this.ebp = ebp; - this.pc = pc; - } - - public CFrame sender(ThreadProxy thread) { - X86ThreadContext context = (X86ThreadContext) thread.getContext(); - Address esp = context.getRegisterAsAddress(X86ThreadContext.ESP); - - if ( (ebp == null) || ebp.lessThan(esp) ) { - return null; - } - - Address nextEBP = ebp.getAddressAt( 0 * ADDRESS_SIZE); - if (nextEBP == null) { - return null; - } - Address nextPC = ebp.getAddressAt( 1 * ADDRESS_SIZE); - if (nextPC == null) { - return null; - } - return new X86CFrame(dbg(), nextEBP, nextPC); - } - - public Address pc() { - return pc; - } - - public Address localVariableBase() { - return ebp; - } -} diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java Wed Jun 19 10:45:56 2013 +0200 @@ -24,14 +24,25 @@ package sun.jvm.hotspot.debugger.linux; -import java.io.*; -import java.net.*; -import java.util.*; -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.x86.*; -import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.utilities.*; -import java.lang.reflect.*; +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.debugger.DebuggerBase; +import sun.jvm.hotspot.debugger.DebuggerException; +import sun.jvm.hotspot.debugger.DebuggerUtilities; +import sun.jvm.hotspot.debugger.MachineDescription; +import sun.jvm.hotspot.debugger.NotInHeapException; +import sun.jvm.hotspot.debugger.OopHandle; +import sun.jvm.hotspot.debugger.ReadResult; +import sun.jvm.hotspot.debugger.ThreadProxy; +import sun.jvm.hotspot.debugger.UnalignedAddressException; +import sun.jvm.hotspot.debugger.UnmappedAddressException; +import sun.jvm.hotspot.debugger.cdbg.CDebugger; +import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol; +import sun.jvm.hotspot.debugger.cdbg.LoadObject; +import sun.jvm.hotspot.utilities.PlatformInfo; /**

An implementation of the JVMDebugger interface. The basic debug facilities are implemented through ptrace interface in the JNI code @@ -238,8 +249,7 @@ /* called from attach methods */ private void findABIVersion() throws DebuggerException { - if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0 || - lookupByName0("libjvm_g.so", "__vt_10JavaThread") != 0) { + if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0) { // old C++ ABI useGCC32ABI = false; } else { diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgCDebugger.java --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgCDebugger.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgCDebugger.java Wed Jun 19 10:45:56 2013 +0200 @@ -28,10 +28,10 @@ import java.util.*; import sun.jvm.hotspot.debugger.*; import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.debugger.cdbg.basic.x86.*; -import sun.jvm.hotspot.debugger.cdbg.basic.amd64.*; import sun.jvm.hotspot.debugger.x86.*; import sun.jvm.hotspot.debugger.amd64.*; +import sun.jvm.hotspot.debugger.windows.x86.*; +import sun.jvm.hotspot.debugger.windows.amd64.*; import sun.jvm.hotspot.utilities.AddressOps; class WindbgCDebugger implements CDebugger { @@ -75,14 +75,14 @@ if (ebp == null) return null; Address pc = context.getRegisterAsAddress(X86ThreadContext.EIP); if (pc == null) return null; - return new X86CFrame(this, ebp, pc); + return new WindowsX86CFrame(dbg, ebp, pc); } else if (dbg.getCPU().equals("amd64")) { AMD64ThreadContext context = (AMD64ThreadContext) thread.getContext(); Address rbp = context.getRegisterAsAddress(AMD64ThreadContext.RBP); if (rbp == null) return null; Address pc = context.getRegisterAsAddress(AMD64ThreadContext.RIP); if (pc == null) return null; - return new AMD64CFrame(this, rbp, pc); + return new WindowsAMD64CFrame(dbg, rbp, pc); } else { // unsupported CPU! return null; diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/debugger/windows/amd64/WindowsAMD64CFrame.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windows/amd64/WindowsAMD64CFrame.java Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package sun.jvm.hotspot.debugger.windows.amd64; + +import sun.jvm.hotspot.debugger.*; +import sun.jvm.hotspot.debugger.amd64.*; +import sun.jvm.hotspot.debugger.cdbg.*; +import sun.jvm.hotspot.debugger.cdbg.basic.*; +import sun.jvm.hotspot.debugger.windbg.*; + +public class WindowsAMD64CFrame extends BasicCFrame { + private Address rbp; + private Address pc; + + private static final int ADDRESS_SIZE = 8; + + /** Constructor for topmost frame */ + public WindowsAMD64CFrame(WindbgDebugger dbg, Address rbp, Address pc) { + super(dbg.getCDebugger()); + this.rbp = rbp; + this.pc = pc; + this.dbg = dbg; + } + + public CFrame sender(ThreadProxy thread) { + AMD64ThreadContext context = (AMD64ThreadContext) thread.getContext(); + Address rsp = context.getRegisterAsAddress(AMD64ThreadContext.RSP); + + if ( (rbp == null) || rbp.lessThan(rsp) ) { + return null; + } + + // Check alignment of rbp + if ( dbg.getAddressValue(rbp) % ADDRESS_SIZE != 0) { + return null; + } + + Address nextRBP = rbp.getAddressAt( 0 * ADDRESS_SIZE); + if (nextRBP == null || nextRBP.lessThanOrEqual(rbp)) { + return null; + } + Address nextPC = rbp.getAddressAt( 1 * ADDRESS_SIZE); + if (nextPC == null) { + return null; + } + return new WindowsAMD64CFrame(dbg, nextRBP, nextPC); + } + + public Address pc() { + return pc; + } + + public Address localVariableBase() { + return rbp; + } + + private WindbgDebugger dbg; +} diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/debugger/windows/x86/WindowsX86CFrame.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windows/x86/WindowsX86CFrame.java Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package sun.jvm.hotspot.debugger.windows.x86; + +import sun.jvm.hotspot.debugger.*; +import sun.jvm.hotspot.debugger.x86.*; +import sun.jvm.hotspot.debugger.cdbg.*; +import sun.jvm.hotspot.debugger.cdbg.basic.*; +import sun.jvm.hotspot.debugger.windbg.*; + +public class WindowsX86CFrame extends BasicCFrame { + private Address ebp; + private Address pc; + + private static final int ADDRESS_SIZE = 4; + + /** Constructor for topmost frame */ + public WindowsX86CFrame(WindbgDebugger dbg, Address ebp, Address pc) { + super(dbg.getCDebugger()); + this.ebp = ebp; + this.pc = pc; + this.dbg = dbg; + } + + public CFrame sender(ThreadProxy thread) { + X86ThreadContext context = (X86ThreadContext) thread.getContext(); + Address esp = context.getRegisterAsAddress(X86ThreadContext.ESP); + + if ( (ebp == null) || ebp.lessThan(esp) ) { + return null; + } + + // Check alignment of ebp + if ( dbg.getAddressValue(ebp) % ADDRESS_SIZE != 0) { + return null; + } + + Address nextEBP = ebp.getAddressAt( 0 * ADDRESS_SIZE); + if (nextEBP == null || nextEBP.lessThanOrEqual(ebp)) { + return null; + } + Address nextPC = ebp.getAddressAt( 1 * ADDRESS_SIZE); + if (nextPC == null) { + return null; + } + return new WindowsX86CFrame(dbg, nextEBP, nextPC); + } + + public Address pc() { + return pc; + } + + public Address localVariableBase() { + return ebp; + } + + private WindbgDebugger dbg; +} diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java --- a/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java Wed Jun 19 10:45:56 2013 +0200 @@ -96,9 +96,10 @@ public boolean containsProtectionDomain(Oop protectionDomain) { InstanceKlass ik = (InstanceKlass) klass(); - if (protectionDomain.equals(ik.getProtectionDomain())) { - return true; // Succeeds trivially - } + // Currently unimplemented and not used. 
+ // if (protectionDomain.equals(ik.getJavaMirror().getProtectionDomain())) { + // return true; // Succeeds trivially + // } for (ProtectionDomainEntry current = pdSet(); current != null; current = current.next()) { if (protectionDomain.equals(current.protectionDomain())) { diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java --- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Wed Jun 19 10:45:56 2013 +0200 @@ -75,8 +75,6 @@ javaFieldsCount = new CIntField(type.getCIntegerField("_java_fields_count"), 0); constants = new MetadataField(type.getAddressField("_constants"), 0); classLoaderData = type.getAddressField("_class_loader_data"); - protectionDomain = new OopField(type.getOopField("_protection_domain"), 0); - signers = new OopField(type.getOopField("_signers"), 0); sourceFileName = type.getAddressField("_source_file_name"); sourceDebugExtension = type.getAddressField("_source_debug_extension"); innerClasses = type.getAddressField("_inner_classes"); @@ -136,8 +134,6 @@ private static CIntField javaFieldsCount; private static MetadataField constants; private static AddressField classLoaderData; - private static OopField protectionDomain; - private static OopField signers; private static AddressField sourceFileName; private static AddressField sourceDebugExtension; private static AddressField innerClasses; @@ -350,8 +346,6 @@ public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); } public ClassLoaderData getClassLoaderData() { return ClassLoaderData.instantiateWrapperFor(classLoaderData.getValue(getAddress())); } public Oop getClassLoader() { return getClassLoaderData().getClassLoader(); } - public Oop getProtectionDomain() { return protectionDomain.getValue(this); } - public ObjArray getSigners() { return (ObjArray) signers.getValue(this); } public Symbol getSourceFileName() { return getSymbol(sourceFileName); } public String getSourceDebugExtension(){ return CStringUtilities.getString(sourceDebugExtension.getValue(getAddress())); } public long getNonstaticFieldSize() { return nonstaticFieldSize.getValue(this); } @@ -541,8 +535,6 @@ // visitor.doOop(methods, true); // visitor.doOop(localInterfaces, true); // visitor.doOop(transitiveInterfaces, true); - visitor.doOop(protectionDomain, true); - visitor.doOop(signers, true); visitor.doCInt(nonstaticFieldSize, true); visitor.doCInt(staticFieldSize, true); visitor.doCInt(staticOopFieldCount, true); diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/oops/Method.java --- a/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,15 +24,21 @@ package sun.jvm.hotspot.oops; -import java.io.*; -import java.util.*; -import sun.jvm.hotspot.code.*; -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.interpreter.*; -import sun.jvm.hotspot.memory.*; -import sun.jvm.hotspot.runtime.*; -import sun.jvm.hotspot.types.*; -import sun.jvm.hotspot.utilities.*; +import java.io.PrintStream; +import java.util.Observable; +import java.util.Observer; + +import sun.jvm.hotspot.code.NMethod; +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.interpreter.OopMapCacheEntry; +import sun.jvm.hotspot.runtime.SignatureConverter; +import sun.jvm.hotspot.runtime.VM; +import sun.jvm.hotspot.runtime.VMObjectFactory; +import sun.jvm.hotspot.types.AddressField; +import sun.jvm.hotspot.types.Type; +import sun.jvm.hotspot.types.TypeDataBase; +import sun.jvm.hotspot.types.WrongTypeException; +import sun.jvm.hotspot.utilities.Assert; // A Method represents a Java method @@ -49,19 +55,13 @@ Type type = db.lookupType("Method"); constMethod = type.getAddressField("_constMethod"); methodData = type.getAddressField("_method_data"); + methodCounters = type.getAddressField("_method_counters"); methodSize = new CIntField(type.getCIntegerField("_method_size"), 0); accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0); code = type.getAddressField("_code"); vtableIndex = new CIntField(type.getCIntegerField("_vtable_index"), 0); - if (!VM.getVM().isCore()) { - invocationCounter = new CIntField(type.getCIntegerField("_invocation_counter"), 0); - backedgeCounter = new CIntField(type.getCIntegerField("_backedge_counter"), 0); - } bytecodeOffset = type.getSize(); - interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0); - interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0); - /* interpreterEntry = type.getAddressField("_interpreter_entry"); fromCompiledCodeEntryPoint = type.getAddressField("_from_compiled_code_entry_point"); @@ -80,18 +80,14 @@ // Fields private static AddressField constMethod; private static AddressField methodData; + private static AddressField methodCounters; private static CIntField methodSize; private static CIntField accessFlags; private static CIntField vtableIndex; - private static CIntField invocationCounter; - private static CIntField backedgeCounter; private static long bytecodeOffset; private static AddressField code; - private static CIntField interpreterThrowoutCountField; - private static CIntField interpreterInvocationCountField; - // constant method names - , // Initialized lazily to avoid initialization ordering dependencies between Method and SymbolTable private static Symbol objectInitializerName; @@ -127,6 +123,10 @@ Address addr = methodData.getValue(getAddress()); return (MethodData) VMObjectFactory.newObject(MethodData.class, addr); } + public MethodCounters getMethodCounters() { + Address addr = methodCounters.getValue(getAddress()); + return (MethodCounters) VMObjectFactory.newObject(MethodCounters.class, addr); + } /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */ public long getMethodSize() { return methodSize.getValue(this); } public long getMaxStack() { return getConstMethod().getMaxStack(); } @@ -138,17 +138,13 @@ public long getAccessFlags() { return accessFlags.getValue(this); } public long getCodeSize() { return getConstMethod().getCodeSize(); } public long 
getVtableIndex() { return vtableIndex.getValue(this); } - public long getInvocationCounter() { - if (Assert.ASSERTS_ENABLED) { - Assert.that(!VM.getVM().isCore(), "must not be used in core build"); - } - return invocationCounter.getValue(this); + public long getInvocationCount() { + MethodCounters mc = getMethodCounters(); + return mc == null ? 0 : mc.getInvocationCounter(); } - public long getBackedgeCounter() { - if (Assert.ASSERTS_ENABLED) { - Assert.that(!VM.getVM().isCore(), "must not be used in core build"); - } - return backedgeCounter.getValue(this); + public long getBackedgeCount() { + MethodCounters mc = getMethodCounters(); + return mc == null ? 0 : mc.getBackedgeCounter(); } // get associated compiled native method, if available, else return null. @@ -361,18 +357,18 @@ holder.getName().asString() + " " + OopUtilities.escapeString(getName().asString()) + " " + getSignature().asString() + " " + - getInvocationCounter() + " " + - getBackedgeCounter() + " " + + getInvocationCount() + " " + + getBackedgeCount() + " " + interpreterInvocationCount() + " " + interpreterThrowoutCount() + " " + code_size); } public int interpreterThrowoutCount() { - return (int) interpreterThrowoutCountField.getValue(this); + return getMethodCounters().interpreterThrowoutCount(); } public int interpreterInvocationCount() { - return (int) interpreterInvocationCountField.getValue(this); + return getMethodCounters().interpreterInvocationCount(); } } diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package sun.jvm.hotspot.oops; + +import java.io.*; +import java.util.*; +import sun.jvm.hotspot.debugger.*; +import sun.jvm.hotspot.runtime.*; +import sun.jvm.hotspot.types.*; +import sun.jvm.hotspot.utilities.*; + +public class MethodCounters extends Metadata { + public MethodCounters(Address addr) { + super(addr); + } + + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { + Type type = db.lookupType("MethodCounters"); + + interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0); + interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0); + if (!VM.getVM().isCore()) { + invocationCounter = new CIntField(type.getCIntegerField("_invocation_counter"), 0); + backedgeCounter = new CIntField(type.getCIntegerField("_backedge_counter"), 0); + } + } + + private static CIntField interpreterInvocationCountField; + private static CIntField interpreterThrowoutCountField; + private static CIntField invocationCounter; + private static CIntField backedgeCounter; + + public int interpreterInvocationCount() { + return (int) interpreterInvocationCountField.getValue(this); + } + + public int interpreterThrowoutCount() { + return (int) interpreterThrowoutCountField.getValue(this); + } + public long getInvocationCounter() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(!VM.getVM().isCore(), "must not be used in core build"); + } + return invocationCounter.getValue(this); + } + public long getBackedgeCounter() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(!VM.getVM().isCore(), "must not be used in core build"); + } + return backedgeCounter.getValue(this); + } + + public void printValueOn(PrintStream tty) { + } +} + diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/oops/MethodData.java --- a/agent/src/share/classes/sun/jvm/hotspot/oops/MethodData.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/MethodData.java Wed Jun 19 10:45:56 2013 +0200 @@ -316,8 +316,8 @@ int iic = method.interpreterInvocationCount(); if (mileage < iic) mileage = iic; - long ic = method.getInvocationCounter(); - long bc = method.getBackedgeCounter(); + long ic = method.getInvocationCount(); + long bc = method.getBackedgeCount(); long icval = ic >> 3; if ((ic & 4) != 0) icval += CompileThreshold; diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java Wed Jun 19 10:45:56 2013 +0200 @@ -117,8 +117,6 @@ mode = MODE_HEAP_SUMMARY; } else if (modeFlag.equals("-histo")) { mode = MODE_HISTOGRAM; - } else if (modeFlag.equals("-permstat")) { - mode = MODE_CLSTATS; } else if (modeFlag.equals("-clstats")) { mode = MODE_CLSTATS; } else if (modeFlag.equals("-finalizerinfo")) { diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/utilities/HeapGXLWriter.java --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapGXLWriter.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapGXLWriter.java Wed Jun 19 10:45:56 2013 +0200 @@ -204,13 +204,13 @@ Oop loader = ik.getClassLoader(); writeEdge(instance, loader, "loaded-by"); - 
// write signers - Oop signers = ik.getSigners(); - writeEdge(instance, signers, "signed-by"); + // write signers NYI + // Oop signers = ik.getJavaMirror().getSigners(); + writeEdge(instance, null, "signed-by"); - // write protection domain - Oop protectionDomain = ik.getProtectionDomain(); - writeEdge(instance, protectionDomain, "protection-domain"); + // write protection domain NYI + // Oop protectionDomain = ik.getJavaMirror().getProtectionDomain(); + writeEdge(instance, null, "protection-domain"); // write edges for static reference fields from this class for (Iterator itr = refFields.iterator(); itr.hasNext();) { diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Wed Jun 19 10:45:56 2013 +0200 @@ -477,8 +477,8 @@ if (k instanceof InstanceKlass) { InstanceKlass ik = (InstanceKlass) k; writeObjectID(ik.getClassLoader()); - writeObjectID(ik.getSigners()); - writeObjectID(ik.getProtectionDomain()); + writeObjectID(null); // ik.getJavaMirror().getSigners()); + writeObjectID(null); // ik.getJavaMirror().getProtectionDomain()); // two reserved id fields writeObjectID(null); writeObjectID(null); @@ -516,8 +516,8 @@ if (bottomKlass instanceof InstanceKlass) { InstanceKlass ik = (InstanceKlass) bottomKlass; writeObjectID(ik.getClassLoader()); - writeObjectID(ik.getSigners()); - writeObjectID(ik.getProtectionDomain()); + writeObjectID(null); // ik.getJavaMirror().getSigners()); + writeObjectID(null); // ik.getJavaMirror().getProtectionDomain()); } else { writeObjectID(null); writeObjectID(null); diff -r e0fb8a213650 -r 836a62f43af9 agent/src/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaInstanceKlass.java --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaInstanceKlass.java Tue Jun 18 14:23:29 2013 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaInstanceKlass.java Wed Jun 19 10:45:56 2013 +0200 @@ -47,8 +47,6 @@ private static final int FIELD_IS_SYNTHETIC = 13; private static final int FIELD_IS_INTERFACE = 14; private static final int FIELD_CLASS_LOADER = 15; - private static final int FIELD_PROTECTION_DOMAIN = 16; - private static final int FIELD_SIGNERS = 17; private static final int FIELD_STATICS = 18; private static final int FIELD_UNDEFINED = -1; @@ -100,10 +98,6 @@ return Boolean.valueOf(ik.isInterface()); case FIELD_CLASS_LOADER: return factory.newJSJavaObject(ik.getClassLoader()); - case FIELD_PROTECTION_DOMAIN: - return factory.newJSJavaObject(ik.getProtectionDomain()); - case FIELD_SIGNERS: - return factory.newJSJavaObject(ik.getSigners()); case FIELD_STATICS: return getStatics(); case FIELD_UNDEFINED: @@ -246,8 +240,6 @@ addField("isSynthetic", FIELD_IS_SYNTHETIC); addField("isInterface", FIELD_IS_INTERFACE); addField("classLoader", FIELD_CLASS_LOADER); - addField("protectionDomain", FIELD_PROTECTION_DOMAIN); - addField("signers", FIELD_SIGNERS); addField("statics", FIELD_STATICS); } diff -r e0fb8a213650 -r 836a62f43af9 make/Makefile --- a/make/Makefile Tue Jun 18 14:23:29 2013 -0700 +++ b/make/Makefile Wed Jun 19 10:45:56 2013 +0200 @@ -161,29 +161,40 @@ $(MAKE_ARGS) BUILD_FLAVOR=product docs endif +# Output directories +C1_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1 +C2_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2 +MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1 +ZERO_DIR 
=$(OUTPUTDIR)/$(VM_PLATFORM)_zero +SHARK_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_shark + # Build variation of hotspot $(C1_VM_TARGETS): $(CD) $(GAMMADIR)/make; \ - $(MAKE) BUILD_FLAVOR=$(@:%1=%) VM_TARGET=$@ generic_build1 $(ALT_OUT) + $(MAKE) BUILD_DIR=$(C1_DIR) BUILD_FLAVOR=$(@:%1=%) VM_TARGET=$@ generic_build1 $(ALT_OUT) $(C2_VM_TARGETS): $(CD) $(GAMMADIR)/make; \ - $(MAKE) BUILD_FLAVOR=$@ VM_TARGET=$@ generic_build2 $(ALT_OUT) + $(MAKE) BUILD_DIR=$(C2_DIR) BUILD_FLAVOR=$@ VM_TARGET=$@ generic_build2 $(ALT_OUT) $(ZERO_VM_TARGETS): $(CD) $(GAMMADIR)/make; \ - $(MAKE) BUILD_FLAVOR=$(@:%zero=%) VM_TARGET=$@ \ - generic_buildzero $(ALT_OUT) + $(MAKE) BUILD_DIR=$(ZERO_DIR) BUILD_FLAVOR=$(@:%zero=%) VM_TARGET=$@ generic_buildzero $(ALT_OUT) $(SHARK_VM_TARGETS): $(CD) $(GAMMADIR)/make; \ - $(MAKE) BUILD_FLAVOR=$(@:%shark=%) VM_TARGET=$@ \ - generic_buildshark $(ALT_OUT) + $(MAKE) BUILD_DIR=$(SHARK_DIR) BUILD_FLAVOR=$(@:%shark=%) VM_TARGET=$@ generic_buildshark $(ALT_OUT) $(MINIMAL1_VM_TARGETS): $(CD) $(GAMMADIR)/make; \ - $(MAKE) BUILD_FLAVOR=$(@:%minimal1=%) VM_TARGET=$@ \ - generic_buildminimal1 $(ALT_OUT) + $(MAKE) BUILD_DIR=$(MINIMAL1_DIR) BUILD_FLAVOR=$(@:%minimal1=%) VM_TARGET=$@ generic_buildminimal1 $(ALT_OUT) + +# Install hotspot script in build directory +HOTSPOT_SCRIPT=$(BUILD_DIR)/$(BUILD_FLAVOR)/hotspot +$(HOTSPOT_SCRIPT): $(GAMMADIR)/make/hotspot.script + $(QUIETLY) $(MKDIR) -p $(BUILD_DIR)/$(BUILD_FLAVOR) + $(QUIETLY) cat $< | sed -e 's|@@LIBARCH@@|$(LIBARCH)|g' | sed -e 's|@@JDK_IMPORT_PATH@@|$(JDK_IMPORT_PATH)|g' > $@ + $(QUIETLY) chmod +x $@ $(GRAAL_VM_TARGETS): $(CD) $(GAMMADIR)/make; \ @@ -191,7 +202,7 @@ generic_buildgraal $(ALT_OUT) # Build compiler1 (client) rule, different for platforms -generic_build1: buildshared +generic_build1: $(HOTSPOT_SCRIPT) buildshared $(MKDIR) -p $(OUTPUTDIR) ifeq ($(OSNAME),windows) ifeq ($(ARCH_DATA_MODEL), 32) @@ -212,7 +223,7 @@ endif # Build compiler2 (server) rule, different for platforms -generic_build2: buildshared +generic_build2: $(HOTSPOT_SCRIPT) buildshared $(MKDIR) -p $(OUTPUTDIR) ifeq ($(OSNAME),windows) $(CD) $(OUTPUTDIR); \ @@ -228,19 +239,19 @@ $(MAKE_ARGS) $(VM_TARGET) endif -generic_buildzero: +generic_buildzero: $(HOTSPOT_SCRIPT) $(MKDIR) -p $(OUTPUTDIR) $(CD) $(OUTPUTDIR); \ $(MAKE) -f $(ABS_OS_MAKEFILE) \ $(MAKE_ARGS) $(VM_TARGET) -generic_buildshark: +generic_buildshark: $(HOTSPOT_SCRIPT) $(MKDIR) -p $(OUTPUTDIR) $(CD) $(OUTPUTDIR); \ $(MAKE) -f $(ABS_OS_MAKEFILE) \ $(MAKE_ARGS) $(VM_TARGET) -generic_buildminimal1: +generic_buildminimal1: $(HOTSPOT_SCRIPT) ifeq ($(JVM_VARIANT_MINIMAL1),true) $(MKDIR) -p $(OUTPUTDIR) ifeq ($(ARCH_DATA_MODEL), 32) @@ -274,252 +285,212 @@ # Export file rule generic_export: $(EXPORT_LIST) + export_product: - $(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \ - generic_export + $(MAKE) BUILD_FLAVOR=$(@:export_%=%) generic_export export_fastdebug: - $(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \ - EXPORT_SUBDIR=/$(@:export_%=%) \ - generic_export + $(MAKE) BUILD_FLAVOR=$(@:export_%=%) EXPORT_SUBDIR=/$(@:export_%=%) generic_export export_debug: - $(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \ - EXPORT_SUBDIR=/$(@:export_%=%) \ - generic_export + $(MAKE) BUILD_FLAVOR=$(@:export_%=%) EXPORT_SUBDIR=/$(@:export_%=%) generic_export export_optimized: - $(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \ - EXPORT_SUBDIR=/$(@:export_%=%) \ - generic_export + $(MAKE) BUILD_FLAVOR=$(@:export_%=%) EXPORT_SUBDIR=/$(@:export_%=%) generic_export + 
export_product_jdk:: - $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) \ - VM_SUBDIR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \ - generic_export + $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export export_optimized_jdk:: - $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) \ - VM_SUBDIR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \ - generic_export + $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export export_fastdebug_jdk:: - $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) \ - VM_SUBDIR=$(@:export_%_jdk=%) \ - ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \ - generic_export + $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export export_debug_jdk:: - $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) VM_SUBDIR=$(@:export_%_jdk=%) \ - ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \ - generic_export + $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export # Export file copy rules XUSAGE=$(HS_SRC_DIR)/share/vm/Xusage.txt DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs -C1_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1 -C2_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2 -ZERO_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_zero -SHARK_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_shark -GRAAL_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_graal -C1_DIR=$(C1_BASE_DIR)/$(VM_SUBDIR) -C2_DIR=$(C2_BASE_DIR)/$(VM_SUBDIR) -ZERO_DIR=$(ZERO_BASE_DIR)/$(VM_SUBDIR) -SHARK_DIR=$(SHARK_BASE_DIR)/$(VM_SUBDIR) -MINIMAL1_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1 -MINIMAL1_DIR=$(MINIMAL1_BASE_DIR)/$(VM_SUBDIR) -GRAAL_DIR=$(GRAAL_BASE_DIR)/$(VM_SUBDIR) - -ifeq ($(JVM_VARIANT_SERVER), true) - MISC_DIR=$(C2_DIR) - GEN_DIR=$(C2_BASE_DIR)/generated -endif -ifeq ($(JVM_VARIANT_CLIENT), true) - MISC_DIR=$(C1_DIR) - GEN_DIR=$(C1_BASE_DIR)/generated -endif -ifeq ($(JVM_VARIANT_ZEROSHARK), true) - MISC_DIR=$(SHARK_DIR) - GEN_DIR=$(SHARK_BASE_DIR)/generated -endif -ifeq ($(JVM_VARIANT_ZERO), true) - MISC_DIR=$(ZERO_DIR) - GEN_DIR=$(ZERO_BASE_DIR)/generated -endif -ifeq ($(JVM_VARIANT_MINIMAL1), true) - MISC_DIR=$(MINIMAL1_DIR) - GEN_DIR=$(MINIMAL1_BASE_DIR)/generated -endif +C1_BUILD_DIR =$(C1_DIR)/$(BUILD_FLAVOR) +C2_BUILD_DIR =$(C2_DIR)/$(BUILD_FLAVOR) +MINIMAL1_BUILD_DIR=$(MINIMAL1_DIR)/$(BUILD_FLAVOR) +ZERO_BUILD_DIR =$(ZERO_DIR)/$(BUILD_FLAVOR) +SHARK_BUILD_DIR =$(SHARK_DIR)/$(BUILD_FLAVOR) -# Bin files (windows) -ifeq ($(OSNAME),windows) - -# Get jvm.lib -$(EXPORT_LIB_DIR)/%.lib: $(MISC_DIR)/%.lib +# Server (C2) +ifeq ($(JVM_VARIANT_SERVER), true) +# Common +$(EXPORT_SERVER_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz + $(install-file) +$(EXPORT_LIB_DIR)/%.jar: $(C2_BUILD_DIR)/../generated/%.jar + $(install-file) +$(EXPORT_INCLUDE_DIR)/%: $(C2_BUILD_DIR)/../generated/jvmtifiles/% $(install-file) - -# Other libraries (like SA) -$(EXPORT_JRE_BIN_DIR)/%.diz: $(MISC_DIR)/%.diz +# Windows +$(EXPORT_SERVER_DIR)/%.dll: $(C2_BUILD_DIR)/%.dll + $(install-file) +$(EXPORT_SERVER_DIR)/%.pdb: $(C2_BUILD_DIR)/%.pdb $(install-file) -$(EXPORT_JRE_BIN_DIR)/%.dll: $(MISC_DIR)/%.dll +$(EXPORT_SERVER_DIR)/%.map: $(C2_BUILD_DIR)/%.map $(install-file) -$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MISC_DIR)/%.pdb +$(EXPORT_LIB_DIR)/%.lib: $(C2_BUILD_DIR)/%.lib $(install-file) -$(EXPORT_JRE_BIN_DIR)/%.map: $(MISC_DIR)/%.map +$(EXPORT_JRE_BIN_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz $(install-file) - -# Client files always come from C1 area -$(EXPORT_CLIENT_DIR)/%.diz: $(C1_DIR)/%.diz +$(EXPORT_JRE_BIN_DIR)/%.dll: 
$(C2_BUILD_DIR)/%.dll + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C2_BUILD_DIR)/%.pdb $(install-file) -$(EXPORT_CLIENT_DIR)/%.dll: $(C1_DIR)/%.dll +$(EXPORT_JRE_BIN_DIR)/%.map: $(C2_BUILD_DIR)/%.map $(install-file) -$(EXPORT_CLIENT_DIR)/%.pdb: $(C1_DIR)/%.pdb +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) -$(EXPORT_CLIENT_DIR)/%.map: $(C1_DIR)/%.map +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) - -# Server files always come from C2 area -$(EXPORT_SERVER_DIR)/%.diz: $(C2_DIR)/%.diz +$(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo + $(install-file) +$(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo $(install-file) -$(EXPORT_SERVER_DIR)/%.dll: $(C2_DIR)/%.dll +$(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo $(install-file) -$(EXPORT_SERVER_DIR)/%.pdb: $(C2_DIR)/%.pdb +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz $(install-file) -$(EXPORT_SERVER_DIR)/%.map: $(C2_DIR)/%.map +$(EXPORT_SERVER_DIR)/64/%.diz: $(C2_BUILD_DIR)/%.diz $(install-file) endif -# Minimal JVM files always come from minimal area -$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz +# Client (C1) +ifeq ($(JVM_VARIANT_CLIENT), true) +# Common +$(EXPORT_CLIENT_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz + $(install-file) +$(EXPORT_LIB_DIR)/%.jar: $(C1_BUILD_DIR)/../generated/%.jar + $(install-file) +$(EXPORT_INCLUDE_DIR)/%: $(C1_BUILD_DIR)/../generated/jvmtifiles/% $(install-file) -$(EXPORT_MINIMAL_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll +# Windows +$(EXPORT_CLIENT_DIR)/%.dll: $(C1_BUILD_DIR)/%.dll + $(install-file) +$(EXPORT_CLIENT_DIR)/%.pdb: $(C1_BUILD_DIR)/%.pdb $(install-file) -$(EXPORT_MINIMAL_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb +$(EXPORT_CLIENT_DIR)/%.map: $(C1_BUILD_DIR)/%.map + $(install-file) +$(EXPORT_LIB_DIR)/%.lib: $(C1_BUILD_DIR)/%.lib $(install-file) -$(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_DIR)/%.map +$(EXPORT_JRE_BIN_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.dll: $(C1_BUILD_DIR)/%.dll $(install-file) - -# Shared Library -ifneq ($(OSNAME),windows) - ifeq ($(JVM_VARIANT_SERVER), true) - # C2 - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(C2_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.diz: $(C2_DIR)/%.diz - $(install-file) - - # Graal - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(GRAAL_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(GRAAL_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(GRAAL_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(GRAAL_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(GRAAL_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.debuginfo: $(GRAAL_DIR)/%.debuginfo - 
$(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(GRAAL_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(GRAAL_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.diz: $(GRAAL_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_CLIENT), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_DIR)/%.diz - $(install-file) - $(EXPORT_CLIENT_DIR)/%.diz: $(C1_DIR)/%.diz - $(install-file) - $(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_ZEROSHARK), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(SHARK_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_ZERO), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(ZERO_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_MINIMAL1), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz - $(install-file) - $(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz - $(install-file) - $(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_DIR)/%.diz - $(install-file) - endif +$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C1_BUILD_DIR)/%.pdb + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.map: $(C1_BUILD_DIR)/%.map + $(install-file) +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo + $(install-file) +$(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo + $(install-file) 
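
For illustration, a minimal sketch of driving the restructured per-variant export rules above; the image path is hypothetical, while the product and export_product targets and the ALT_EXPORT_PATH variable all come from the rules earlier in this Makefile:

    # Build the server VM, then install its libraries and headers into a JDK image.
    cd $GAMMADIR/make
    make product                                        # builds under $(C2_DIR)/product
    make export_product ALT_EXPORT_PATH=/tmp/jdk-image  # copies files via $(install-file)
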
+$(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
+	$(install-file)
+$(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_BUILD_DIR)/%.diz
+	$(install-file)
 endif

-# Jar file (sa-jdi.jar)
-$(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
+# Minimal1
+ifeq ($(JVM_VARIANT_MINIMAL1), true)
+# Common
+$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
+	$(install-file)
+$(EXPORT_LIB_DIR)/%.jar: $(MINIMAL1_BUILD_DIR)/../generated/%.jar
+	$(install-file)
+$(EXPORT_INCLUDE_DIR)/%: $(MINIMAL1_BUILD_DIR)/../generated/jvmtifiles/%
+	$(install-file)
+# Windows
+$(EXPORT_MINIMAL_DIR)/%.dll: $(MINIMAL1_BUILD_DIR)/%.dll
+	$(install-file)
+$(EXPORT_MINIMAL_DIR)/%.pdb: $(MINIMAL1_BUILD_DIR)/%.pdb
+	$(install-file)
+$(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
+	$(install-file)
+$(EXPORT_LIB_DIR)/%.lib: $(MINIMAL1_BUILD_DIR)/%.lib
+	$(install-file)
+$(EXPORT_JRE_BIN_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
+	$(install-file)
+$(EXPORT_JRE_BIN_DIR)/%.dll: $(MINIMAL1_BUILD_DIR)/%.dll
 	$(install-file)
-
-# Shared jar files
-$(EXPORT_JRE_LIB_DIR)/%.jar: $(SHARED_DIR)/%.jar
+$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MINIMAL1_BUILD_DIR)/%.pdb
+	$(install-file)
+$(EXPORT_JRE_BIN_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
+	$(install-file)
+# Unix
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
 	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
+	$(install-file)
+$(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
+	$(install-file)
+endif

-# Shared options files
-$(EXPORT_JRE_LIB_DIR)/%.options: $(SHARED_DIR)/%.options
+# Zero
+ifeq ($(JVM_VARIANT_ZERO), true)
+# Common
+$(EXPORT_LIB_DIR)/%.jar: $(ZERO_BUILD_DIR)/../generated/%.jar
+	$(install-file)
+$(EXPORT_INCLUDE_DIR)/%: $(ZERO_BUILD_DIR)/../generated/jvmtifiles/%
+	$(install-file)
+# Unix
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
 	$(install-file)
-
-# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h, jfr.h)
-$(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
+endif
+
+# Shark
+ifeq ($(JVM_VARIANT_ZEROSHARK), true)
+# Common
+$(EXPORT_LIB_DIR)/%.jar: $(SHARK_BUILD_DIR)/../generated/%.jar
+	$(install-file)
+$(EXPORT_INCLUDE_DIR)/%: $(SHARK_BUILD_DIR)/../generated/jvmtifiles/%
+	$(install-file)
+# Unix
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
 	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(SHARK_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
+	$(install-file)
+endif

 $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
 	$(install-file)

@@ -537,7 +508,7 @@
 JFR_EXISTS=$(shell if [ -d $(HS_ALT_SRC) ]; then echo 1; else echo 0; fi)
 # export jfr.h
 ifeq ($JFR_EXISTS,1)
-$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/agent/%
+$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/%
 	$(install-file)
 else
 $(EXPORT_INCLUDE_DIR)/jfr.h:
diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/adlc.make
--- a/make/bsd/makefiles/adlc.make	Tue Jun 18 14:23:29 2013 -0700
+++ b/make/bsd/makefiles/adlc.make	Wed Jun 19 10:45:56 2013 +0200
@@ -69,7 +69,7 @@
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 # Compiler warnings are treated as errors
 ifneq ($(COMPILER_WARNINGS_FATAL),false)
-  CFLAGS_WARN = -Werror
+  CFLAGS_WARN = $(WARNINGS_ARE_ERRORS)
 endif
 CFLAGS += $(CFLAGS_WARN)

diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/arm.make
--- a/make/bsd/makefiles/arm.make	Tue Jun 18 14:23:29 2013 -0700
+++ b/make/bsd/makefiles/arm.make	Wed Jun 19 10:45:56 2013 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,8 @@

 Obj_Files += bsd_arm.o

-LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a
+ifneq ($(EXT_LIBS_PATH),)
+  LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a
+endif

 CFLAGS += -DVM_LITTLE_ENDIAN
diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/buildtree.make
--- a/make/bsd/makefiles/buildtree.make	Tue Jun 18 14:23:29 2013 -0700
+++ b/make/bsd/makefiles/buildtree.make	Wed Jun 19 10:45:56 2013 +0200
@@ -47,9 +47,9 @@
 # flags.make	- with macro settings
 # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
 # adlc.make	-
+# trace.make	- generate tracing event and type definitions
 # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
 # sa.make	- generate SA jar file and natives
-# env.[ck]sh	- environment settings
 #
 # The makefiles are split this way so that "make foo" will run faster by not
 # having to read the dependency files for the vm.
@@ -120,6 +120,7 @@
 	$(PLATFORM_DIR)/generated/dependencies \
 	$(PLATFORM_DIR)/generated/adfiles \
 	$(PLATFORM_DIR)/generated/jvmtifiles \
+	$(PLATFORM_DIR)/generated/tracefiles \
 	$(PLATFORM_DIR)/generated/dtracefiles

 TARGETS      = debug fastdebug optimized product
@@ -129,9 +130,7 @@
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make

 # dtrace.make is used on BSD versions that implement Dtrace (like MacOS X)
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make \
-        jvmti.make sa.make dtrace.make \
-        env.sh env.csh jdkpath.sh
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make

 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -338,6 +337,16 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@

+trace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+ $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + sa.make: $(BUILDTREE_MAKE) @echo Creating $@ ... $(QUIETLY) ( \ @@ -352,19 +361,9 @@ @echo Creating $@ ... $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ - echo; \ - echo include flags.make; \ - echo; \ - echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ - ) > $@ - -env.sh: $(BUILDTREE_MAKE) - @echo Creating $@ ... - $(QUIETLY) ( \ - $(BUILDTREE_COMMENT); \ { echo "JAVA_HOME=$(JDK_IMPORT_PATH)"; }; \ { \ - echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:$(OUTPUTDIR)/shared/graal.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \ + echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \ } | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \ echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \ echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \ diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/fastdebug.make --- a/make/bsd/makefiles/fastdebug.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/bsd/makefiles/fastdebug.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2013 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -58,6 +58,6 @@ # Linker mapfile MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug -VERSION = optimized +VERSION = fastdebug SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/gcc.make --- a/make/bsd/makefiles/gcc.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/bsd/makefiles/gcc.make Wed Jun 19 10:45:56 2013 +0200 @@ -71,6 +71,11 @@ CC = $(CC32) endif + ifeq ($(USE_CLANG), true) + CXX = clang++ + CC = clang + endif + HOSTCXX = $(CXX) HOSTCC = $(CC) endif @@ -79,21 +84,79 @@ endif -# -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only -# prints the numbers (e.g. "2.95", "3.2.1") -CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1) -CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2) - -# check for precompiled headers support -ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0" -# Allow the user to turn off precompiled headers from the command line. -ifneq ($(USE_PRECOMPILED_HEADER),0) -PRECOMPILED_HEADER_DIR=. -PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp -PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch -endif +ifeq ($(USE_CLANG), true) + CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1) + CC_VER_MINOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f2) +else + # -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only + # prints the numbers (e.g. "2.95", "3.2.1") + CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1) + CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2) endif +ifeq ($(USE_CLANG), true) + # clang has precompiled headers support by default, but the user can switch + # it off by using 'USE_PRECOMPILED_HEADER=0'. 
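
As the comment above notes, precompiled headers can be switched off from the make command line. A sketch, assuming the Clang toolchain selection shown earlier in this file:

    # Build the product VM with Clang but without precompiled headers.
    make USE_CLANG=true USE_PRECOMPILED_HEADER=0 product
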
+  ifdef LP64
+    ifeq ($(USE_PRECOMPILED_HEADER),)
+      USE_PRECOMPILED_HEADER=1
+    endif
+  else
+    # We don't support precompiled headers on 32-bit builds because some files are
+    # compiled with -fPIC while others are compiled without (see 'NONPIC_OBJ_FILES' in rules.make).
+    # Clang produces an error if the PCH file was compiled with other options than the actual compilation unit.
+    USE_PRECOMPILED_HEADER=0
+  endif
+
+  ifeq ($(USE_PRECOMPILED_HEADER),1)
+
+    ifndef LP64
+      $(error "Precompiled Headers only supported on 64-bit platforms!")
+    endif
+
+    PRECOMPILED_HEADER_DIR=.
+    PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+    PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.pch
+
+    PCH_FLAG = -include precompiled.hpp
+    PCH_FLAG/DEFAULT = $(PCH_FLAG)
+    PCH_FLAG/NO_PCH = -DNO_PCH
+    PCH_FLAG/BY_FILE = $(PCH_FLAG/$@)$(PCH_FLAG/DEFAULT$(PCH_FLAG/$@))
+
+    VM_PCH_FLAG/LIBJVM = $(PCH_FLAG/BY_FILE)
+    VM_PCH_FLAG/AOUT =
+    VM_PCH_FLAG = $(VM_PCH_FLAG/$(LINK_INTO))
+
+    # We only use precompiled headers for the JVM build
+    CFLAGS += $(VM_PCH_FLAG)
+
+    # There are some files which don't like precompiled headers.
+    # The following files are built with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
+    # But Clang doesn't support a precompiled header which was compiled with -O3
+    # to be used in a compilation unit which uses '-O0'. We could also prepare an
+    # extra '-O0' PCH file for the opt build and use it here, but it's probably
+    # not worth the effort as long as only a few files need this special handling.
+    PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
+    PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
+    PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
+
+  endif
+else # ($(USE_CLANG), true)
+  # check for precompiled headers support
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
+    # Allow the user to turn off precompiled headers from the command line.
+    ifneq ($(USE_PRECOMPILED_HEADER),0)
+      PRECOMPILED_HEADER_DIR=.
+      PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+      PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+    endif
+  endif
+endif
+
+# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
+ifeq ($(USE_PRECOMPILED_HEADER),0)
+  CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
+endif

 #------------------------------------------------------------------------
 # Compiler flags
@@ -115,17 +178,31 @@
 CFLAGS += $(VM_PICFLAG)
 CFLAGS += -fno-rtti
 CFLAGS += -fno-exceptions
-CFLAGS += -pthread
-CFLAGS += -fcheck-new
-# version 4 and above support fvisibility=hidden (matches jni_x86.h file)
-# except 4.1.2 gives pointless warnings that can't be disabled (afaik)
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-CFLAGS += -fvisibility=hidden
+ifeq ($(USE_CLANG),)
+  CFLAGS += -pthread
+  CFLAGS += -fcheck-new
+  # version 4 and above support fvisibility=hidden (matches jni_x86.h file)
+  # except 4.1.2 gives pointless warnings that can't be disabled (afaik)
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+    CFLAGS += -fvisibility=hidden
+  endif
+else
+  CFLAGS += -fvisibility=hidden
+endif
+
+ifeq ($(USE_CLANG), true)
+  # Before Clang 3.1, we had to pass the stack alignment specification directly to llvm with the help of '-mllvm'
+  # Starting with version 3.1, Clang understands the '-mstack-alignment' (and rejects '-mllvm -stack-alignment')
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 1 \) \))" "0"
+    STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mstack-alignment=16
+  else
+    STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mllvm -stack-alignment=16
+  endif
 endif

 ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
 ARCHFLAG/i486    = -m32 -march=i586
-ARCHFLAG/amd64   = -m64
+ARCHFLAG/amd64   = -m64 $(STACK_ALIGNMENT_OPT)
 ARCHFLAG/ia64    =
 ARCHFLAG/sparc   = -m32 -mcpu=v9
 ARCHFLAG/sparcv9 = -m64 -mcpu=v9
@@ -163,14 +240,25 @@
 WARNINGS_ARE_ERRORS = -Werror
 endif

-# Except for a few acceptable ones
-# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
-# conversions which might affect the values. To avoid that, we need to turn
-# it off explicitly.
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+ifeq ($(USE_CLANG), true)
+  # However we need to clean the code up before we can unrestrictedly enable this option with Clang
+  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare
+# Not yet supported by clang in Xcode 4.6.2
+#  WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
+  WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
+  WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
+endif
+
 WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
-else
-WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
+
+ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+  # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
+  # conversions which might affect the values. Only enable it in earlier versions.
+  WARNING_FLAGS += -Wunused-function
+  ifeq ($(USE_CLANG),)
+    WARNING_FLAGS += -Wconversion
+  endif
 endif

 CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
@@ -214,14 +302,24 @@

 OPT_CFLAGS/NOOPT=-O0

-# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
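
The version-conditional flags above all key off CC_VER_MAJOR and CC_VER_MINOR. These are the same probes the makefile itself runs, usable by hand to check which branch a given toolchain will take:

    # gcc: -dumpversion prints e.g. "4.2.1" (or "egcs-2.91.66" on very old gcc)
    gcc -dumpversion | sed 's/egcs-//' | cut -d'.' -f1
    # clang: the version is scraped out of 'clang -v'
    clang -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1
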
-ifneq "$(shell expr \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) = 3 \) \))" "0" -OPT_CFLAGS/mulnode.o += -O0 +# Work around some compiler bugs. +ifeq ($(USE_CLANG), true) + ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1) + OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT) + endif +else + # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation. + ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1) + OPT_CFLAGS/mulnode.o += $(OPT_CFLAGS/NOOPT) + endif endif # Flags for generating make dependency flags. -ifneq ("${CC_VER_MAJOR}", "2") -DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d) +DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d) +ifeq ($(USE_CLANG),) + ifneq ($(CC_VER_MAJOR), 2) + DEPFLAGS += -fpch-deps + endif endif # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp. @@ -249,13 +347,15 @@ # statically link libstdc++.so, work with gcc but ignored by g++ STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic -# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x. -ifneq ("${CC_VER_MAJOR}", "2") -STATIC_LIBGCC += -static-libgcc -endif +ifeq ($(USE_CLANG),) + # statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x. + ifneq ("${CC_VER_MAJOR}", "2") + STATIC_LIBGCC += -static-libgcc + endif -ifeq ($(BUILDARCH), ia64) -LFLAGS += -Wl,-relax + ifeq ($(BUILDARCH), ia64) + LFLAGS += -Wl,-relax + endif endif # Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file. @@ -296,25 +396,31 @@ #------------------------------------------------------------------------ # Debug flags -# Use the stabs format for debugging information (this is the default -# on gcc-2.91). It's good enough, has all the information about line -# numbers and local variables, and libjvm.so is only about 16M. -# Change this back to "-g" if you want the most expressive format. -# (warning: that could easily inflate libjvm.so to 150M!) -# Note: The Itanium gcc compiler crashes when using -gstabs. -DEBUG_CFLAGS/ia64 = -g -DEBUG_CFLAGS/amd64 = -g -DEBUG_CFLAGS/arm = -g -DEBUG_CFLAGS/ppc = -g -DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH)) -ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) -DEBUG_CFLAGS += -gstabs +ifeq ($(USE_CLANG), true) + # Restrict the debug information created by Clang to avoid + # too big object files and speed the build up a little bit + # (see http://llvm.org/bugs/show_bug.cgi?id=7554) + CFLAGS += -flimit-debug-info endif -# DEBUG_BINARIES overrides everything, use full -g debug information +# DEBUG_BINARIES uses full -g debug information for all configs ifeq ($(DEBUG_BINARIES), true) - DEBUG_CFLAGS = -g - CFLAGS += $(DEBUG_CFLAGS) + CFLAGS += -g +else + # Use the stabs format for debugging information (this is the default + # on gcc-2.91). It's good enough, has all the information about line + # numbers and local variables, and libjvm.so is only about 16M. + # Change this back to "-g" if you want the most expressive format. + # (warning: that could easily inflate libjvm.so to 150M!) + # Note: The Itanium gcc compiler crashes when using -gstabs. 
+ DEBUG_CFLAGS/ia64 = -g + DEBUG_CFLAGS/amd64 = -g + DEBUG_CFLAGS/arm = -g + DEBUG_CFLAGS/ppc = -g + DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH)) + ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) + DEBUG_CFLAGS += -gstabs + endif endif # If we are building HEADLESS, pass on to VM diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/launcher.make --- a/make/bsd/makefiles/launcher.make Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,115 +0,0 @@ -# -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Rules to build gamma launcher, used by vm.make - - -LAUNCHER_SCRIPT = hotspot -LAUNCHER = gamma - -LAUNCHERDIR := $(GAMMADIR)/src/os/posix/launcher -LAUNCHERDIR_SHARE := $(GAMMADIR)/src/share/tools/launcher -LAUNCHERFLAGS := $(ARCHFLAG) \ - -I$(LAUNCHERDIR) -I$(GAMMADIR)/src/share/vm/prims \ - -I$(LAUNCHERDIR_SHARE) \ - -DFULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ - -DJDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \ - -DJDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \ - -DARCH=\"$(LIBARCH)\" \ - -DGAMMA \ - -DLAUNCHER_TYPE=\"gamma\" \ - -DLINK_INTO_$(LINK_INTO) \ - $(TARGET_DEFINES) -# Give the launcher task_for_pid() privileges so that it can be used to run JStack, JInfo, et al. -LFLAGS_LAUNCHER += -sectcreate __TEXT __info_plist $(GAMMADIR)/src/os/bsd/launcher/Info-privileged.plist - -ifeq ($(LINK_INTO),AOUT) - LAUNCHER.o = launcher.o $(JVM_OBJ_FILES) - LAUNCHER_MAPFILE = mapfile_reorder - LFLAGS_LAUNCHER$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LAUNCHER_MAPFILE)) - LFLAGS_LAUNCHER += $(SONAMEFLAG:SONAME=$(LIBJVM)) $(STATIC_LIBGCC) - LIBS_LAUNCHER += $(STATIC_STDCXX) $(LIBS) -else - LAUNCHER.o = launcher.o - LFLAGS_LAUNCHER += -L`pwd` - - # The gamma launcher runs the JDK from $JAVA_HOME, overriding the JVM with a - # freshly built JVM at ./libjvm.{so|dylib}. This is accomplished by setting - # the library searchpath using ({DY}LD_LIBRARY_PATH) to find the local JVM - # first. Gamma dlopen()s libjava from $JAVA_HOME/jre/lib{/$arch}, which is - # statically linked with CoreFoundation framework libs. Unfortunately, gamma's - # unique searchpath results in some unresolved symbols in the framework - # libraries, because JDK libraries are inadvertently discovered first on the - # searchpath, e.g. libjpeg. On Mac OS X, filenames are case *insensitive*. - # So, the actual filename collision is libjpeg.dylib and libJPEG.dylib. - # To resolve this, gamma needs to also statically link with the CoreFoundation - # framework libraries. 
- - ifeq ($(OS_VENDOR),Darwin) - LFLAGS_LAUNCHER += -framework CoreFoundation -framework ApplicationServices - endif - - LIBS_LAUNCHER += -l$(JVM) $(LIBS) -endif - -LINK_LAUNCHER = $(LINK.CC) - -LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CXX/PRE_HOOK) -LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK) - -LAUNCHER_OUT = launcher - -SUFFIXES += .d - -SOURCES := $(shell find $(LAUNCHERDIR) -name "*.c") -SOURCES_SHARE := $(shell find $(LAUNCHERDIR_SHARE) -name "*.c") - -OBJS := $(patsubst $(LAUNCHERDIR)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES)) $(patsubst $(LAUNCHERDIR_SHARE)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES_SHARE)) - -DEPFILES := $(patsubst %.o,%.d,$(OBJS)) --include $(DEPFILES) - -$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c - $(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); } - $(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS) - -$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c - $(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); } - $(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS) - -$(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE) - $(QUIETLY) echo Linking launcher... - $(QUIETLY) $(LINK_LAUNCHER/PRE_HOOK) - $(QUIETLY) $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(sort $(OBJS)) $(LIBS_LAUNCHER) - $(QUIETLY) $(LINK_LAUNCHER/POST_HOOK) - # Sign the launcher with the development certificate (if present) so that it can be used - # to run JStack, JInfo, et al. - $(QUIETLY) -codesign -s openjdk_codesign $@ - -$(LAUNCHER): $(LAUNCHER_SCRIPT) - -$(LAUNCHER_SCRIPT): $(LAUNCHERDIR)/launcher.script - $(QUIETLY) sed -e 's/@@LIBARCH@@/$(LIBARCH)/g' $< > $@ - $(QUIETLY) chmod +x $@ - diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/minimal1.make --- a/make/bsd/makefiles/minimal1.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/bsd/makefiles/minimal1.make Wed Jun 19 10:45:56 2013 +0200 @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # TYPE=MINIMAL1 @@ -32,6 +32,7 @@ INCLUDE_MANAGEMENT ?= false INCLUDE_ALL_GCS ?= false INCLUDE_NMT ?= false +INCLUDE_TRACE ?= false INCLUDE_CDS ?= false CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\" diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/top.make --- a/make/bsd/makefiles/top.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/bsd/makefiles/top.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -80,7 +80,7 @@ @echo All done. # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff dtrace_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff @# We need a null action here, so implicit rules don't get consulted. 
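
The new trace_stuff phase added above runs between jvmti_stuff and sa_stuff, and its outputs land in ../generated/tracefiles. A sketch of running the step by hand; the build-directory path is hypothetical, and trace.make is the stub generated by buildtree.make:

    cd $OUTPUTDIR/bsd_amd64_compiler2/product
    make -f trace.make all    # regenerates traceEventIds.hpp, traceTypes.hpp, ...
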
$(Cached_plat): $(Plat_File) @@ -94,6 +94,10 @@ jvmti_stuff: $(Cached_plat) $(adjust-mflags) @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) +# generate trace files +trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags) + @$(MAKE) -f trace.make $(MFLAGS-adjusted) + ifeq ($(OS_VENDOR), Darwin) # generate dtrace header files dtrace_stuff: $(Cached_plat) $(adjust-mflags) diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/trace.make --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/bsd/makefiles/trace.make Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,121 @@ +# +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +# This makefile (trace.make) is included from the trace.make in the +# build directories. +# +# It knows how to build and run the tools to generate trace files. 
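
Expanded by hand, one generation step in this makefile amounts to running the jvmtiGen XSLT driver (already compiled into ../generated/jvmtifiles by jvmti.make) over trace.xml with a per-file stylesheet. Paths here are abbreviated and relative to a build directory:

    java -classpath ../generated/jvmtifiles jvmtiGen \
        -IN  $HS_COMMON_SRC/share/vm/trace/trace.xml \
        -XSL $HS_COMMON_SRC/share/vm/trace/traceTypes.xsl \
        -OUT ../generated/tracefiles/traceTypes.hpp
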
+ +include $(GAMMADIR)/make/bsd/makefiles/rules.make +include $(GAMMADIR)/make/altsrc.make + +# ######################################################################### + +HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ + echo "true"; else echo "false";\ + fi) + +TOPDIR = $(shell echo `pwd`) +GENERATED = $(TOPDIR)/../generated +JvmtiOutDir = $(GENERATED)/jvmtifiles +TraceOutDir = $(GENERATED)/tracefiles + +TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace +TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace + +# set VPATH so make knows where to look for source files +Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir) +VPATH += $(Src_Dirs_V:%=%:) + +TraceGeneratedNames = \ + traceEventClasses.hpp \ + traceEventIds.hpp \ + traceTypes.hpp + +ifeq ($(HAS_ALT_SRC), true) +TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp + +ifneq ($(INCLUDE_TRACE), false) +TraceGeneratedNames += traceProducer.cpp +endif + +endif + + +TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) + +XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceevents.xml +endif + +.PHONY: all clean cleanall + +# ######################################################################### + +all: $(TraceGeneratedFiles) + +GENERATE_CODE= \ + $(QUIETLY) echo Generating $@; \ + $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \ + test -f $@ + +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) + $(GENERATE_CODE) + +ifeq ($(HAS_ALT_SRC), false) + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +else + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) + $(GENERATE_CODE) + +endif + +# ######################################################################### + + +clean cleanall: + rm $(TraceGeneratedFiles) + diff -r e0fb8a213650 -r 836a62f43af9 make/bsd/makefiles/vm.make --- a/make/bsd/makefiles/vm.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/bsd/makefiles/vm.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # Rules to build JVM and related libraries, included from vm.make in the build @@ -52,7 +52,7 @@ # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # The adfiles directory contains ad_.[ch]pp. # The jvmtifiles directory contains jvmti*.[ch]pp -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles VPATH += $(Src_Dirs_V:%=%:) # set INCLUDES for C preprocessor. @@ -66,7 +66,7 @@ SYMFLAG = endif -# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined +# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined # in $(GAMMADIR)/make/defs.make ifeq ($(HOTSPOT_BUILD_VERSION),) BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\"" @@ -93,7 +93,7 @@ # This is VERY important! The version define must only be supplied to vm_version.o # If not, ccache will not re-use the cache at all, since the version string might contain -# a time and date. +# a time and date. CXXFLAGS/vm_version.o += ${JRE_VERSION} CXXFLAGS/BYFILE = $(CXXFLAGS/$@) @@ -105,10 +105,6 @@ CXXFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\"" endif -ifndef JAVASE_EMBEDDED -CFLAGS += -DINCLUDE_TRACE -endif - # CFLAGS_WARN holds compiler options to suppress/enable warnings. CFLAGS += $(CFLAGS_WARN/BYFILE) @@ -126,7 +122,11 @@ LFLAGS += -Xlinker -z -Xlinker noexecstack endif -LIBS += -lm -pthread +LIBS += -lm + +ifeq ($(USE_CLANG),) + LIBS += -pthread +endif ifeq ($(OS_VENDOR),Darwin) LIBS += -framework ApplicationServices -framework IOKit @@ -148,6 +148,9 @@ ifeq ($(OS_VENDOR), Darwin) LIBJVM = lib$(JVM).dylib CFLAGS += -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE + ifeq (${VERSION}, $(filter ${VERSION}, debug fastdebug)) + CFLAGS += -DALLOW_OPERATOR_NEW_USAGE + endif else LIBJVM = lib$(JVM).so endif @@ -163,15 +166,15 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/gpu/ptx -ifndef JAVASE_EMBEDDED -SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ +CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles + +ifneq ($(INCLUDE_TRACE), false) +CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ find $(HS_ALT_SRC)/share/vm/jfr -type d; \ fi) endif -CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles - COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 @@ -198,7 +201,7 @@ Src_Dirs/GRAAL := $(CORE_PATHS) $(GRAAL_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\* +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER1_SPECIFIC_FILES := c1_\* SHARK_SPECIFIC_FILES := shark ZERO_SPECIFIC_FILES := zero @@ -346,9 +349,6 @@ #---------------------------------------------------------------------- # Other files -# Gamma launcher -include $(MAKEFILES_DIR)/launcher.make - # Signal interposition library include $(MAKEFILES_DIR)/jsig.make diff -r e0fb8a213650 -r 836a62f43af9 make/defs.make --- a/make/defs.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/defs.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot builds. @@ -236,33 +236,6 @@ JDK_IMAGE_DIR=$(ALT_JDK_IMAGE_DIR) endif -# Utilities ant -ifeq ($(PLATFORM), windows) - ifeq ($(ANT_HOME),) - ANT_HOME := $(call DirExists,$(JDK_DEVTOOLS_DIR)/share/ant/latest,,) - endif -endif - -# There are few problems with ant we need to workaround: -# 1) ant is using temporary directory java.io.tmpdir -# However, this directory is not unique enough and two separate ant processes -# can easily end up using the exact same temp directory. This may lead to weird build failures -# To workaround this we will define tmp dir explicitly -# 2) ant attempts to detect JDK location based on java.exe location -# This is fragile as developer may have JRE first on the PATH. -# To workaround this we will specify JAVA_HOME explicitly -# 3) Sometimes we need to run ant with the boot jdk, sometimes with the import -# jdk, sometimes with the jdk we are building (see deploy repo). - -ANT_TMPDIR = $(OUTPUTDIR)/tmp -ANT_WORKAROUNDS = ANT_OPTS=-Djava.io.tmpdir='$(ANT_TMPDIR)' - -ifeq ($(ANT_HOME),) - ANT = $(ANT_WORKAROUNDS) JAVA_HOME='$(BOOTDIR)' ant -else - ANT = $(ANT_WORKAROUNDS) JAVA_HOME='$(BOOTDIR)' $(ANT_HOME)/bin/ant -endif - # The platform dependent defs.make defines platform specific variable such # as ARCH, EXPORT_LIST etc. We must place the include here after BOOTDIR is defined. include $(GAMMADIR)/make/$(OSNAME)/makefiles/defs.make @@ -285,7 +258,7 @@ # LIBARCH - directory name in JDK/JRE # Use uname output for SRCARCH, but deal with platform differences. If ARCH - # is not explicitly listed below, it is treated as x86. + # is not explicitly listed below, it is treated as x86. SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH))) ARCH/ = x86 ARCH/sparc = sparc @@ -365,8 +338,5 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/graal.jar -ifndef JAVASE_EMBEDDED -EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h -endif +.PHONY: $(HS_ALT_MAKE)/defs.make -.PHONY: $(HS_ALT_MAKE)/defs.make diff -r e0fb8a213650 -r 836a62f43af9 make/excludeSrc.make --- a/make/excludeSrc.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/excludeSrc.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,13 +19,13 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
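
excludeSrc.make, below, turns the INCLUDE_* feature switches into -DINCLUDE_*=0 defines plus Src_Files_EXCLUDE lists, and minimal1.make (earlier in this changeset) presets most of them to false. A hedged sketch of using the switches directly; the target name is only one plausible choice:

    # Strip JVMTI and NMT out of a product build, mirroring what the
    # minimal1 variant presets via its INCLUDE_* ?= false defaults.
    make INCLUDE_JVMTI=false INCLUDE_NMT=false product
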
-# +# # ifeq ($(INCLUDE_JVMTI), false) CXXFLAGS += -DINCLUDE_JVMTI=0 CFLAGS += -DINCLUDE_JVMTI=0 - Src_Files_EXCLUDE += jvmtiGetLoadedClasses.cpp forte.cpp jvmtiThreadState.cpp jvmtiExtensions.cpp \ + Src_Files_EXCLUDE += jvmtiGetLoadedClasses.cpp jvmtiThreadState.cpp jvmtiExtensions.cpp \ jvmtiImpl.cpp jvmtiManageCapabilities.cpp jvmtiRawMonitor.cpp jvmtiUtil.cpp jvmtiTrace.cpp \ jvmtiCodeBlobEvents.cpp jvmtiEnv.cpp jvmtiRedefineClasses.cpp jvmtiEnvBase.cpp jvmtiEnvThreadState.cpp \ jvmtiTagMap.cpp jvmtiEventController.cpp evmCompat.cpp jvmtiEnter.xsl jvmtiExport.cpp \ @@ -81,23 +81,26 @@ cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \ cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp compactibleFreeListSpace.cpp \ concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \ - freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \ - concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \ - dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \ - g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \ - g1MMUTracker.cpp g1MonitoringSupport.cpp g1RemSet.cpp g1SATBCardTableModRefBS.cpp heapRegion.cpp \ - heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp ptrQueue.cpp \ - satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp adjoiningGenerations.cpp \ - adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp cardTableExtension.cpp \ - gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp parallelScavengeHeap.cpp parMarkBitMap.cpp \ - pcTasks.cpp psAdaptiveSizePolicy.cpp psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp \ - psGenerationCounters.cpp psMarkSweep.cpp psMarkSweepDecorator.cpp psOldGen.cpp psParallelCompact.cpp \ - psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp psTasks.cpp psVirtualspace.cpp \ - psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp parCardTableModRefBS.cpp \ - parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp gSpaceCounters.cpp allocationStats.cpp \ - spaceCounters.cpp gcAdaptivePolicyCounters.cpp mutableNUMASpace.cpp immutableSpace.cpp \ - immutableSpace.cpp g1MemoryPool.cpp psMemoryPool.cpp yieldingWorkGroup.cpp g1Log.cpp -endif + freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp \ + collectionSetChooser.cpp concurrentG1Refine.cpp concurrentG1RefineThread.cpp \ + concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \ + g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \ + g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \ + g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \ + g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \ + heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \ + ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \ + adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \ + cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \ + parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \ + psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp psGenerationCounters.cpp \ + psMarkSweep.cpp psMarkSweepDecorator.cpp psMemoryPool.cpp psOldGen.cpp \ + psParallelCompact.cpp psPromotionLAB.cpp 
psPromotionManager.cpp psScavenge.cpp \
+	psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
+	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
+	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
+	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
+endif

 ifeq ($(INCLUDE_NMT), false)
 CXXFLAGS += -DINCLUDE_NMT=0
@@ -107,3 +110,5 @@
 	memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
 	memTracker.cpp nmtDCmd.cpp
 endif
+
+-include $(HS_ALT_MAKE)/excludeSrc.make
diff -r e0fb8a213650 -r 836a62f43af9 make/hotspot.script
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/hotspot.script	Wed Jun 19 10:45:56 2013 +0200
@@ -0,0 +1,218 @@
+#!/bin/sh
+
+# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+
+
+# This script launches HotSpot.
+#
+# If the first parameter is either "-gdb" or "-gud", HotSpot will be
+# launched inside gdb. "-gud" means "open an Emacs window and run gdb
+# inside Emacs".
+#
+# If the first parameter is "-dbx", HotSpot will be launched inside dbx.
+#
+# If the first parameter is "-valgrind", HotSpot will be launched
+# inside Valgrind (http://valgrind.kde.org) using the Memcheck skin,
+# and with memory leak detection enabled.  This currently (2005jan19)
+# requires at least Valgrind 2.3.0.  -Xmx16m will also be passed as
+# the first parameter to HotSpot, since lowering HotSpot's memory
+# consumption makes execution inside of Valgrind *a lot* faster.
+#
+
+
+#
+# User changeable parameters ------------------------------------------------
+#
+
+# This is the name of the gdb binary to use
+if [ ! "$GDB" ]
+then
+    GDB=gdb
+fi
+
+# This is the name of the dbx binary to use
+if [ ! "$DBX" ]
+then
+    DBX=dbx
+fi
+
+# This is the name of the Valgrind binary to use
+if [ ! "$VALGRIND" ]
+then
+    VALGRIND=valgrind
+fi
+
+# This is the name of Emacs for running GUD
+EMACS=emacs
+
+#
+# End of user changeable parameters -----------------------------------------
+#
+
+# Make sure the paths are fully specified, i.e. they must begin with /.
+REL_MYDIR=`dirname $0` +MYDIR=`cd $REL_MYDIR && pwd` + +# +# Look whether the user wants to run inside gdb +case "$1" in + -gdb) + MODE=gdb + shift + ;; + -gud) + MODE=gud + shift + ;; + -dbx) + MODE=dbx + shift + ;; + -valgrind) + MODE=valgrind + shift + ;; + *) + MODE=run + ;; +esac + +if [ "${ALT_JAVA_HOME}" != "" ]; then + JDK=${ALT_JAVA_HOME%%/jre} +else + JDK=@@JDK_IMPORT_PATH@@ +fi + +if [ "${JDK}" = "" ]; then + echo "Failed to find JDK. Either ALT_JAVA_HOME is not set or JDK_IMPORT_PATH is empty." +fi + +# We will set the LD_LIBRARY_PATH as follows: +# o $JVMPATH (directory portion only) +# o $JRE/lib/$ARCH +# followed by the user's previous effective LD_LIBRARY_PATH, if +# any. +JRE=$JDK/jre +JAVA_HOME=$JDK +export JAVA_HOME + +ARCH=@@LIBARCH@@ +SBP=${MYDIR}:${JRE}/lib/${ARCH} + + +# Set up a suitable LD_LIBRARY_PATH or DYLD_LIBRARY_PATH +OS=`uname -s` +if [ "${OS}" = "Darwin" ] +then + if [ -z "$DYLD_LIBRARY_PATH" ] + then + DYLD_LIBRARY_PATH="$SBP" + else + DYLD_LIBRARY_PATH="$SBP:$DYLD_LIBRARY_PATH" + fi + export DYLD_LIBRARY_PATH +else + # not 'Darwin' + if [ -z "$LD_LIBRARY_PATH" ] + then + LD_LIBRARY_PATH="$SBP" + else + LD_LIBRARY_PATH="$SBP:$LD_LIBRARY_PATH" + fi + export LD_LIBRARY_PATH +fi + +JPARMS="-Dsun.java.launcher=gamma -XXaltjvm=$MYDIR $@ $JAVA_ARGS"; + +# Locate the java launcher +LAUNCHER=$JDK/bin/java +if [ ! -x $LAUNCHER ] ; then + echo Error: Cannot find the java launcher \"$LAUNCHER\" + exit 1 +fi + +GDBSRCDIR=$MYDIR +BASEDIR=`cd $MYDIR/../../.. && pwd` + +init_gdb() { +# Create a gdb script in case we should run inside gdb + GDBSCR=/tmp/hsl.$$ + rm -f $GDBSCR + cat >>$GDBSCR <= 22.1 + case `$EMACS -version 2> /dev/null` in + *GNU\ Emacs\ 2[23]*) + emacs_gud_cmd="gdba" + emacs_gud_args="--annotate=3" + ;; + *) + emacs_gud_cmd="gdb" + emacs_gud_args= + ;; + esac + $EMACS --eval "($emacs_gud_cmd \"$GDB $emacs_gud_args -x $GDBSCR\")"; + rm -f $GDBSCR + ;; + dbx) + $DBX -s $HOME/.dbxrc -c "loadobject -load libjvm.so; stop in JNI_CreateJavaVM; run $JPARMS; delete all" $LAUNCHER + ;; + valgrind) + echo Warning: Defaulting to 16Mb heap to make Valgrind run faster, use -Xmx for larger heap + echo + $VALGRIND --tool=memcheck --leak-check=yes --num-callers=50 $LAUNCHER -Xmx16m $JPARMS + ;; + run) + LD_PRELOAD=$PRELOADING exec $LAUNCHER $JPARMS + ;; + *) + echo Error: Internal error, unknown launch mode \"$MODE\" + exit 1 + ;; +esac +RETVAL=$? 
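+# In the "run" branch above, `exec` replaces this shell with the java
+# launcher, so execution only reaches this point in the gdb, gud, dbx and
+# valgrind modes (or when exec itself fails); RETVAL carries the exit
+# status of the last command run in the chosen branch.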
+exit $RETVAL diff -r e0fb8a213650 -r 836a62f43af9 make/hotspot_version --- a/make/hotspot_version Tue Jun 18 14:23:29 2013 -0700 +++ b/make/hotspot_version Wed Jun 19 10:45:56 2013 +0200 @@ -35,7 +35,7 @@ HS_MAJOR_VER=25 HS_MINOR_VER=0 -HS_BUILD_NUMBER=29 +HS_BUILD_NUMBER=37 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 diff -r e0fb8a213650 -r 836a62f43af9 make/jprt.properties --- a/make/jprt.properties Tue Jun 18 14:23:29 2013 -0700 +++ b/make/jprt.properties Wed Jun 19 10:45:56 2013 +0200 @@ -134,14 +134,14 @@ jprt.build.targets.standard= \ ${jprt.my.solaris.sparc}-{product|fastdebug}, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}, \ + ${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \ ${jprt.my.solaris.i586}-{product|fastdebug}, \ ${jprt.my.solaris.x64}-{product|fastdebug}, \ ${jprt.my.linux.i586}-{product|fastdebug}, \ - ${jprt.my.linux.x64}-{product|fastdebug}, \ + ${jprt.my.linux.x64}-{product|fastdebug|optimized}, \ ${jprt.my.macosx.x64}-{product|fastdebug}, \ ${jprt.my.windows.i586}-{product|fastdebug}, \ - ${jprt.my.windows.x64}-{product|fastdebug}, \ + ${jprt.my.windows.x64}-{product|fastdebug|optimized}, \ ${jprt.my.linux.armvh}-{product|fastdebug} jprt.build.targets.open= \ diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/adlc.make --- a/make/linux/makefiles/adlc.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/linux/makefiles/adlc.make Wed Jun 19 10:45:56 2013 +0200 @@ -68,7 +68,7 @@ # CFLAGS_WARN holds compiler options to suppress/enable warnings. # Compiler warnings are treated as errors -CFLAGS_WARN = -Werror +CFLAGS_WARN = $(WARNINGS_ARE_ERRORS) CFLAGS += $(CFLAGS_WARN) OBJECTNAMES = \ diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/arm.make --- a/make/linux/makefiles/arm.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/linux/makefiles/arm.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,8 @@ Obj_Files += linux_arm.o -LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a +ifneq ($(EXT_LIBS_PATH),) + LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a +endif CFLAGS += -DVM_LITTLE_ENDIAN diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/buildtree.make --- a/make/linux/makefiles/buildtree.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/linux/makefiles/buildtree.make Wed Jun 19 10:45:56 2013 +0200 @@ -47,9 +47,9 @@ # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles # adlc.make - +# trace.make - generate tracing event and type definitions # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives -# env.[ck]sh - environment settings # # The makefiles are split this way so that "make foo" will run faster by not # having to read the dependency files for the vm. @@ -115,7 +115,8 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ - $(PLATFORM_DIR)/generated/jvmtifiles + $(PLATFORM_DIR)/generated/jvmtifiles \ + $(PLATFORM_DIR)/generated/tracefiles TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) @@ -123,8 +124,7 @@ # For dependencies and recursive makes. 
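# (For orientation, a sketch of what the trace.make rule below writes into
# each build directory; the generated wrapper is essentially just
#     include flags.make
#     include $(GAMMADIR)/make/linux/makefiles/trace.make
# and defers all real work to the shared trace.make, like the other wrappers.)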
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \ - env.sh env.csh jdkpath.sh +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) @@ -274,6 +274,8 @@ echo && \ echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \ echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \ + [ -n "$(INCLUDE_TRACE)" ] && \ + echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \ echo; \ [ -n "$(SPEC)" ] && \ echo "include $(SPEC)"; \ @@ -342,6 +344,16 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ +trace.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + sa.make: $(BUILDTREE_MAKE) @echo Creating $@ ... $(QUIETLY) ( \ @@ -352,33 +364,6 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ -env.sh: $(BUILDTREE_MAKE) - @echo Creating $@ ... - $(QUIETLY) ( \ - $(BUILDTREE_COMMENT); \ - { echo "JAVA_HOME=$(JDK_IMPORT_PATH)"; }; \ - { \ - echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:$(OUTPUTDIR)/shared/graal.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \ - } | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \ - echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \ - echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \ - ) > $@ - -env.csh: env.sh - @echo Creating $@ ... - $(QUIETLY) ( \ - $(BUILDTREE_COMMENT); \ - { echo "setenv JAVA_HOME \"$(JDK_IMPORT_PATH)\""; }; \ - sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \ - ) > $@ - -jdkpath.sh: $(BUILDTREE_MAKE) - @echo Creating $@ ... - $(QUIETLY) ( \ - $(BUILDTREE_COMMENT); \ - echo "JDK=${JAVA_HOME}"; \ - ) > $@ - FORCE: .PHONY: all FORCE diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/gcc.make --- a/make/linux/makefiles/gcc.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/linux/makefiles/gcc.make Wed Jun 19 10:45:56 2013 +0200 @@ -36,8 +36,14 @@ HOSTCC = gcc STRIP = $(ALT_COMPILER_PATH)/strip else - CXX = g++ - CC = gcc + ifeq ($(USE_CLANG), true) + CXX = clang++ + CC = clang + else + CXX = g++ + CC = gcc + endif + HOSTCXX = $(CXX) HOSTCC = $(CC) STRIP = strip @@ -46,19 +52,79 @@ endif -# -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only -# prints the numbers (e.g. "2.95", "3.2.1") -CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1) -CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2) +ifeq ($(USE_CLANG), true) + CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1) + CC_VER_MINOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f2) +else + # -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only + # prints the numbers (e.g. "2.95", "3.2.1") + CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1) + CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2) +endif + + +ifeq ($(USE_CLANG), true) + # Clang has precompiled headers support by default, but the user can switch + # it off by using 'USE_PRECOMPILED_HEADER=0'. 
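+  # For example, a single build can opt out without editing any makefile
+  # (hypothetical invocation; any configured target works the same way):
+  #     make USE_PRECOMPILED_HEADER=0 product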
+  ifdef LP64
+    ifeq ($(USE_PRECOMPILED_HEADER),)
+      USE_PRECOMPILED_HEADER=1
+    endif
+  else
+    # We don't support precompiled headers on 32-bit builds because some files are
+    # compiled with -fPIC while others are compiled without (see 'NONPIC_OBJ_FILES' in rules.make).
+    # Clang produces an error if the PCH file was compiled with other options than the actual compilation unit.
+    USE_PRECOMPILED_HEADER=0
+  endif
+
+  ifeq ($(USE_PRECOMPILED_HEADER),1)
+
+  ifndef LP64
+  $(error " Precompiled Headers only supported on 64-bit platforms!")
+  endif
+
+  PRECOMPILED_HEADER_DIR=.
+  PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+  PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.pch
-# check for precompiled headers support
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
-# Allow the user to turn off precompiled headers from the command line.
-ifneq ($(USE_PRECOMPILED_HEADER),0)
-PRECOMPILED_HEADER_DIR=.
-PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
-PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+  PCH_FLAG = -include precompiled.hpp
+  PCH_FLAG/DEFAULT = $(PCH_FLAG)
+  PCH_FLAG/NO_PCH = -DNO_PCH
+  PCH_FLAG/BY_FILE = $(PCH_FLAG/$@)$(PCH_FLAG/DEFAULT$(PCH_FLAG/$@))
+
+  VM_PCH_FLAG/LIBJVM = $(PCH_FLAG/BY_FILE)
+  VM_PCH_FLAG/AOUT =
+  VM_PCH_FLAG = $(VM_PCH_FLAG/$(LINK_INTO))
+
+  # We only use precompiled headers for the JVM build
+  CFLAGS += $(VM_PCH_FLAG)
+
+  # There are some files which don't like precompiled headers
+  # The following files are built with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
+  # But Clang doesn't support a precompiled header which was compiled with -O3
+  # to be used in a compilation unit which uses '-O0'. We could also prepare an
+  # extra '-O0' PCH file for the opt build and use it here, but it's probably
+  # not worth the effort as long as only a few files need this special handling.
+  PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
+  PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
+  PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
+
+  endif
+else # ($(USE_CLANG), true)
+  # check for precompiled headers support
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
+  # Allow the user to turn off precompiled headers from the command line.
+  ifneq ($(USE_PRECOMPILED_HEADER),0)
+  PRECOMPILED_HEADER_DIR=.
+  PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+  PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+  endif
+  endif
endif
+
+# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
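+# (Sketch of the guard this macro relies on; the real precompiled.hpp may
+# differ in detail:
+#     #ifndef DONT_USE_PRECOMPILED_HEADER
+#     # include "asm/assembler.hpp"
+#     ... more includes ...
+#     #endif // DONT_USE_PRECOMPILED_HEADER
+# With the macro defined, the header contributes nothing and every source
+# file falls back on its own explicit includes.)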
+ifeq ($(USE_PRECOMPILED_HEADER),0)
+  CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
endif
@@ -83,16 +149,30 @@
CFLAGS += -fno-rtti
CFLAGS += -fno-exceptions
CFLAGS += -D_REENTRANT
-CFLAGS += -fcheck-new
-# version 4 and above support fvisibility=hidden (matches jni_x86.h file)
-# except 4.1.2 gives pointless warnings that can't be disabled (afaik)
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-CFLAGS += -fvisibility=hidden
+ifeq ($(USE_CLANG),)
+  CFLAGS += -fcheck-new
+  # version 4 and above support fvisibility=hidden (matches jni_x86.h file)
+  # except 4.1.2 gives pointless warnings that can't be disabled (afaik)
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+    CFLAGS += -fvisibility=hidden
+  endif
+else
+  CFLAGS += -fvisibility=hidden
+endif
+
+ifeq ($(USE_CLANG), true)
+  # Before Clang 3.1, we had to pass the stack alignment specification directly to llvm with the help of '-mllvm'.
+  # Starting with version 3.1, Clang understands the '-mstack-alignment' option (and rejects '-mllvm -stack-alignment').
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 1 \) \))" "0"
+    STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mstack-alignment=16
+  else
+    STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mllvm -stack-alignment=16
+  endif
endif
ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
ARCHFLAG/i486 = -m32 -march=i586
-ARCHFLAG/amd64 = -m64
+ARCHFLAG/amd64 = -m64 $(STACK_ALIGNMENT_OPT)
ARCHFLAG/ia64 =
ARCHFLAG/sparc = -m32 -mcpu=v9
ARCHFLAG/sparcv9 = -m64 -mcpu=v9
@@ -126,12 +206,22 @@
# Compiler warnings are treated as errors
WARNINGS_ARE_ERRORS = -Werror
+ifeq ($(USE_CLANG), true)
+  # However, the code still needs some cleanup before -Werror can be enabled for Clang without the exceptions below.
+  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare
+  WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
+  WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
+endif
+
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function
-# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
-# conversions which might affect the values. Only enable it in earlier versions.
-ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-WARNING_FLAGS += -Wconversion
+ifeq ($(USE_CLANG),)
+  # Since GCC 4.3, -Wconversion has changed its meaning: it now warns about implicit
+  # conversions that might affect the values. Only enable it for earlier versions.
+  ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+    WARNING_FLAGS += -Wconversion
+  endif
endif
CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
@@ -165,19 +255,24 @@
OPT_CFLAGS/NOOPT=-O0
-# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
-ifneq "$(shell expr \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) = 3 \) \))" "0"
-OPT_CFLAGS/mulnode.o += -O0
+# Work around some compiler bugs.
+ifeq ($(USE_CLANG), true)
+  ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
+    OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
+  endif
+else
+  # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
+  ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1)
+    OPT_CFLAGS/mulnode.o += $(OPT_CFLAGS/NOOPT)
+  endif
endif
# Flags for generating make dependency flags.
-ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
-endif
-
-# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
-ifeq ($(USE_PRECOMPILED_HEADER),0)
-CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
+DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+ifeq ($(USE_CLANG),)
+  ifneq ("${CC_VER_MAJOR}", "2")
+    DEPFLAGS += -fpch-deps
+  endif
endif
#------------------------------------------------------------------------
@@ -186,24 +281,33 @@
# statically link libstdc++.so, work with gcc but ignored by g++
STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic
-# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
-ifneq ("${CC_VER_MAJOR}", "2")
-STATIC_LIBGCC += -static-libgcc
-endif
+ifeq ($(USE_CLANG),)
+  # statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
+  ifneq ("${CC_VER_MAJOR}", "2")
+    STATIC_LIBGCC += -static-libgcc
+  endif
-ifeq ($(BUILDARCH), ia64)
-LFLAGS += -Wl,-relax
+  ifeq ($(BUILDARCH), ia64)
+    LFLAGS += -Wl,-relax
+  endif
endif
# Enable linker optimization
LFLAGS += -Xlinker -O1
-# If this is a --hash-style=gnu system, use --hash-style=both
-# The gnu .hash section won't work on some Linux systems like SuSE 10.
-_HAS_HASH_STYLE_GNU:=$(shell $(CC) -dumpspecs | grep -- '--hash-style=gnu')
-ifneq ($(_HAS_HASH_STYLE_GNU),)
+ifeq ($(USE_CLANG),)
+  # If this is a --hash-style=gnu system, use --hash-style=both
+  # The gnu .hash section won't work on some Linux systems like SuSE 10.
+  _HAS_HASH_STYLE_GNU:=$(shell $(CC) -dumpspecs | grep -- '--hash-style=gnu')
+  ifneq ($(_HAS_HASH_STYLE_GNU),)
+    LDFLAGS_HASH_STYLE = -Wl,--hash-style=both
+  endif
+else
+  # Don't know how to find out the 'hash style' of a system as '-dumpspecs'
+  # doesn't work for Clang. So for now we'll always use --hash-style=both
LDFLAGS_HASH_STYLE = -Wl,--hash-style=both
endif
+
LFLAGS += $(LDFLAGS_HASH_STYLE)
# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
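# (Illustration with a hypothetical value: if MAPFLAG were
#     MAPFLAG = -Xlinker --version-script=FILENAME
# then $(MAPFLAG:FILENAME=mapfile_reorder) would expand to
#     -Xlinker --version-script=mapfile_reorder
# because a substitution reference replaces a trailing FILENAME at the end
# of each word of the value.)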
@@ -221,6 +325,13 @@
#------------------------------------------------------------------------
# Debug flags
+ifeq ($(USE_CLANG), true)
+  # Restrict the debug information created by Clang to avoid
+  # overly large object files, and to speed up the build a little
+  # (see http://llvm.org/bugs/show_bug.cgi?id=7554)
+  CFLAGS += -flimit-debug-info
+endif
+
# DEBUG_BINARIES uses full -g debug information for all configs
ifeq ($(DEBUG_BINARIES), true)
CFLAGS += -g
@@ -237,7 +348,12 @@
DEBUG_CFLAGS/ppc = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
-  DEBUG_CFLAGS += -gstabs
+  ifeq ($(USE_CLANG), true)
+    # Clang doesn't understand -gstabs
+    DEBUG_CFLAGS += -g
+  else
+    DEBUG_CFLAGS += -gstabs
+  endif
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
@@ -247,7 +363,12 @@
FASTDEBUG_CFLAGS/ppc = -g
FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
-  FASTDEBUG_CFLAGS += -gstabs
+  ifeq ($(USE_CLANG), true)
+    # Clang doesn't understand -gstabs
+    FASTDEBUG_CFLAGS += -g
+  else
+    FASTDEBUG_CFLAGS += -gstabs
+  endif
endif
OPT_CFLAGS/ia64 = -g
@@ -256,7 +377,12 @@
OPT_CFLAGS/ppc = -g
OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
-  OPT_CFLAGS += -gstabs
+  ifeq ($(USE_CLANG), true)
+    # Clang doesn't understand -gstabs
+    OPT_CFLAGS += -g
+  else
+    OPT_CFLAGS += -gstabs
+  endif
endif
endif
endif
diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/jsig.make
--- a/make/linux/makefiles/jsig.make	Tue Jun 18 14:23:29 2013 -0700
+++ b/make/linux/makefiles/jsig.make	Wed Jun 19 10:45:56 2013 +0200
@@ -54,7 +54,7 @@
$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
	@echo Making signal interposition lib...
	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
-	$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
+	$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(EXTRA_CFLAGS) -o $@ $< -ldl
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/launcher.make
--- a/make/linux/makefiles/launcher.make	Tue Jun 18 14:23:29 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-#
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-# -# - -# Rules to build gamma launcher, used by vm.make - - -LAUNCHER_SCRIPT = hotspot -LAUNCHER = gamma - -LAUNCHERDIR := $(GAMMADIR)/src/os/posix/launcher -LAUNCHERDIR_SHARE := $(GAMMADIR)/src/share/tools/launcher -LAUNCHERFLAGS := $(ARCHFLAG) \ - -I$(LAUNCHERDIR) -I$(GAMMADIR)/src/share/vm/prims \ - -I$(LAUNCHERDIR_SHARE) \ - -DFULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ - -DJDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \ - -DJDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \ - -DARCH=\"$(LIBARCH)\" \ - -DGAMMA \ - -DLAUNCHER_TYPE=\"gamma\" \ - -DLINK_INTO_$(LINK_INTO) \ - $(TARGET_DEFINES) - -ifeq ($(LINK_INTO),AOUT) - LAUNCHER.o = launcher.o $(JVM_OBJ_FILES) - LAUNCHER_MAPFILE = mapfile_reorder - LFLAGS_LAUNCHER$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LAUNCHER_MAPFILE)) - LFLAGS_LAUNCHER += $(SONAMEFLAG:SONAME=$(LIBJVM)) $(STATIC_LIBGCC) - LIBS_LAUNCHER += $(STATIC_STDCXX) $(LIBS) -else - LAUNCHER.o = launcher.o - LFLAGS_LAUNCHER += -L `pwd` - LIBS_LAUNCHER += -l$(JVM) $(LIBS) -endif - -LINK_LAUNCHER = $(LINK.CC) - -LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CXX/PRE_HOOK) -LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK) - -LAUNCHER_OUT = launcher - -SUFFIXES += .d - -SOURCES := $(shell find $(LAUNCHERDIR) -name "*.c") -SOURCES_SHARE := $(shell find $(LAUNCHERDIR_SHARE) -name "*.c") - -OBJS := $(patsubst $(LAUNCHERDIR)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES)) $(patsubst $(LAUNCHERDIR_SHARE)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES_SHARE)) - -DEPFILES := $(patsubst %.o,%.d,$(OBJS)) --include $(DEPFILES) - -$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c - $(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); } - $(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS) - -$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c - $(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); } - $(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS) - -$(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE) - $(QUIETLY) echo Linking launcher... - $(QUIETLY) $(LINK_LAUNCHER/PRE_HOOK) - $(QUIETLY) $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(sort $(OBJS)) $(LIBS_LAUNCHER) - $(QUIETLY) $(LINK_LAUNCHER/POST_HOOK) - -$(LAUNCHER): $(LAUNCHER_SCRIPT) - -$(LAUNCHER_SCRIPT): $(LAUNCHERDIR)/launcher.script - $(QUIETLY) sed -e 's/@@LIBARCH@@/$(LIBARCH)/g' $< > $@ - $(QUIETLY) chmod +x $@ - diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/minimal1.make --- a/make/linux/makefiles/minimal1.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/linux/makefiles/minimal1.make Wed Jun 19 10:45:56 2013 +0200 @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # TYPE=MINIMAL1 @@ -32,6 +32,7 @@ INCLUDE_MANAGEMENT ?= false INCLUDE_ALL_GCS ?= false INCLUDE_NMT ?= false +INCLUDE_TRACE ?= false INCLUDE_CDS ?= false CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\" diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/saproc.make --- a/make/linux/makefiles/saproc.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/linux/makefiles/saproc.make Wed Jun 19 10:45:56 2013 +0200 @@ -92,6 +92,7 @@ $(SASRCFILES) \ $(SA_LFLAGS) \ $(SA_DEBUG_CFLAGS) \ + $(EXTRA_CFLAGS) \ -o $@ \ -lthread_db ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/top.make --- a/make/linux/makefiles/top.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/linux/makefiles/top.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. 
All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -80,7 +80,7 @@ @echo All done. # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff @# We need a null action here, so implicit rules don't get consulted. $(Cached_plat): $(Plat_File) @@ -94,6 +94,10 @@ jvmti_stuff: $(Cached_plat) $(adjust-mflags) @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) +# generate trace files +trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags) + @$(MAKE) -f trace.make $(MFLAGS-adjusted) + # generate SA jar files and native header sa_stuff: @$(MAKE) -f sa.make $(MFLAGS-adjusted) diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/trace.make --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/linux/makefiles/trace.make Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,120 @@ +# +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +# This makefile (trace.make) is included from the trace.make in the +# build directories. +# +# It knows how to build and run the tools to generate trace files. 
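+# In outline (sketch only, paths abbreviated): jvmtiGen, already built into
+# the jvmtifiles directory by jvmti.make, is reused here as a generic XSLT
+# driver, so each generation rule below effectively runs
+#     java -classpath ../generated/jvmtifiles jvmtiGen \
+#          -IN trace.xml -XSL traceTypes.xsl -OUT traceTypes.hpp
+# which is what the GENERATE_CODE macro expands to for each target.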
+ +include $(GAMMADIR)/make/linux/makefiles/rules.make +include $(GAMMADIR)/make/altsrc.make + +# ######################################################################### + +HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ + echo "true"; else echo "false";\ + fi) + +TOPDIR = $(shell echo `pwd`) +GENERATED = $(TOPDIR)/../generated +JvmtiOutDir = $(GENERATED)/jvmtifiles +TraceOutDir = $(GENERATED)/tracefiles + +TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace +TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace + +# set VPATH so make knows where to look for source files +Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir) +VPATH += $(Src_Dirs_V:%=%:) + +TraceGeneratedNames = \ + traceEventClasses.hpp \ + traceEventIds.hpp \ + traceTypes.hpp + +ifeq ($(HAS_ALT_SRC), true) +TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp + +ifneq ($(INCLUDE_TRACE), false) +TraceGeneratedNames += traceProducer.cpp +endif + +endif + +TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) + +XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceevents.xml +endif + +.PHONY: all clean cleanall + +# ######################################################################### + +all: $(TraceGeneratedFiles) + +GENERATE_CODE= \ + $(QUIETLY) echo Generating $@; \ + $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \ + test -f $@ + +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) + $(GENERATE_CODE) + +ifeq ($(HAS_ALT_SRC), false) + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +else + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) + $(GENERATE_CODE) + +endif + +# ######################################################################### + +clean cleanall: + rm $(TraceGeneratedFiles) + + diff -r e0fb8a213650 -r 836a62f43af9 make/linux/makefiles/vm.make --- a/make/linux/makefiles/vm.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/linux/makefiles/vm.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # Rules to build JVM and related libraries, included from vm.make in the build @@ -52,7 +52,7 @@ # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # The adfiles directory contains ad_.[ch]pp. # The jvmtifiles directory contains jvmti*.[ch]pp -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles VPATH += $(Src_Dirs_V:%=%:) # set INCLUDES for C preprocessor. @@ -72,7 +72,7 @@ endif endif -# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined +# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined # in $(GAMMADIR)/make/defs.make ifeq ($(HOTSPOT_BUILD_VERSION),) BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\"" @@ -99,7 +99,7 @@ # This is VERY important! The version define must only be supplied to vm_version.o # If not, ccache will not re-use the cache at all, since the version string might contain -# a time and date. +# a time and date. CXXFLAGS/vm_version.o += ${JRE_VERSION} CXXFLAGS/BYFILE = $(CXXFLAGS/$@) @@ -108,12 +108,6 @@ CXXFLAGS += $(CXXFLAGS/BYFILE) -ifndef JAVASE_EMBEDDED -ifneq (${ARCH},arm) -CFLAGS += -DINCLUDE_TRACE -endif -endif - # CFLAGS_WARN holds compiler options to suppress/enable warnings. CFLAGS += $(CFLAGS_WARN/BYFILE) @@ -159,16 +153,14 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/gpu/ptx -ifndef JAVASE_EMBEDDED -ifneq (${ARCH},arm) -SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ +CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles + +ifneq ($(INCLUDE_TRACE), false) +CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ find $(HS_ALT_SRC)/share/vm/jfr -type d; \ fi) endif -endif - -CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 @@ -196,7 +188,7 @@ Src_Dirs/GRAAL := $(CORE_PATHS) $(GRAAL_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\* +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER1_SPECIFIC_FILES := c1_\* SHARK_SPECIFIC_FILES := shark ZERO_SPECIFIC_FILES := zero @@ -330,7 +322,7 @@ # With more recent Redhat releases (or the cutting edge version Fedora), if # SELinux is configured to be enabled, the runtime linker will fail to apply # the text relocation to libjvm.so considering that it is built as a non-PIC -# DSO. To workaround that, we run chcon to libjvm.so after it is built. See +# DSO. To workaround that, we run chcon to libjvm.so after it is built. See # details in bug 6538311. 
$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT) $(QUIETLY) { \ @@ -386,9 +378,6 @@ #---------------------------------------------------------------------- # Other files -# Gamma launcher -include $(MAKEFILES_DIR)/launcher.make - # Signal interposition library include $(MAKEFILES_DIR)/jsig.make diff -r e0fb8a213650 -r 836a62f43af9 make/sa.files --- a/make/sa.files Tue Jun 18 14:23:29 2013 -0700 +++ b/make/sa.files Wed Jun 19 10:45:56 2013 +0200 @@ -48,8 +48,6 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/bsd/x86/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/amd64/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dummy/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/amd64/*.java \ @@ -70,6 +68,8 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/amd64/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windows/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windows/amd64/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/x86/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/g1/*.java \ $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \ diff -r e0fb8a213650 -r 836a62f43af9 make/solaris/makefiles/buildtree.make --- a/make/solaris/makefiles/buildtree.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/solaris/makefiles/buildtree.make Wed Jun 19 10:45:56 2013 +0200 @@ -47,9 +47,9 @@ # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles # adlc.make - +# trace.make - generate tracing event and type definitions # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives -# env.[ck]sh - environment settings # # The makefiles are split this way so that "make foo" will run faster by not # having to read the dependency files for the vm. @@ -108,7 +108,8 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ - $(PLATFORM_DIR)/generated/jvmtifiles + $(PLATFORM_DIR)/generated/jvmtifiles \ + $(PLATFORM_DIR)/generated/tracefiles TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) @@ -116,8 +117,7 @@ # For dependencies and recursive makes. BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \ - env.sh env.csh jdkpath.sh +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) @@ -332,6 +332,16 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ +trace.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + sa.make: $(BUILDTREE_MAKE) @echo Creating $@ ... $(QUIETLY) ( \ @@ -342,33 +352,6 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ -env.sh: $(BUILDTREE_MAKE) - @echo Creating $@ ... 
- $(QUIETLY) ( \ - $(BUILDTREE_COMMENT); \ - { echo "JAVA_HOME=$(JDK_IMPORT_PATH)"; }; \ - { \ - echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:$(OUTPUTDIR)/shared/graal.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \ - } | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \ - echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \ - echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \ - ) > $@ - -env.csh: env.sh - @echo Creating $@ ... - $(QUIETLY) ( \ - $(BUILDTREE_COMMENT); \ - { echo "setenv JAVA_HOME \"$(JDK_IMPORT_PATH)\""; }; \ - sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \ - ) > $@ - -jdkpath.sh: $(BUILDTREE_MAKE) - @echo Creating $@ ... - $(QUIETLY) ( \ - $(BUILDTREE_COMMENT); \ - echo "JDK=${JAVA_HOME}"; \ - ) > $@ - FORCE: .PHONY: all FORCE diff -r e0fb8a213650 -r 836a62f43af9 make/solaris/makefiles/launcher.make --- a/make/solaris/makefiles/launcher.make Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,108 +0,0 @@ -# -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Rules to build gamma launcher, used by vm.make - -LAUNCHER_SCRIPT = hotspot -LAUNCHER = gamma - -LAUNCHERDIR = $(GAMMADIR)/src/os/posix/launcher -LAUNCHERDIR_SHARE := $(GAMMADIR)/src/share/tools/launcher -LAUNCHERFLAGS = $(ARCHFLAG) \ - -I$(LAUNCHERDIR) -I$(GAMMADIR)/src/share/vm/prims \ - -I$(LAUNCHERDIR_SHARE) \ - -DFULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ - -DJDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \ - -DJDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \ - -DARCH=\"$(LIBARCH)\" \ - -DGAMMA \ - -DLAUNCHER_TYPE=\"gamma\" \ - -DLINK_INTO_$(LINK_INTO) \ - $(TARGET_DEFINES) - -ifeq ($(LINK_INTO),AOUT) - LAUNCHER.o = launcher.o $(JVM_OBJ_FILES) - LAUNCHER_MAPFILE = mapfile_extended - LFLAGS_LAUNCHER$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LAUNCHER_MAPFILE)) - LIBS_LAUNCHER += $(LIBS) -else - LAUNCHER.o = launcher.o - LFLAGS_LAUNCHER += -L `pwd` - LIBS_LAUNCHER += -l$(JVM) $(LIBS) -endif - -LINK_LAUNCHER = $(LINK.CXX) - -LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CXX/PRE_HOOK) -LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK) - -ifeq ("${Platform_compiler}", "sparcWorks") -# Enable the following LAUNCHERFLAGS addition if you need to compare the -# built ELF objects. -# -# The -g option makes static data global and the "-W0,-noglobal" -# option tells the compiler to not globalize static data using a unique -# globalization prefix. 
Instead force the use of a static globalization -# prefix based on the source filepath so the objects from two identical -# compilations are the same. -# -# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't -# seem to work. I got "-W0,-noglobal" from Kelly and that works. -#LAUNCHERFLAGS += -W0,-noglobal -endif # Platform_compiler == sparcWorks - -LAUNCHER_OUT = launcher - -SUFFIXES += .d - -SOURCES := $(shell find $(LAUNCHERDIR) -name "*.c") -SOURCES_SHARE := $(shell find $(LAUNCHERDIR_SHARE) -name "*.c") - -OBJS := $(patsubst $(LAUNCHERDIR)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES)) $(patsubst $(LAUNCHERDIR_SHARE)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES_SHARE)) - -DEPFILES := $(patsubst %.o,%.d,$(OBJS)) --include $(DEPFILES) - -$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c - $(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); } - $(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS) - -$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c - $(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); } - $(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS) - -$(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE) -ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),) - $(QUIETLY) echo Linking launcher... - $(QUIETLY) $(LINK_LAUNCHER/PRE_HOOK) - $(QUIETLY) $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(sort $(OBJS)) $(LIBS_LAUNCHER) - $(QUIETLY) $(LINK_LAUNCHER/POST_HOOK) -endif # filter -sbfast -xsbfast - -$(LAUNCHER): $(LAUNCHER_SCRIPT) - -$(LAUNCHER_SCRIPT): $(LAUNCHERDIR)/launcher.script - $(QUIETLY) sed -e 's/@@LIBARCH@@/$(LIBARCH)/g' $< > $@ - $(QUIETLY) chmod +x $@ - diff -r e0fb8a213650 -r 836a62f43af9 make/solaris/makefiles/top.make --- a/make/solaris/makefiles/top.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/solaris/makefiles/top.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -73,7 +73,7 @@ @echo All done. # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff @# We need a null action here, so implicit rules don't get consulted. $(Cached_plat): $(Plat_File) @@ -87,6 +87,10 @@ jvmti_stuff: $(Cached_plat) $(adjust-mflags) @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) +# generate trace files +trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags) + @$(MAKE) -f trace.make $(MFLAGS-adjusted) + # generate SA jar files and native header sa_stuff: @$(MAKE) -f sa.make $(MFLAGS-adjusted) @@ -127,5 +131,5 @@ rm -fr $(GENERATED) .PHONY: default vm_build_preliminaries -.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean +.PHONY: lists ad_stuff jvmti_stuff trace_stuff sa_stuff the_vm clean realclean .PHONY: checks check_os_version install diff -r e0fb8a213650 -r 836a62f43af9 make/solaris/makefiles/trace.make --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/solaris/makefiles/trace.make Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,116 @@ +# +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +# This makefile (trace.make) is included from the trace.make in the +# build directories. +# +# It knows how to build and run the tools to generate trace files. + +include $(GAMMADIR)/make/solaris/makefiles/rules.make +include $(GAMMADIR)/make/altsrc.make + +# ######################################################################### + +HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ + echo "true"; else echo "false";\ + fi) + +TOPDIR = $(shell echo `pwd`) +GENERATED = $(TOPDIR)/../generated +JvmtiOutDir = $(GENERATED)/jvmtifiles +TraceOutDir = $(GENERATED)/tracefiles + +TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace +TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace + +# set VPATH so make knows where to look for source files +Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir) +VPATH += $(Src_Dirs_V:%=%:) + +TraceGeneratedNames = \ + traceEventClasses.hpp \ + traceEventIds.hpp \ + traceTypes.hpp + +ifeq ($(HAS_ALT_SRC), true) +TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp \ + traceProducer.cpp +endif + +TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) + +XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceevents.xml +endif + +.PHONY: all clean cleanall + +# ######################################################################### + +all: $(TraceGeneratedFiles) + +GENERATE_CODE= \ + $(QUIETLY) echo Generating $@; \ + $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \ + test -f $@ + +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) + $(GENERATE_CODE) + +ifeq ($(HAS_ALT_SRC), false) + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +else + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) + $(GENERATE_CODE) + +endif + +# 
######################################################################### + +clean cleanall: + rm $(TraceGeneratedFiles) + + diff -r e0fb8a213650 -r 836a62f43af9 make/solaris/makefiles/vm.make --- a/make/solaris/makefiles/vm.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/solaris/makefiles/vm.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Rules to build JVM and related libraries, included from vm.make in the build @@ -48,7 +48,7 @@ # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # The adfiles directory contains ad_.[ch]pp. # The jvmtifiles directory contains jvmti*.[ch]pp -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles VPATH += $(Src_Dirs_V:%=%:) # set INCLUDES for C preprocessor @@ -87,7 +87,7 @@ # This is VERY important! The version define must only be supplied to vm_version.o # If not, ccache will not re-use the cache at all, since the version string might contain -# a time and date. +# a time and date. CXXFLAGS/vm_version.o += ${JRE_VERSION} CXXFLAGS/BYFILE = $(CXXFLAGS/$@) @@ -103,7 +103,7 @@ CFLAGS += $(CFLAGS/NOEX) # Extra flags from gnumake's invocation or environment -CFLAGS += $(EXTRA_CFLAGS) -DINCLUDE_TRACE +CFLAGS += $(EXTRA_CFLAGS) # Math Library (libm.so), do not use -lm. # There might be two versions of libm.so on the build system: @@ -137,9 +137,7 @@ LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle endif # sparcWorks -ifeq ("${Platform_arch}", "sparc") LIBS += -lkstat -endif # By default, link the *.o into the library, not the executable. LINK_INTO$(LINK_INTO) = LIBJVM @@ -177,12 +175,14 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm -SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ +CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles + +ifneq ($(INCLUDE_TRACE), false) +CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ find $(HS_ALT_SRC)/share/vm/jfr -type d; \ fi) - -CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles +endif COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 @@ -208,7 +208,7 @@ Src_Dirs/GRAAL := $(CORE_PATHS) $(GRAAL_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\* +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER1_SPECIFIC_FILES := c1_\* SHARK_SPECIFIC_FILES := shark ZERO_SPECIFIC_FILES := zero @@ -295,7 +295,7 @@ LINK_VM = $(LINK_LIB.CXX) endif # making the library: -$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE) +$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE) ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),) @echo Linking vm... 
$(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK)
@@ -346,9 +346,6 @@
#----------------------------------------------------------------------
# Other files
-# Gamma launcher
-include $(MAKEFILES_DIR)/launcher.make
-
# Signal interposition library
include $(MAKEFILES_DIR)/jsig.make
diff -r e0fb8a213650 -r 836a62f43af9 make/windows/build.make
--- a/make/windows/build.make	Tue Jun 18 14:23:29 2013 -0700
+++ b/make/windows/build.make	Wed Jun 19 10:45:56 2013 +0200
@@ -198,6 +198,12 @@
# End VERSIONINFO parameters
+# If this is a hotspot-only build, or OPENJDK was not passed down from above, set OPENJDK here (true when the closed sources are absent)
+!ifndef OPENJDK
+!if !exists($(WorkSpace)\src\closed)
+OPENJDK=true
+!endif
+!endif
# We don't support SA on ia64, and we can't
# build it if we are using a version of Vis Studio
@@ -275,6 +281,7 @@
@ echo HS_COMPANY=$(COMPANY_NAME) >> $@
@ echo HS_FILEDESC=$(HS_FILEDESC) >> $@
@ echo HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO) >> $@
+ @ if "$(OPENJDK)" NEQ "" echo OPENJDK=$(OPENJDK) >> $@
@ echo HS_COPYRIGHT=$(HOTSPOT_VM_COPYRIGHT) >> $@
@ echo HS_NAME=$(PRODUCT_NAME) $(JDK_MKTG_VERSION) >> $@
@ echo HS_BUILD_VER=$(HS_BUILD_VER) >> $@
diff -r e0fb8a213650 -r 836a62f43af9 make/windows/create_obj_files.sh
--- a/make/windows/create_obj_files.sh	Tue Jun 18 14:23:29 2013 -0700
+++ b/make/windows/create_obj_files.sh	Wed Jun 19 10:45:56 2013 +0200
@@ -71,13 +71,11 @@
BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/${sd}"
done
-BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles"
+BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles"
if [ -d "${ALTSRC}/share/vm/jfr" ]; then
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent"
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent/isolated_deps/util"
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/jvm"
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
+  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
+  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers"
fi
BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods"
@@ -114,7 +112,7 @@
"shark") Src_Dirs="${CORE_PATHS}" ;;
esac
-COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp chaitin* c2_* runtime_*"
+COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*"
COMPILER1_SPECIFIC_FILES="c1_*"
SHARK_SPECIFIC_FILES="shark"
ZERO_SPECIFIC_FILES="zero"
diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/compile.make
--- a/make/windows/makefiles/compile.make	Tue Jun 18 14:23:29 2013 -0700
+++ b/make/windows/makefiles/compile.make	Wed Jun 19 10:45:56 2013 +0200
@@ -52,7 +52,7 @@
# improving the quality of crash log stack traces involving jvm.dll.
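# (EXTRA_CFLAGS arrives here from the GNU-make side: defs.make exports it on
# the nmake command line. A hypothetical invocation such as
#     make EXTRA_CFLAGS=/DMY_EXTRA_DEFINE
# is therefore folded into CXX_FLAGS below and reaches every VM compile.)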
# These are always used in all compiles -CXX_FLAGS=/nologo /W3 /WX +CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3 /WX # Let's add debug information when Full Debug Symbols is enabled !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1" diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/debug.make --- a/make/windows/makefiles/debug.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/windows/makefiles/debug.make Wed Jun 19 10:45:56 2013 +0200 @@ -33,7 +33,7 @@ BUILD_PCH_FILE=_build_pch_file.obj !endif -default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA +default:: $(BUILD_PCH_FILE) $(AOUT) checkAndBuildSA !include ../local.make !include compile.make @@ -71,4 +71,3 @@ !include $(WorkSpace)/make/windows/makefiles/shared.make !include $(WorkSpace)/make/windows/makefiles/sa.make -!include $(WorkSpace)/make/windows/makefiles/launcher.make diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/defs.make --- a/make/windows/makefiles/defs.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/windows/makefiles/defs.make Wed Jun 19 10:45:56 2013 +0200 @@ -193,7 +193,7 @@ MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER) endif -NMAKE= MAKEFLAGS= MFLAGS= nmake -NOLOGO +NMAKE= MAKEFLAGS= MFLAGS= EXTRA_CFLAGS="$(EXTRA_CFLAGS)" nmake -NOLOGO ifndef SYSTEM_UNAME SYSTEM_UNAME := $(shell uname) export SYSTEM_UNAME diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/fastdebug.make --- a/make/windows/makefiles/fastdebug.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/windows/makefiles/fastdebug.make Wed Jun 19 10:45:56 2013 +0200 @@ -33,7 +33,7 @@ BUILD_PCH_FILE=_build_pch_file.obj !endif -default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA +default:: $(BUILD_PCH_FILE) $(AOUT) checkAndBuildSA !include ../local.make !include compile.make @@ -70,4 +70,3 @@ !include $(WorkSpace)/make/windows/makefiles/shared.make !include $(WorkSpace)/make/windows/makefiles/sa.make -!include $(WorkSpace)/make/windows/makefiles/launcher.make diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/generated.make --- a/make/windows/makefiles/generated.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/windows/makefiles/generated.make Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -30,15 +30,19 @@ JvmtiOutDir=jvmtifiles !include $(WorkSpace)/make/windows/makefiles/jvmti.make +# Pick up rules for building trace +TraceOutDir=tracefiles +!include $(WorkSpace)/make/windows/makefiles/trace.make + # Pick up rules for building SA !include $(WorkSpace)/make/windows/makefiles/sa.make AdlcOutDir=adfiles !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered") -default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) buildobjfiles +default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles !else -default:: $(JvmtiGeneratedFiles) buildobjfiles +default:: $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles !endif buildobjfiles: diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/launcher.make --- a/make/windows/makefiles/launcher.make Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,73 +0,0 @@ -# -# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - - -LAUNCHER_FLAGS=$(CXX_FLAGS) $(ARCHFLAG) \ - /D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ - /D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \ - /D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \ - /D GAMMA \ - /D LAUNCHER_TYPE=\"gamma\" \ - /D _CRT_SECURE_NO_WARNINGS \ - /D _CRT_SECURE_NO_DEPRECATE \ - /D LINK_INTO_LIBJVM \ - /I $(WorkSpace)\src\os\windows\launcher \ - /I $(WorkSpace)\src\share\tools\launcher \ - /I $(WorkSpace)\src\share\vm\prims \ - /I $(WorkSpace)\src\share\vm \ - /I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \ - /I $(WorkSpace)\src\os\windows\vm - -LD_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console - -!if "$(COMPILER_NAME)" == "VS2005" -# This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib -# on the link command line, otherwise we get missing __security_check_cookie -# externals at link time. Even with /GS-, you need bufferoverflowU.lib. 
-BUFFEROVERFLOWLIB = bufferoverflowU.lib -LD_FLAGS = $(LD_FLAGS) $(BUFFEROVERFLOWLIB) -!endif - -!if "$(COMPILER_NAME)" == "VS2010" && "$(BUILDARCH)" == "i486" -LD_FLAGS = /SAFESEH $(LD_FLAGS) -!endif - -LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher -LAUNCHERDIR_SHARE = $(WorkSpace)/src/share/tools/launcher - -OUTDIR = launcher - -{$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj: - -mkdir $(OUTDIR) 2>NUL >NUL - $(CXX) $(LAUNCHER_FLAGS) /c /Fo$@ $< - -{$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj: - -mkdir $(OUTDIR) 2>NUL >NUL - $(CXX) $(LAUNCHER_FLAGS) /c /Fo$@ $< - -$(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h - -launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj - echo $(JAVA_HOME) > jdkpath.txt - $(LD) $(LD_FLAGS) /out:hotspot.exe $** diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/product.make --- a/make/windows/makefiles/product.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/windows/makefiles/product.make Wed Jun 19 10:45:56 2013 +0200 @@ -32,7 +32,7 @@ BUILD_PCH_FILE=_build_pch_file.obj !endif -default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA +default:: $(BUILD_PCH_FILE) $(AOUT) checkAndBuildSA !include ../local.make !include compile.make @@ -73,4 +73,3 @@ !include $(WorkSpace)/make/windows/makefiles/shared.make !include $(WorkSpace)/make/windows/makefiles/sa.make -!include $(WorkSpace)/make/windows/makefiles/launcher.make diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/projectcreator.make --- a/make/windows/makefiles/projectcreator.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/windows/makefiles/projectcreator.make Wed Jun 19 10:45:56 2013 +0200 @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # !include $(WorkSpace)/make/windows/makefiles/rules.make @@ -60,7 +60,6 @@ -relativeSrcInclude src \ -absoluteSrcInclude $(HOTSPOTBUILDSPACE) \ -ignorePath $(HOTSPOTBUILDSPACE) \ - -ignorePath launcher \ -ignorePath share\vm\adlc \ -ignorePath share\vm\shark \ -ignorePath share\tools \ @@ -74,7 +73,7 @@ -ignorePath ppc \ -ignorePath zero \ -hidePath .hg - + # This is referenced externally by both the IDE and batch builds ProjectCreatorOptions= @@ -91,7 +90,7 @@ -disablePch bytecodeInterpreter.cpp \ -disablePch bytecodeInterpreterWithChecks.cpp \ -disablePch getThread_windows_$(Platform_arch).cpp \ - -disablePch_compiler2 opcodes.cpp + -disablePch_compiler2 opcodes.cpp # Common options for the IDE builds for core, c1, and c2 ProjectCreatorIDEOptions=\ @@ -105,8 +104,8 @@ -jdkTargetRoot $(HOTSPOTJDKDIST) \ -define ALIGN_STACK_FRAMES \ -define VM_LITTLE_ENDIAN \ - -prelink "" "Generating vm.def..." "cd %o set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \ - -postbuild "" "Building hotspot.exe..." "cd %o set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \ + -prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \ + -postbuild "" "Building hotspot.exe..." 
"cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \ -ignoreFile jsig.c \ -ignoreFile jvmtiEnvRecommended.cpp \ -ignoreFile jvmtiEnvStub.cpp \ @@ -118,7 +117,7 @@ -define TARGET_OS_ARCH_windows_x86 \ -define TARGET_OS_FAMILY_windows \ -define TARGET_COMPILER_visCPP \ - -define INCLUDE_TRACE \ + -define INCLUDE_TRACE=1 \ $(ProjectCreatorIncludesPRIVATE) # Add in build-specific options @@ -225,4 +224,12 @@ -additionalFile jvmtiEnter.cpp \ -additionalFile jvmtiEnterTrace.cpp \ -additionalFile jvmti.h \ - -additionalFile bytecodeInterpreterWithChecks.cpp + -additionalFile bytecodeInterpreterWithChecks.cpp \ + -additionalFile traceEventClasses.hpp \ + -additionalFile traceEventIds.hpp \ +!if "$(OPENJDK)" != "true" + -additionalFile traceRequestables.hpp \ + -additionalFile traceEventControl.hpp \ + -additionalFile traceProducer.cpp \ +!endif + -additionalFile traceTypes.hpp diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/trace.make --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/windows/makefiles/trace.make Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,121 @@ +# +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +# This makefile (trace.make) is included from the trace.make in the +# build directories. +# +# It knows how to build and run the tools to generate trace files. + +!include $(WorkSpace)/make/windows/makefiles/rules.make + +# ######################################################################### + + +TraceAltSrcDir = $(WorkSpace)/src/closed/share/vm/trace +TraceSrcDir = $(WorkSpace)/src/share/vm/trace + +TraceGeneratedNames = \ + traceEventClasses.hpp \ + traceEventIds.hpp \ + traceTypes.hpp + + +!if "$(OPENJDK)" != "true" +TraceGeneratedNames = $(TraceGeneratedNames) \ + traceRequestables.hpp \ + traceEventControl.hpp \ + traceProducer.cpp +!endif + + +#Note: TraceGeneratedFiles must be kept in sync with TraceGeneratedNames by hand. 
+#Should be equivalent to "TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)" +TraceGeneratedFiles = \ + $(TraceOutDir)/traceEventClasses.hpp \ + $(TraceOutDir)/traceEventIds.hpp \ + $(TraceOutDir)/traceTypes.hpp + +!if "$(OPENJDK)" != "true" +TraceGeneratedFiles = $(TraceGeneratedFiles) \ + $(TraceOutDir)/traceRequestables.hpp \ + $(TraceOutDir)/traceEventControl.hpp \ + $(TraceOutDir)/traceProducer.cpp +!endif + +XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod + +!if "$(OPENJDK)" != "true" +XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml +!endif + +.PHONY: all clean cleanall + +# ######################################################################### + +default:: + @if not exist $(TraceOutDir) mkdir $(TraceOutDir) + +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp + +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp + +!if "$(OPENJDK)" == "true" + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp + +!else + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp + +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp + +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp + +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp + +!endif + +# ######################################################################### + +cleanall : + rm $(TraceGeneratedFiles) + + diff -r e0fb8a213650 -r 836a62f43af9 make/windows/makefiles/vm.make --- a/make/windows/makefiles/vm.make Tue Jun 18 14:23:29 2013 -0700 +++ b/make/windows/makefiles/vm.make Wed Jun 19 10:45:56 2013 +0200 @@ -70,10 +70,6 @@ CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\"" CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\"" -!ifndef JAVASE_EMBEDDED -CXX_FLAGS=$(CXX_FLAGS) /D "INCLUDE_TRACE" -!endif - CXX_FLAGS=$(CXX_FLAGS) $(CXX_INCLUDE_DIRS) # Define that so jni.h is on correct side @@ -148,6 +144,7 @@ VM_PATH=../generated VM_PATH=$(VM_PATH);../generated/adfiles VM_PATH=$(VM_PATH);../generated/jvmtifiles 
+VM_PATH=$(VM_PATH);../generated/tracefiles VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/graal VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler @@ -177,10 +174,8 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto !if exists($(ALTSRC)\share\vm\jfr) -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent/isolated_deps/util -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/jvm VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr +VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers !endif VM_PATH={$(VM_PATH)} @@ -389,16 +384,13 @@ {..\generated\jvmtifiles}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +{..\generated\tracefiles}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + {$(ALTSRC)\share\vm\jfr}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< -{$(ALTSRC)\share\vm\jfr\agent}.cpp.obj:: - $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< - -{$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj:: - $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< - -{$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj:: +{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< default:: diff -r e0fb8a213650 -r 836a62f43af9 make/windows/projectfiles/common/Makefile --- a/make/windows/projectfiles/common/Makefile Tue Jun 18 14:23:29 2013 -0700 +++ b/make/windows/projectfiles/common/Makefile Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,12 @@ !endif !endif +# if hotspot-only build and/or OPENJDK isn't passed down, need to set OPENJDK +!ifndef OPENJDK +!if !exists($(WorkSpace)\src\closed) +OPENJDK=true +!endif +!endif !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/projectcreator.make @@ -54,6 +60,10 @@ JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jvmti.make +# Pick up rules for building trace +TraceOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\tracefiles +!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/trace.make + !if "$(Variant)" == "compiler2" # Pick up rules for building adlc !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make @@ -65,9 +75,8 @@ !endif HS_INTERNAL_NAME=jvm -!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/launcher.make -default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) +default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) !include $(HOTSPOTWORKSPACE)/make/hotspot_version diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/compiledIC_sparc.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/sparc/vm/compiledIC_sparc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,193 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/compiledIC.hpp" +#include "code/icBuffer.hpp" +#include "code/nmethod.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/safepoint.hpp" +#ifdef COMPILER2 +#include "opto/matcher.hpp" +#endif + +// Release the CompiledICHolder* associated with this call site if there is one. +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + if (is_icholder_entry(call->destination())) { + NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); + } +} + +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + return is_icholder_entry(call->destination()); +} + +//----------------------------------------------------------------------------- +// High-level access to an inline cache. Guaranteed to be MT-safe. + +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +// ---------------------------------------------------------------------------- + +#define __ _masm. +void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { +#ifdef COMPILER2 + // Stub is fixed up when the corresponding call is converted from calling + // compiled code to calling interpreted code. + // set (empty), G5 + // jmp -1 + + address mark = cbuf.insts_mark(); // Get mark within main instrs section. + + MacroAssembler _masm(&cbuf); + + address base = + __ start_a_stub(to_interp_stub_size()*2); + if (base == NULL) return; // CodeBuffer::expand failed. + + // Static stub relocation stores the instruction address of the call.
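+ // Recording `mark` (the address of the call in the main code section) is what + // lets CompiledStaticCall::find_stub() locate this stub from its call site, so + // set_to_interpreted() below can patch in the real Method* and entry point.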
+ __ relocate(static_stub_Relocation::spec(mark)); + + __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode())); + + __ set_inst_mark(); + AddressLiteral addrlit(-1); + __ JUMP(addrlit, G3, 0); + + __ delayed()->nop(); + + // Update current stubs pointer and restore code_end. + __ end_a_stub(); +#else + ShouldNotReachHere(); +#endif +} +#undef __ + +int CompiledStaticCall::to_interp_stub_size() { + // This doesn't need to be accurate but it must be larger or equal to + // the real size of the stub. + return (NativeMovConstReg::instruction_size + // sethi/setlo; + NativeJump::instruction_size + // sethi; jmp; nop + (TraceJumps ? 20 * BytesPerInstWord : 0) ); +} + +// Relocation entries for call stub, compiled java to interpreter. +int CompiledStaticCall::reloc_to_interp_stub() { + return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call +} + +void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { + address stub = find_stub(); + guarantee(stub != NULL, "stub not found"); + + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", + instruction_address(), + callee->name_and_sig_as_C_string()); + } + + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), + "a) MT-unsafe modification of inline cache"); + assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, + "b) MT-unsafe modification of inline cache"); + + // Update stub. + method_holder->set_data((intptr_t)callee()); + jump->set_jump_destination(entry); + + // Update jump to call. + set_destination_mt_safe(stub); +} + +void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); + // Reset stub. + address stub = static_stub->addr(); + assert(stub != NULL, "stub not found"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + method_holder->set_data(0); + jump->set_jump_destination((address)-1); +} + +//----------------------------------------------------------------------------- +// Non-product mode code +#ifndef PRODUCT + +void CompiledStaticCall::verify() { + // Verify call. + NativeCall::verify(); + if (os::is_MP()) { + verify_alignment(); + } + + // Verify stub. + address stub = find_stub(); + assert(stub != NULL, "no stub found for static call"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + // Verify state. + assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); +} + +#endif // !PRODUCT diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/cppInterpreter_sparc.cpp --- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -404,14 +404,20 @@ // ??: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { + Label done; + const Register Rcounters = G3_scratch; + + __ ld_ptr(STATE(_method), G5_method); + __ get_method_counters(G5_method, Rcounters, done); + // Update standard invocation counters - __ increment_invocation_counter(O0, G3_scratch); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ ld_ptr(STATE(_method), G3_scratch); - Address interpreter_invocation_counter(G3_scratch, 0, in_bytes(Method::interpreter_invocation_counter_offset())); - __ ld(interpreter_invocation_counter, G3_scratch); - __ inc(G3_scratch); - __ st(G3_scratch, interpreter_invocation_counter); + __ increment_invocation_counter(Rcounters, O0, G4_scratch); + if (ProfileInterpreter) { + Address interpreter_invocation_counter(Rcounters, 0, + in_bytes(MethodCounters::interpreter_invocation_counter_offset())); + __ ld(interpreter_invocation_counter, G4_scratch); + __ inc(G4_scratch); + __ st(G4_scratch, interpreter_invocation_counter); } Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit); @@ -420,7 +426,7 @@ __ cmp(O0, G3_scratch); __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); __ delayed()->nop(); - + __ bind(done); } address InterpreterGenerator::generate_empty_entry(void) { @@ -1059,7 +1065,7 @@ const int slop_factor = 2*wordSize; const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) + // what is the slop factor? - //6815692//Method::extra_stack_words() + // extra push slots for MH adapters + Method::extra_stack_entries() + // extra stack for jsr 292 frame::memory_parameter_word_sp_offset + // register save area + param window (native ? frame::interpreter_frame_extra_outgoing_argument_words : 0); // JNI, class @@ -1215,9 +1221,7 @@ // Full size expression stack __ ld_ptr(constMethod, O3); __ lduh(O3, in_bytes(ConstMethod::max_stack_offset()), O3); - guarantee(!EnableInvokeDynamic, "no support yet for java.lang.invoke.MethodHandle"); //6815692 - //6815692//if (EnableInvokeDynamic) - //6815692// __ inc(O3, Method::extra_stack_entries()); + __ inc(O3, Method::extra_stack_entries()); __ sll(O3, LogBytesPerWord, O3); __ sub(O2, O3, O3); // __ sub(O3, wordSize, O3); // so prepush doesn't look out of bounds @@ -2078,9 +2082,7 @@ const int fixed_size = sizeof(BytecodeInterpreter)/wordSize + // interpreter state object frame::memory_parameter_word_sp_offset; // register save area + param window - const int extra_stack = 0; //6815692//Method::extra_stack_entries(); return (round_to(max_stack + - extra_stack + slop_factor + fixed_size + monitor_size + @@ -2167,8 +2169,7 @@ // Need +1 here because stack_base points to the word just above the first expr stack entry // and stack_limit is supposed to point to the word just below the last expr stack entry. // See generate_compute_interpreter_state. 
- int extra_stack = 0; //6815692//Method::extra_stack_entries(); - to_fill->_stack_limit = stack_base - (method->max_stack() + 1 + extra_stack); + to_fill->_stack_limit = stack_base - (method->max_stack() + 1); to_fill->_monitor_base = (BasicObjectLock*) monitor_base; // sparc specific diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/frame_sparc.cpp --- a/src/cpu/sparc/vm/frame_sparc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/frame_sparc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -252,6 +252,11 @@ return false; } + // Could be a zombie method + if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { + return false; + } + // It should be safe to construct the sender though it might not be valid frame sender(_SENDER_SP, younger_sp, adjusted_stack); @@ -294,10 +299,10 @@ return jcw_safe; } - // If the frame size is 0 something is bad because every nmethod has a non-zero frame size + // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size // because you must allocate window space - if (sender_blob->frame_size() == 0) { + if (sender_blob->frame_size() <= 0) { assert(!sender_blob->is_nmethod(), "should count return address at least"); return false; } diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/globalDefinitions_sparc.hpp --- a/src/cpu/sparc/vm/globalDefinitions_sparc.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/globalDefinitions_sparc.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -30,4 +30,6 @@ const int StackAlignmentInBytes = (2*wordSize); +#define SUPPORTS_NATIVE_CX8 + #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/globals_sparc.hpp --- a/src/cpu/sparc/vm/globals_sparc.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/globals_sparc.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -74,7 +74,7 @@ define_pd_global(bool, UseMembar, false); // GC Ergo Flags -define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread +define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \ \ diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/interp_masm_sparc.cpp --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "oops/markOop.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" +#include "oops/methodCounters.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "prims/jvmtiThreadState.hpp" @@ -520,7 +521,7 @@ // Compute max expression stack+register save area ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size); lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size); // Load max stack.
- add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size ); + add(Gframe_size, frame::memory_parameter_word_sp_offset+Method::extra_stack_entries(), Gframe_size ); // // now set up a stack frame with the size computed above @@ -2086,19 +2087,29 @@ #endif /* CC_INTERP */ -void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) { +void InterpreterMacroAssembler::get_method_counters(Register method, + Register Rcounters, + Label& skip) { + Label has_counters; + Address method_counters(method, in_bytes(Method::method_counters_offset())); + ld_ptr(method_counters, Rcounters); + br_notnull_short(Rcounters, Assembler::pt, has_counters); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::build_method_counters), method); + ld_ptr(method_counters, Rcounters); + br_null(Rcounters, false, Assembler::pn, skip); // No MethodCounters, OutOfMemory + delayed()->nop(); + bind(has_counters); +} + +void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) { assert(UseCompiler, "incrementing must be useful"); -#ifdef CC_INTERP - Address inv_counter(G5_method, Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); - Address be_counter (G5_method, Method::backedge_counter_offset() + + assert_different_registers(Rcounters, Rtmp, Rtmp2); + + Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset()); -#else - Address inv_counter(Lmethod, Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); - Address be_counter (Lmethod, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); -#endif /* CC_INTERP */ + Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset()); int delta = InvocationCounter::count_increment; // Load each counter in a register @@ -2122,19 +2133,15 @@ // Note that this macro must leave the backedge_count + invocation_count in Rtmp! } -void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) { +void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) { assert(UseCompiler, "incrementing must be useful"); -#ifdef CC_INTERP - Address be_counter (G5_method, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); - Address inv_counter(G5_method, Method::invocation_counter_offset() + + assert_different_registers(Rcounters, Rtmp, Rtmp2); + + Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset()); -#else - Address be_counter (Lmethod, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); - Address inv_counter(Lmethod, Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); -#endif /* CC_INTERP */ + Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + int delta = InvocationCounter::count_increment; // Load each counter in a register ld( be_counter, Rtmp ); diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/interp_masm_sparc.hpp --- a/src/cpu/sparc/vm/interp_masm_sparc.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -263,8 +263,9 @@ void compute_stack_base( Register Rdest ); #endif /* CC_INTERP */ - void increment_invocation_counter( Register Rtmp, Register Rtmp2 ); - void increment_backedge_counter( Register Rtmp, Register Rtmp2 ); + void get_method_counters(Register method, Register Rcounters, Label& skip); + void increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ); + void increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ); #ifndef CC_INTERP void test_backedge_count_for_osr( Register backedge_count, Register branch_bcp, Register Rtmp ); diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/jni_sparc.h --- a/src/cpu/sparc/vm/jni_sparc.h Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/jni_sparc.h Wed Jun 19 10:45:56 2013 +0200 @@ -23,7 +23,12 @@ * questions. */ -#if defined(__GNUC__) && (__GNUC__ >= 4) +// Note: please do not change these without also changing jni_md.h in the JDK +// repository +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility) #define JNIEXPORT __attribute__((visibility("default"))) #define JNIIMPORT __attribute__((visibility("default"))) #else diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/sparc.ad --- a/src/cpu/sparc/vm/sparc.ad Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/sparc.ad Wed Jun 19 10:45:56 2013 +0200 @@ -1656,53 +1656,6 @@ } //============================================================================= - -// emit call stub, compiled java to interpretor -void emit_java_to_interp(CodeBuffer &cbuf ) { - - // Stub is fixed up when the corresponding call is converted from calling - // compiled code to calling interpreted code. - // set (empty), G5 - // jmp -1 - - address mark = cbuf.insts_mark(); // get mark within main instrs section - - MacroAssembler _masm(&cbuf); - - address base = - __ start_a_stub(Compile::MAX_stubs_size); - if (base == NULL) return; // CodeBuffer::expand failed - - // static stub relocation stores the instruction address of the call - __ relocate(static_stub_Relocation::spec(mark)); - - __ set_metadata(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode())); - - __ set_inst_mark(); - AddressLiteral addrlit(-1); - __ JUMP(addrlit, G3, 0); - - __ delayed()->nop(); - - // Update current stubs pointer and restore code_end. - __ end_a_stub(); -} - -// size of call stub, compiled java to interpretor -uint size_java_to_interp() { - // This doesn't need to be accurate but it must be larger or equal to - // the real size of the stub. - return (NativeMovConstReg::instruction_size + // sethi/setlo; - NativeJump::instruction_size + // sethi; jmp; nop - (TraceJumps ? 20 * BytesPerInstWord : 0) ); -} -// relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() { - return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call -} - - -//============================================================================= #ifndef PRODUCT void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { st->print_cr("\nUEP:"); @@ -2576,15 +2529,15 @@ enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine // who we intended to call. 
- if ( !_method ) { + if (!_method) { emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type); } else if (_optimized_virtual) { emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type); } else { emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type); } - if( _method ) { // Emit stub for static call - emit_java_to_interp(cbuf); + if (_method) { // Emit stub for static call. + CompiledStaticCall::emit_to_interp_stub(cbuf); } %} diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/templateInterpreter_sparc.cpp --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -292,11 +292,15 @@ // ??: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { - // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. + // Note: In tiered we increment either counters in MethodCounters* or in + // MDO depending if we're profiling or not. + const Register Rcounters = G3_scratch; + Label done; + if (TieredCompilation) { const int increment = InvocationCounter::count_increment; const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; - Label no_mdo, done; + Label no_mdo; if (ProfileInterpreter) { // If no method data exists, go to profile_continue. 
__ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch); @@ -311,23 +315,26 @@ __ ba_short(done); } - // Increment counter in Method* + // Increment counter in MethodCounters* __ bind(no_mdo); - Address invocation_counter(Lmethod, - in_bytes(Method::invocation_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); + Address invocation_counter(Rcounters, + in_bytes(MethodCounters::invocation_counter_offset()) + + in_bytes(InvocationCounter::counter_offset())); + __ get_method_counters(Lmethod, Rcounters, done); __ increment_mask_and_jump(invocation_counter, increment, mask, - G3_scratch, Lscratch, + G4_scratch, Lscratch, Assembler::zero, overflow); __ bind(done); } else { // Update standard invocation counters - __ increment_invocation_counter(O0, G3_scratch); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - Address interpreter_invocation_counter(Lmethod,in_bytes(Method::interpreter_invocation_counter_offset())); - __ ld(interpreter_invocation_counter, G3_scratch); - __ inc(G3_scratch); - __ st(G3_scratch, interpreter_invocation_counter); + __ get_method_counters(Lmethod, Rcounters, done); + __ increment_invocation_counter(Rcounters, O0, G4_scratch); + if (ProfileInterpreter) { + Address interpreter_invocation_counter(Rcounters, + in_bytes(MethodCounters::interpreter_invocation_counter_offset())); + __ ld(interpreter_invocation_counter, G4_scratch); + __ inc(G4_scratch); + __ st(G4_scratch, interpreter_invocation_counter); } if (ProfileInterpreter && profile_method != NULL) { @@ -345,6 +352,7 @@ __ cmp(O0, G3_scratch); __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance __ delayed()->nop(); + __ bind(done); } } @@ -499,7 +507,7 @@ const int extra_space = rounded_vm_local_words + // frame local scratch space - //6815692//Method::extra_stack_words() + // extra push slots for MH adapters + Method::extra_stack_entries() + // extra stack for jsr 292 frame::memory_parameter_word_sp_offset + // register save area (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0); @@ -1563,7 +1571,6 @@ round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong); const int max_stack_words = max_stack * Interpreter::stackElementWords; return (round_to((max_stack_words - //6815692//+ Method::extra_stack_words() + rounded_vm_local_words + frame::memory_parameter_word_sp_offset), WordsPerLong) // already rounded diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/sparc/vm/templateTable_sparc.cpp --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -63,6 +63,13 @@ noreg /* pre_val */, tmp, true /*preserve_o_regs*/); + // G1 barrier needs uncompressed oop for region cross check. 
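+ // (With UseCompressedOops the value stored below may be the encoded narrow + // oop, while the post barrier filters same-region stores by comparing raw + // addresses; hence the uncompressed copy kept in tmp and passed to + // g1_write_barrier_post.)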
+ Register new_val = val; + if (UseCompressedOops && val != G0) { + new_val = tmp; + __ mov(val, new_val); + } + if (index == noreg ) { assert(Assembler::is_simm13(offset), "fix this code"); __ store_heap_oop(val, base, offset); @@ -79,7 +86,7 @@ __ add(base, index, base); } } - __ g1_write_barrier_post(base, val, tmp); + __ g1_write_barrier_post(base, new_val, tmp); } } break; @@ -1604,9 +1611,8 @@ // Normal (non-jsr) branch handling // Save the current Lbcp - const Register O0_cur_bcp = O0; - __ mov( Lbcp, O0_cur_bcp ); - + const Register l_cur_bcp = Lscratch; + __ mov( Lbcp, l_cur_bcp ); bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter; if ( increment_invocation_counter_for_backward_branches ) { @@ -1616,6 +1622,9 @@ // Bump bytecode pointer by displacement (take the branch) __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr + const Register Rcounters = G3_scratch; + __ get_method_counters(Lmethod, Rcounters, Lforward); + if (TieredCompilation) { Label Lno_mdo, Loverflow; int increment = InvocationCounter::count_increment; @@ -1628,21 +1637,22 @@ // Increment backedge counter in the MDO Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset())); - __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch, + __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0, Assembler::notZero, &Lforward); __ ba_short(Loverflow); } - // If there's no MDO, increment counter in Method* + // If there's no MDO, increment counter in MethodCounters* __ bind(Lno_mdo); - Address backedge_counter(Lmethod, in_bytes(Method::backedge_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); - __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch, + Address backedge_counter(Rcounters, + in_bytes(MethodCounters::backedge_counter_offset()) + + in_bytes(InvocationCounter::counter_offset())); + __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0, Assembler::notZero, &Lforward); __ bind(Loverflow); // notify point for loop, pass branch bytecode - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp); // Was an OSR adapter generated? // O0 = osr nmethod @@ -1679,15 +1689,15 @@ } else { // Update Backedge branch separately from invocations const Register G4_invoke_ctr = G4; - __ increment_backedge_counter(G4_invoke_ctr, G1_scratch); + __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch); if (ProfileInterpreter) { __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward); if (UseOnStackReplacement) { - __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch); + __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch); } } else { if (UseOnStackReplacement) { - __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch); + __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch); } } } diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/compiledIC_x86.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/x86/vm/compiledIC_x86.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,180 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/compiledIC.hpp" +#include "code/icBuffer.hpp" +#include "code/nmethod.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/safepoint.hpp" + +// Release the CompiledICHolder* associated with this call site if there is one. +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + if (is_icholder_entry(call->destination())) { + NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); + } +} + +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + return is_icholder_entry(call->destination()); +} + +//----------------------------------------------------------------------------- +// High-level access to an inline cache. Guaranteed to be MT-safe. + +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +// ---------------------------------------------------------------------------- + +#define __ _masm. +void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { + // Stub is fixed up when the corresponding call is converted from + // calling compiled code to calling interpreted code. + // movq rbx, 0 + // jmp -5 # to self + + address mark = cbuf.insts_mark(); // Get mark within main instrs section. + + // Note that the code buffer's insts_mark is always relative to insts. + // That's why we must use the macroassembler to generate a stub.
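+ // The Method* immediate and the jump target emitted below are exactly the two + // fields that set_to_interpreted() patches once the callee is resolved.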
+ MacroAssembler _masm(&cbuf); + + address base = + __ start_a_stub(to_interp_stub_size()*2); + if (base == NULL) return; // CodeBuffer::expand failed. + // Static stub relocation stores the instruction address of the call. + __ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand); + // Static stub relocation also tags the Method* in the code-stream. + __ mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time. + // This is recognized as unresolved by relocs/nativeinst/ic code. + __ jump(RuntimeAddress(__ pc())); + + // Update current stubs pointer and restore insts_end. + __ end_a_stub(); +} +#undef __ + +int CompiledStaticCall::to_interp_stub_size() { + return NOT_LP64(10) // movl; jmp + LP64_ONLY(15); // movq (1+1+8); jmp (1+4) +} + +// Relocation entries for call stub, compiled java to interpreter. +int CompiledStaticCall::reloc_to_interp_stub() { + return 4; // 3 in emit_to_interp_stub + 1 in emit_call +} + +void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { + address stub = find_stub(); + guarantee(stub != NULL, "stub not found"); + + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", + instruction_address(), + callee->name_and_sig_as_C_string()); + } + + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), + "a) MT-unsafe modification of inline cache"); + assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, + "b) MT-unsafe modification of inline cache"); + + // Update stub. + method_holder->set_data((intptr_t)callee()); + jump->set_jump_destination(entry); + + // Update jump to call. + set_destination_mt_safe(stub); +} + +void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); + // Reset stub. + address stub = static_stub->addr(); + assert(stub != NULL, "stub not found"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + method_holder->set_data(0); + jump->set_jump_destination((address)-1); +} + +//----------------------------------------------------------------------------- +// Non-product mode code +#ifndef PRODUCT + +void CompiledStaticCall::verify() { + // Verify call. + NativeCall::verify(); + if (os::is_MP()) { + verify_alignment(); + } + + // Verify stub. + address stub = find_stub(); + assert(stub != NULL, "no stub found for static call"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + // Verify state. + assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); +} + +#endif // !PRODUCT diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/cppInterpreter_x86.cpp --- a/src/cpu/x86/vm/cppInterpreter_x86.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -539,12 +539,11 @@ // compute full expression stack limit - const int extra_stack = 0; //6815692//Method::extra_stack_words(); __ movptr(rdx, Address(rbx, Method::const_offset())); __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words __ negptr(rdx); // so we can subtract in next step // Allocate expression stack - __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -extra_stack)); + __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -Method::extra_stack_words())); __ movptr(STATE(_stack_limit), rsp); } @@ -570,20 +569,28 @@ // rcx: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { - - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); - const Address backedge_counter (rbx, Method::backedge_counter_offset() + InvocationCounter::counter_offset()); - - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset())); + Label done; + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + const Address backedge_counter (rax, + MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset()); + + __ get_method_counters(rbx, rax, done); + + if (ProfileInterpreter) { + __ incrementl(Address(rax, + MethodCounters::interpreter_invocation_counter_offset())); } // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter - + __ movl(rcx, invocation_counter); __ increment(rcx, InvocationCounter::count_increment); + __ movl(invocation_counter, rcx); // save invocation count + + __ movl(rax, backedge_counter); // load backedge counter __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so @@ -593,7 +600,7 @@ __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); __ jcc(Assembler::aboveEqual, *overflow); - + __ bind(done); } void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { @@ -684,10 +691,9 @@ // Always give one monitor to allow us to start interp if sync method.
// Any additional monitors need a check when moving the expression stack const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize; - const int extra_stack = 0; //6815692//Method::extra_stack_entries(); __ movptr(rax, Address(rbx, Method::const_offset())); __ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words - __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor)); + __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor+Method::extra_stack_words())); __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size)); #ifdef ASSERT @@ -977,7 +983,6 @@ address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset()); @@ -1029,8 +1034,6 @@ } #endif - if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count - const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax); NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread // Since at this point in the method invocation the exception handler @@ -2260,8 +2263,7 @@ const int overhead_size = sizeof(BytecodeInterpreter)/wordSize + ( frame::sender_sp_offset - frame::link_offset) + 2; - const int extra_stack = 0; //6815692//Method::extra_stack_entries(); - const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) * + const int method_stack = (method->max_locals() + method->max_stack()) * Interpreter::stackElementWords; return overhead_size + method_stack + stub_code; } @@ -2326,8 +2328,7 @@ // Need +1 here because stack_base points to the word just above the first expr stack entry // and stack_limit is supposed to point to the word just below the last expr stack entry. // See generate_compute_interpreter_state. - int extra_stack = 0; //6815692//Method::extra_stack_entries(); - to_fill->_stack_limit = stack_base - (method->max_stack() + extra_stack + 1); + to_fill->_stack_limit = stack_base - (method->max_stack() + 1); to_fill->_monitor_base = (BasicObjectLock*) monitor_base; to_fill->_self_link = to_fill; @@ -2375,8 +2376,7 @@ monitor_size); // Now with full size expression stack - int extra_stack = 0; //6815692//Method::extra_stack_entries(); - int full_frame_size = short_frame_size + (method->max_stack() + extra_stack) * BytesPerWord; + int full_frame_size = short_frame_size + method->max_stack() * BytesPerWord; // and now with only live portion of the expression stack short_frame_size = short_frame_size + tempcount * BytesPerWord; diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/frame_x86.cpp --- a/src/cpu/x86/vm/frame_x86.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/frame_x86.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/monitorChunk.hpp" +#include "runtime/os.hpp" #include "runtime/signature.hpp" #include "runtime/stubCodeGenerator.hpp" #include "runtime/stubRoutines.hpp" @@ -54,16 +55,22 @@ address sp = (address)_sp; address fp = (address)_fp; address unextended_sp = (address)_unextended_sp; - // sp must be within the stack - bool sp_safe = (sp <= thread->stack_base()) && - (sp >= thread->stack_base() - thread->stack_size()); + + // consider stack guards when trying to determine "safe" stack pointers + static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0; + size_t usable_stack_size = thread->stack_size() - stack_guard_size; + + // sp must be within the usable part of the stack (not in guards) + bool sp_safe = (sp < thread->stack_base()) && + (sp >= thread->stack_base() - usable_stack_size); + if (!sp_safe) { return false; } // unextended sp must be within the stack and above or equal sp - bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) && + bool unextended_sp_safe = (unextended_sp < thread->stack_base()) && (unextended_sp >= sp); if (!unextended_sp_safe) { @@ -71,7 +78,8 @@ } // an fp must be within the stack and above (but not equal) sp - bool fp_safe = (fp <= thread->stack_base()) && (fp > sp); + // second evaluation on fp+ is added to handle situation where fp is -1 + bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base()))); // We know sp/unextended_sp are safe only fp is questionable here @@ -86,6 +94,13 @@ // other generic buffer blobs are more problematic so we just assume they are // ok. adapter blobs never have a frame complete and are never ok. + // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc + + if (!Interpreter::contains(_pc) && _cb->frame_size() <= 0) { + //assert(0, "Invalid frame_size"); + return false; + } + if (!_cb->is_frame_complete_at(_pc)) { if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { return false; @@ -107,7 +122,7 @@ address jcw = (address)entry_frame_call_wrapper(); - bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp); + bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp); return jcw_safe; @@ -134,12 +149,6 @@ sender_pc = (address) *(sender_sp-1); } - // We must always be able to find a recognizable pc - CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); - if (sender_pc == NULL || sender_blob == NULL) { - return false; - } - // If the potential sender is the interpreter then we can do some more checking if (Interpreter::contains(sender_pc)) { @@ -149,7 +158,7 @@ // is really a frame pointer. 
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset); - bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp); + bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp); if (!saved_fp_safe) { return false; @@ -163,6 +172,17 @@ } + // We must always be able to find a recognizable pc + CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + if (sender_pc == NULL || sender_blob == NULL) { + return false; + } + + // Could be a zombie method + if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { + return false; + } + // Could just be some random pointer within the codeBlob if (!sender_blob->code_contains(sender_pc)) { return false; @@ -174,10 +194,9 @@ } // Could be the call_stub - if (StubRoutines::returns_to_call_stub(sender_pc)) { intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset); - bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp); + bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp); if (!saved_fp_safe) { return false; @@ -190,15 +209,24 @@ // Validate the JavaCallWrapper an entry frame must have address jcw = (address)sender.entry_frame_call_wrapper(); - bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp()); + bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp()); return jcw_safe; } - // If the frame size is 0 something is bad because every nmethod has a non-zero frame size + if (sender_blob->is_nmethod()) { + nmethod* nm = sender_blob->as_nmethod_or_null(); + if (nm != NULL) { + if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) { + return false; + } + } + } + + // If the frame size is 0 (or less) something is bad because every nmethod has a non-zero frame size // because the return address counts against the callee's frame. - if (sender_blob->frame_size() == 0) { + if (sender_blob->frame_size() <= 0) { assert(!sender_blob->is_nmethod(), "should count return address at least"); return false; } @@ -208,7 +236,9 @@ // should not be anything but the call stub (already covered), the interpreter (already covered) // or an nmethod. - assert(sender_blob->is_nmethod(), "Impossible call chain"); + if (!sender_blob->is_nmethod()) { + return false; + } // Could put some more validation for the potential non-interpreted sender // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
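The strengthened safe_for_sender() logic above reduces to a small set of range tests against the usable (non-guard) part of the thread stack. As a reading aid only, here is a hedged, self-contained C++ sketch of the same idiom; StackBounds, sp_is_safe and fp_is_safe are hypothetical names, and the guard-page arithmetic is simplified relative to the real os::uses_stack_guard_pages() logic:

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for the thread's stack geometry.
struct StackBounds {
  uint8_t* base;       // highest stack address (stacks grow down)
  size_t   size;       // total reserved stack size
  size_t   guard_size; // yellow + red guard pages at the low end
};

// sp must lie strictly below the base and above the guard pages.
static bool sp_is_safe(const StackBounds& s, uint8_t* sp) {
  size_t usable = s.size - s.guard_size;
  return sp < s.base && sp >= s.base - usable;
}

// fp must additionally leave room for the saved return-address slot;
// the third test rejects frame pointers so close to the base (or wrapped
// values such as -1) that reading the slot would run past the stack.
static bool fp_is_safe(const StackBounds& s, uint8_t* fp, uint8_t* sp,
                       size_t return_addr_offset) {
  return fp < s.base && fp > sp &&
         fp + return_addr_offset * sizeof(void*) < s.base;
}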
diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/globalDefinitions_x86.hpp --- a/src/cpu/x86/vm/globalDefinitions_x86.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/globalDefinitions_x86.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -27,4 +27,6 @@ const int StackAlignmentInBytes = 16; +#define SUPPORTS_NATIVE_CX8 + #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/globals_x86.hpp --- a/src/cpu/x86/vm/globals_x86.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/globals_x86.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -77,7 +77,7 @@ #endif // GC Ergo Flags -define_pd_global(intx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread +define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \ \ diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/interp_masm_x86_32.cpp --- a/src/cpu/x86/vm/interp_masm_x86_32.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -266,6 +266,20 @@ addptr(cache, tmp); // construct pointer to cache entry } +void InterpreterMacroAssembler::get_method_counters(Register method, + Register mcs, Label& skip) { + Label has_counters; + movptr(mcs, Address(method, Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::notZero, has_counters); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::build_method_counters), method); + movptr(mcs, Address(method,Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory + bind(has_counters); +} + // Load object from cpool->resolved_references(index) void InterpreterMacroAssembler::load_resolved_reference_at_index( Register result, Register index) { diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/interp_masm_x86_32.hpp --- a/src/cpu/x86/vm/interp_masm_x86_32.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -89,6 +89,7 @@ void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2)); + void get_method_counters(Register method, Register mcs, Label& skip); // load cpool->resolved_references(index); void load_resolved_reference_at_index(Register result, Register index); diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/interp_masm_x86_64.cpp --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -271,6 +271,20 @@ addptr(cache, tmp); // construct pointer to cache entry } +void InterpreterMacroAssembler::get_method_counters(Register method, + Register mcs, Label& skip) { + Label has_counters; + movptr(mcs, Address(method, Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::notZero, has_counters); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::build_method_counters), method); + movptr(mcs, Address(method,Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory + bind(has_counters); +} + // Load object from cpool->resolved_references(index) void InterpreterMacroAssembler::load_resolved_reference_at_index( Register result, Register index) { diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/interp_masm_x86_64.hpp --- a/src/cpu/x86/vm/interp_masm_x86_64.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -112,6 +112,7 @@ void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2)); + void get_method_counters(Register method, Register mcs, Label& skip); // load cpool->resolved_references(index); void load_resolved_reference_at_index(Register result, Register index); diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/jni_x86.h --- a/src/cpu/x86/vm/jni_x86.h Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/jni_x86.h Wed Jun 19 10:45:56 2013 +0200 @@ -28,7 +28,13 @@ #if defined(SOLARIS) || defined(LINUX) || defined(_ALLBSD_SOURCE) -#if defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2) + +// Note: please do not change these without also changing jni_md.h in the JDK +// repository +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility) #define JNIEXPORT __attribute__((visibility("default"))) #define JNIIMPORT __attribute__((visibility("default"))) #else diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/stubGenerator_x86_32.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1498,27 +1498,29 @@ __ movptr(elem_klass, elem_klass_addr); // query the object klass generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp, &L_store_element, NULL); - // (On fall-through, we have failed the element type check.) + // (On fall-through, we have failed the element type check.) // ======== end loop ======== // It was a real error; we must depend on the caller to finish the job. // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops. // Emit GC store barriers for the oops we have copied (length_arg + count), // and report their number to the caller. + assert_different_registers(to, count, rax); + Label L_post_barrier; __ addl(count, length_arg); // transfers = (length - remaining) __ movl2ptr(rax, count); // save the value - __ notptr(rax); // report (-1^K) to caller - __ movptr(to, to_arg); // reload - assert_different_registers(to, count, rax); - gen_write_ref_array_post_barrier(to, count); - __ jmpb(L_done); + __ notptr(rax); // report (-1^K) to caller (does not affect flags) + __ jccb(Assembler::notZero, L_post_barrier); + __ jmp(L_done); // K == 0, nothing was copied, skip post barrier // Come here on success only. __ BIND(L_do_card_marks); + __ xorptr(rax, rax); // return 0 on success __ movl2ptr(count, length_arg); - __ movptr(to, to_arg); // reload + + __ BIND(L_post_barrier); + __ movptr(to, to_arg); // reload gen_write_ref_array_post_barrier(to, count); - __ xorptr(rax, rax); // return 0 on success // Common exit point (success or failure). 
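Before moving on to the 64-bit stub generator, note that the get_method_counters() helper added to both interp_masm files above encodes a lazy-allocation handshake in assembly: load the MethodCounters pointer, call into the VM to build it if it is null, reload, and skip the counter update entirely if allocation failed. A rough, hedged C++ rendering of that control flow (MethodLike, Counters and build_counters are illustrative stand-ins, not HotSpot API):

#include <new>

struct Counters { int invocation_count = 0; };

struct MethodLike {
  Counters* counters = nullptr; // allocated lazily, may stay null on OOM
};

// Stands in for the InterpreterRuntime::build_method_counters VM call;
// returns null when allocation fails instead of raising an error.
static Counters* build_counters(MethodLike* m) {
  if (m->counters == nullptr) m->counters = new (std::nothrow) Counters();
  return m->counters;
}

static void bump_invocation_count(MethodLike* m) {
  Counters* mcs = m->counters;     // movptr(mcs, method_counters_offset)
  if (mcs == nullptr) {            // testptr / jcc(notZero, has_counters)
    build_counters(m);             // call_VM(build_method_counters)
    mcs = m->counters;             // reload after the call
    if (mcs == nullptr) return;    // jcc(zero, skip): OutOfMemory path
  }
  mcs->invocation_count++;         // bind(has_counters): safe to count
}

The point of the skip label is that a failed allocation merely degrades profiling instead of crashing the interpreter.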
__ BIND(L_done); diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/stubGenerator_x86_64.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1217,27 +1217,28 @@ // // Input: // start - register containing starting address of destination array - // end - register containing ending address of destination array + // count - elements count // scratch - scratch register // // The input registers are overwritten. - // The ending address is inclusive. - void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) { - assert_different_registers(start, end, scratch); + // + void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) { + assert_different_registers(start, count, scratch); BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: - { - __ pusha(); // push registers (overkill) - // must compute element count unless barrier set interface is changed (other platforms supply count) - assert_different_registers(start, end, scratch); - __ lea(scratch, Address(end, BytesPerHeapOop)); - __ subptr(scratch, start); // subtract start to get #bytes - __ shrptr(scratch, LogBytesPerHeapOop); // convert to element count - __ mov(c_rarg0, start); - __ mov(c_rarg1, scratch); + __ pusha(); // push registers (overkill) + if (c_rarg0 == count) { // On win64 c_rarg0 == rcx + assert_different_registers(c_rarg1, start); + __ mov(c_rarg1, count); + __ mov(c_rarg0, start); + } else { + assert_different_registers(c_rarg0, count); + __ mov(c_rarg0, start); + __ mov(c_rarg1, count); + } __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2); __ popa(); } @@ -1249,22 +1250,16 @@ assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); Label L_loop; - - __ shrptr(start, CardTableModRefBS::card_shift); - __ addptr(end, BytesPerHeapOop); - __ shrptr(end, CardTableModRefBS::card_shift); - __ subptr(end, start); // number of bytes to copy - - intptr_t disp = (intptr_t) ct->byte_map_base; - if (Assembler::is_simm32(disp)) { - Address cardtable(noreg, noreg, Address::no_scale, disp); - __ lea(scratch, cardtable); - } else { - ExternalAddress cardtable((address)disp); - __ lea(scratch, cardtable); - } - - const Register count = end; // 'end' register contains bytes count now + const Register end = count; + + __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size + __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive + __ shrptr(start, CardTableModRefBS::card_shift); + __ shrptr(end, CardTableModRefBS::card_shift); + __ subptr(end, start); // end --> cards count + + int64_t disp = (int64_t) ct->byte_map_base; + __ mov64(scratch, disp); __ addptr(start, scratch); __ BIND(L_loop); __ movb(Address(start, count, Address::times_1), 0); @@ -1916,8 +1911,7 @@ __ BIND(L_exit); if (is_oop) { - __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4)); - gen_write_ref_array_post_barrier(saved_to, end_to, rax); + gen_write_ref_array_post_barrier(saved_to, dword_count, rax); } restore_arg_regs(); inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free @@ -2012,12 +2006,10 @@ // Copy in multi-bytes chunks copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); - __ bind(L_exit); - if (is_oop) { - Register end_to = rdx; - __ leaq(end_to, 
Address(to, dword_count, Address::times_4, -4)); - gen_write_ref_array_post_barrier(to, end_to, rax); - } + __ BIND(L_exit); + if (is_oop) { + gen_write_ref_array_post_barrier(to, dword_count, rax); + } restore_arg_regs(); inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 @@ -2055,6 +2047,7 @@ const Register end_from = from; // source array end address const Register end_to = rcx; // destination array end address const Register saved_to = to; + const Register saved_count = r11; // End pointers are inclusive, and if count is not zero they point // to the last unit copied: end_to[0] := end_from[0] @@ -2072,6 +2065,8 @@ // r9 and r10 may be used to save non-volatile registers // 'from', 'to' and 'qword_count' are now valid if (is_oop) { + // Save to and count for store barrier + __ movptr(saved_count, qword_count); // no registers are destroyed by this call gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); } @@ -2104,7 +2099,7 @@ if (is_oop) { __ BIND(L_exit); - gen_write_ref_array_post_barrier(saved_to, end_to, rax); + gen_write_ref_array_post_barrier(saved_to, saved_count, rax); } restore_arg_regs(); if (is_oop) { @@ -2187,8 +2182,7 @@ if (is_oop) { __ BIND(L_exit); - __ lea(rcx, Address(to, saved_count, Address::times_8, -8)); - gen_write_ref_array_post_barrier(to, rcx, rax); + gen_write_ref_array_post_barrier(to, saved_count, rax); } restore_arg_regs(); if (is_oop) { @@ -2375,20 +2369,20 @@ // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. // Emit GC store barriers for the oops we have copied (r14 + rdx), // and report their number to the caller. - assert_different_registers(rax, r14_length, count, to, end_to, rcx); - __ lea(end_to, to_element_addr); - __ addptr(end_to, -heapOopSize); // make an inclusive end pointer - gen_write_ref_array_post_barrier(to, end_to, rscratch1); - __ movptr(rax, r14_length); // original oops - __ addptr(rax, count); // K = (original - remaining) oops - __ notptr(rax); // report (-1^K) to caller - __ jmp(L_done); + assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); + Label L_post_barrier; + __ addptr(r14_length, count); // K = (original - remaining) oops + __ movptr(rax, r14_length); // save the value + __ notptr(rax); // report (-1^K) to caller (does not affect flags) + __ jccb(Assembler::notZero, L_post_barrier); + __ jmp(L_done); // K == 0, nothing was copied, skip post barrier // Come here on success only. __ BIND(L_do_card_marks); - __ addptr(end_to, -heapOopSize); // make an inclusive end pointer - gen_write_ref_array_post_barrier(to, end_to, rscratch1); - __ xorptr(rax, rax); // return 0 on success + __ xorptr(rax, rax); // return 0 on success + + __ BIND(L_post_barrier); + gen_write_ref_array_post_barrier(to, r14_length, rscratch1); // Common exit point (success or failure). __ BIND(L_done); diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/templateInterpreter_x86_32.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -344,13 +344,13 @@ // rcx: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { - const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); - // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. + Label done; + // Note: In tiered we increment either counters in MethodCounters* or in MDO + // depending if we're profiling or not. if (TieredCompilation) { int increment = InvocationCounter::count_increment; int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; - Label no_mdo, done; + Label no_mdo; if (ProfileInterpreter) { // Are we profiling? __ movptr(rax, Address(rbx, Method::method_data_offset())); @@ -360,26 +360,41 @@ const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset())); __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); - __ jmpb(done); + __ jmp(done); } __ bind(no_mdo); - // Increment counter in Method* (we don't need to load it, it's in rcx). - __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); + // Increment counter in MethodCounters + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + + __ get_method_counters(rbx, rax, done); + __ increment_mask_and_jump(invocation_counter, increment, mask, + rcx, false, Assembler::zero, overflow); __ bind(done); } else { - const Address backedge_counter (rbx, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); + const Address backedge_counter (rax, + MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset()); + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + + __ get_method_counters(rbx, rax, done); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset())); + if (ProfileInterpreter) { + __ incrementl(Address(rax, + MethodCounters::interpreter_invocation_counter_offset())); } + // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter + __ movl(rcx, invocation_counter); + __ incrementl(rcx, InvocationCounter::count_increment); + __ movl(invocation_counter, rcx); // save invocation count - __ incrementl(rcx, InvocationCounter::count_increment); + __ movl(rax, backedge_counter); // load backedge counter __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so @@ -399,6 +414,7 @@ __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); __ jcc(Assembler::aboveEqual, *overflow); + __ bind(done); } } @@ -868,7 +884,6 @@ address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address 
size_of_parameters(rcx, ConstMethod::size_of_parameters_offset()); @@ -897,9 +912,7 @@ // NULL oop temp (mirror or jni oop result) __ push((int32_t)NULL_WORD); - if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count // initialize fixed part of activation frame - generate_fixed_frame(true); // make sure method is native & not abstract @@ -1286,7 +1299,6 @@ address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset()); const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset()); @@ -1326,7 +1338,6 @@ __ bind(exit); } - if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count // initialize fixed part of activation frame generate_fixed_frame(false); @@ -1554,8 +1565,7 @@ // be sure to change this if you add/subtract anything to/from the overhead area const int overhead_size = -frame::interpreter_frame_initial_sp_offset; - const int extra_stack = Method::extra_stack_entries(); - const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) * + const int method_stack = (method->max_locals() + method->max_stack()) * Interpreter::stackElementWords; return overhead_size + method_stack + stub_code; } diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/templateInterpreter_x86_64.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -304,9 +304,11 @@ #ifdef GRAAL void graal_initialize_time(JavaThread* thread) { + assert(ProfileInterpreter, "must be profiling interpreter"); frame fr = thread->last_frame(); assert(fr.is_interpreted_frame(), "must come from interpreter"); - fr.interpreter_frame_method()->set_graal_invocation_time(os::javaTimeNanos()); + assert(fr.interpreter_frame_method()->method_counters() != NULL, "need to initialize method counters"); + fr.interpreter_frame_method()->method_counters()->set_graal_invocation_time(os::javaTimeNanos()); } #endif // GRAAL @@ -323,13 +325,12 @@ Label* overflow, Label* profile_method, Label* profile_method_continue) { - const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); + Label done; // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. if (TieredCompilation) { int increment = InvocationCounter::count_increment; int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; - Label no_mdo, done; + Label no_mdo; if (ProfileInterpreter) { // Are we profiling? 
__ movptr(rax, Address(rbx, Method::method_data_offset())); @@ -339,20 +340,30 @@ const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset())); __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); - __ jmpb(done); + __ jmp(done); } __ bind(no_mdo); - // Increment counter in Method* (we don't need to load it, it's in ecx). - __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); + // Increment counter in MethodCounters + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + __ get_method_counters(rbx, rax, done); + __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, + false, Assembler::zero, overflow); __ bind(done); } else { - const Address backedge_counter(rbx, - Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); + const Address backedge_counter(rax, + MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset()); + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ incrementl(Address(rbx, - Method::interpreter_invocation_counter_offset())); + __ get_method_counters(rbx, rax, done); + + if (ProfileInterpreter) { + __ incrementl(Address(rax, + MethodCounters::interpreter_invocation_counter_offset())); } #ifdef GRAAL @@ -376,12 +387,13 @@ #endif // GRAAL // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter + __ movl(rcx, invocation_counter); + __ incrementl(rcx, InvocationCounter::count_increment); + __ movl(invocation_counter, rcx); // save invocation count - __ incrementl(rcx, InvocationCounter::count_increment); + __ movl(rax, backedge_counter); // load backedge counter __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so @@ -398,6 +410,7 @@ __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); __ jcc(Assembler::aboveEqual, *overflow); + __ bind(done); } } @@ -927,9 +940,6 @@ address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method:: - invocation_counter_offset() + - InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rcx, ConstMethod:: size_of_parameters_offset()); @@ -960,10 +970,6 @@ // (static native method holder mirror/jni oop result) __ push((int) NULL_WORD); - if (inc_counter) { - __ movl(rcx, invocation_counter); // (pre-)fetch invocation count - } - // initialize fixed part of activation frame generate_fixed_frame(true); @@ -1380,9 +1386,6 @@ address entry_point = __ pc(); const Address constMethod(rbx, Method::const_offset()); - const Address invocation_counter(rbx, - Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); const Address access_flags(rbx, Method::access_flags_offset()); const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset()); @@ -1427,10 +1430,6 @@ __ bind(exit); } - // (pre-)fetch invocation count - if (inc_counter) { - __ movl(rcx, 
invocation_counter); - } // initialize fixed part of activation frame generate_fixed_frame(false); @@ -1664,8 +1663,7 @@ -(frame::interpreter_frame_initial_sp_offset) + entry_size; const int stub_code = frame::entry_frame_after_call_words; - const int extra_stack = Method::extra_stack_entries(); - const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) * + const int method_stack = (method->max_locals() + method->max_stack()) * Interpreter::stackElementWords; return (overhead_size + method_stack + stub_code); } diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/templateTable_x86_32.cpp --- a/src/cpu/x86/vm/templateTable_x86_32.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1546,9 +1546,10 @@ __ get_method(rcx); // ECX holds method __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count - const ByteSize be_offset = Method::backedge_counter_offset() + InvocationCounter::counter_offset(); - const ByteSize inv_offset = Method::invocation_counter_offset() + InvocationCounter::counter_offset(); - const int method_offset = frame::interpreter_frame_method_offset * wordSize; + const ByteSize be_offset = MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset(); + const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset(); // Load up EDX with the branch displacement __ movl(rdx, at_bcp(1)); @@ -1596,6 +1597,22 @@ __ testl(rdx, rdx); // check if forward or backward branch __ jcc(Assembler::positive, dispatch); // count only if backward branch + // check if MethodCounters exists + Label has_counters; + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::notZero, has_counters); + __ push(rdx); + __ push(rcx); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), + rcx); + __ pop(rcx); + __ pop(rdx); + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::zero, dispatch); + __ bind(has_counters); + if (TieredCompilation) { Label no_mdo; int increment = InvocationCounter::count_increment; @@ -1613,16 +1630,19 @@ __ jmp(dispatch); } __ bind(no_mdo); - // Increment backedge counter in Method* + // Increment backedge counter in MethodCounters* + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, rax, false, Assembler::zero, &backedge_counter_overflow); } else { // increment counter + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ movl(rax, Address(rcx, be_offset)); // load backedge counter __ incrementl(rax, InvocationCounter::count_increment); // increment counter __ movl(Address(rcx, be_offset), rax); // store counter __ movl(rax, Address(rcx, inv_offset)); // load invocation counter + __ andl(rax, InvocationCounter::count_mask_value); // and the status bits __ addl(rax, Address(rcx, be_offset)); // add both counters diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/templateTable_x86_64.cpp --- a/src/cpu/x86/vm/templateTable_x86_64.cpp Tue Jun 
18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -158,14 +158,19 @@ if (val == noreg) { __ store_heap_oop_null(Address(rdx, 0)); } else { + // G1 barrier needs uncompressed oop for region cross check. + Register new_val = val; + if (UseCompressedOops) { + new_val = rbx; + __ movptr(new_val, val); + } __ store_heap_oop(Address(rdx, 0), val); __ g1_write_barrier_post(rdx /* store_adr */, - val /* new_val */, + new_val /* new_val */, r15_thread /* thread */, r8 /* tmp */, rbx /* tmp2 */); } - } break; #endif // INCLUDE_ALL_GCS @@ -1564,11 +1569,10 @@ __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx // holds bumped taken count - const ByteSize be_offset = Method::backedge_counter_offset() + + const ByteSize be_offset = MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset(); - const ByteSize inv_offset = Method::invocation_counter_offset() + + const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset(); - const int method_offset = frame::interpreter_frame_method_offset * wordSize; // Load up edx with the branch displacement __ movl(rdx, at_bcp(1)); @@ -1618,6 +1622,23 @@ // r14: locals pointer __ testl(rdx, rdx); // check if forward or backward branch __ jcc(Assembler::positive, dispatch); // count only if backward branch + + // check if MethodCounters exists + Label has_counters; + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::notZero, has_counters); + __ push(rdx); + __ push(rcx); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), + rcx); + __ pop(rcx); + __ pop(rdx); + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::zero, dispatch); + __ bind(has_counters); + if (TieredCompilation) { Label no_mdo; int increment = InvocationCounter::count_increment; @@ -1635,16 +1656,19 @@ __ jmp(dispatch); } __ bind(no_mdo); - // Increment backedge counter in Method* + // Increment backedge counter in MethodCounters* + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, rax, false, Assembler::zero, &backedge_counter_overflow); } else { // increment counter + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ movl(rax, Address(rcx, be_offset)); // load backedge counter __ incrementl(rax, InvocationCounter::count_increment); // increment counter __ movl(Address(rcx, be_offset), rax); // store counter __ movl(rax, Address(rcx, inv_offset)); // load invocation counter + __ andl(rax, InvocationCounter::count_mask_value); // and the status bits __ addl(rax, Address(rcx, be_offset)); // add both counters diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/x86_32.ad --- a/src/cpu/x86/vm/x86_32.ad Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/x86_32.ad Wed Jun 19 10:45:56 2013 +0200 @@ -1257,43 +1257,6 @@ } //============================================================================= - -// emit call stub, compiled java to interpreter -void emit_java_to_interp(CodeBuffer &cbuf ) { - // Stub is fixed up when the corresponding
call is converted from calling - // compiled code to calling interpreted code. - // mov rbx,0 - // jmp -1 - - address mark = cbuf.insts_mark(); // get mark within main instrs section - - // Note that the code buffer's insts_mark is always relative to insts. - // That's why we must use the macroassembler to generate a stub. - MacroAssembler _masm(&cbuf); - - address base = - __ start_a_stub(Compile::MAX_stubs_size); - if (base == NULL) return; // CodeBuffer::expand failed - // static stub relocation stores the instruction address of the call - __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32); - // static stub relocation also tags the Method* in the code-stream. - __ mov_metadata(rbx, (Metadata*)NULL); // method is zapped till fixup time - // This is recognized as unresolved by relocs/nativeInst/ic code - __ jump(RuntimeAddress(__ pc())); - - __ end_a_stub(); - // Update current stubs pointer and restore insts_end. -} -// size of call stub, compiled java to interpretor -uint size_java_to_interp() { - return 10; // movl; jmp -} -// relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() { - return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call -} - -//============================================================================= #ifndef PRODUCT void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { st->print_cr( "CMP EAX,[ECX+4]\t# Inline cache check"); @@ -1909,8 +1872,8 @@ emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), static_call_Relocation::spec(), RELOC_IMM32 ); } - if (_method) { // Emit stub for static call - emit_java_to_interp(cbuf); + if (_method) { // Emit stub for static call. + CompiledStaticCall::emit_to_interp_stub(cbuf); } %} diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/x86/vm/x86_64.ad --- a/src/cpu/x86/vm/x86_64.ad Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/x86/vm/x86_64.ad Wed Jun 19 10:45:56 2013 +0200 @@ -1388,48 +1388,6 @@ } //============================================================================= - -// emit call stub, compiled java to interpreter -void emit_java_to_interp(CodeBuffer& cbuf) -{ - // Stub is fixed up when the corresponding call is converted from - // calling compiled code to calling interpreted code. - // movq rbx, 0 - // jmp -5 # to self - - address mark = cbuf.insts_mark(); // get mark within main instrs section - - // Note that the code buffer's insts_mark is always relative to insts. - // That's why we must use the macroassembler to generate a stub. - MacroAssembler _masm(&cbuf); - - address base = - __ start_a_stub(Compile::MAX_stubs_size); - if (base == NULL) return; // CodeBuffer::expand failed - // static stub relocation stores the instruction address of the call - __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64); - // static stub relocation also tags the Method* in the code-stream. - __ mov_metadata(rbx, (Metadata*) NULL); // method is zapped till fixup time - // This is recognized as unresolved by relocs/nativeinst/ic code - __ jump(RuntimeAddress(__ pc())); - - // Update current stubs pointer and restore insts_end. 
- __ end_a_stub(); -} - -// size of call stub, compiled java to interpretor -uint size_java_to_interp() -{ - return 15; // movq (1+1+8); jmp (1+4) -} - -// relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() -{ - return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call -} - -//============================================================================= #ifndef PRODUCT void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const { @@ -2078,8 +2036,8 @@ RELOC_DISP32); } if (_method) { - // Emit stub for static call - emit_java_to_interp(cbuf); + // Emit stub for static call. + CompiledStaticCall::emit_to_interp_stub(cbuf); } %} @@ -2222,12 +2180,6 @@ $$$emit32$src$$constant; %} - enc_class Con64(immL src) - %{ - // Output immediate - emit_d64($src$$constant); - %} - enc_class Con32F_as_bits(immF src) %{ // Output Float immediate bits @@ -7608,7 +7560,7 @@ ins_pipe( pipe_cmpxchg ); %} -instruct xaddL_no_res( memory mem, Universe dummy, immL add, rFlagsReg cr) %{ +instruct xaddL_no_res( memory mem, Universe dummy, immL32 add, rFlagsReg cr) %{ predicate(n->as_LoadStore()->result_not_used()); match(Set dummy (GetAndAddL mem add)); effect(KILL cr); diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/zero/vm/compiledIC_zero.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/zero/vm/compiledIC_zero.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,122 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/systemDictionary.hpp" +#include "code/codeCache.hpp" +#include "code/compiledIC.hpp" +#include "code/icBuffer.hpp" +#include "code/nmethod.hpp" +#include "code/vtableStubs.hpp" +#include "interpreter/interpreter.hpp" +#include "interpreter/linkResolver.hpp" +#include "memory/metadataFactory.hpp" +#include "memory/oopFactory.hpp" +#include "oops/method.hpp" +#include "oops/oop.inline.hpp" +#include "oops/symbol.hpp" +#include "runtime/icache.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/stubRoutines.hpp" +#include "utilities/events.hpp" + + +// Release the CompiledICHolder* associated with this call site if there is one. +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully.
+ NativeCall* call = nativeCall_at(call_site->addr()); + if (is_icholder_entry(call->destination())) { + NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); + } +} + +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + return is_icholder_entry(call->destination()); +} + +//----------------------------------------------------------------------------- +// High-level access to an inline cache. Guaranteed to be MT-safe. + +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +// ---------------------------------------------------------------------------- + +void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +int CompiledStaticCall::to_interp_stub_size() { + ShouldNotReachHere(); // Only needed for COMPILER2. + return 0; +} + +// Relocation entries for call stub, compiled java to interpreter. +int CompiledStaticCall::reloc_to_interp_stub() { + ShouldNotReachHere(); // Only needed for COMPILER2. + return 0; +} + +void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +//----------------------------------------------------------------------------- +// Non-product mode code. +#ifndef PRODUCT + +void CompiledStaticCall::verify() { + ShouldNotReachHere(); // Only needed for COMPILER2. 
+} + +#endif // !PRODUCT diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/zero/vm/cppInterpreter_zero.cpp --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -212,7 +212,13 @@ // Update the invocation counter if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) { - InvocationCounter *counter = method->invocation_counter(); + MethodCounters* mcs = method->method_counters(); + if (mcs == NULL) { + CALL_VM_NOCHECK(mcs = InterpreterRuntime::build_method_counters(thread, method)); + if (HAS_PENDING_EXCEPTION) + goto unwind_and_return; + } + InvocationCounter *counter = mcs->invocation_counter(); counter->increment(); if (counter->reached_InvocationLimit()) { CALL_VM_NOCHECK( diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/zero/vm/globals_zero.hpp --- a/src/cpu/zero/vm/globals_zero.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/zero/vm/globals_zero.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -55,7 +55,7 @@ define_pd_global(bool, UseMembar, true); // GC Ergo Flags -define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread +define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) diff -r e0fb8a213650 -r 836a62f43af9 src/cpu/zero/vm/jni_zero.h --- a/src/cpu/zero/vm/jni_zero.h Tue Jun 18 14:23:29 2013 -0700 +++ b/src/cpu/zero/vm/jni_zero.h Wed Jun 19 10:45:56 2013 +0200 @@ -25,7 +25,13 @@ */ -#if defined(__GNUC__) && (__GNUC__ >= 4) + +// Note: please do not change these without also changing jni_md.h in the JDK +// repository +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility) #define JNIEXPORT __attribute__((visibility("default"))) #define JNIIMPORT __attribute__((visibility("default"))) #else diff -r e0fb8a213650 -r 836a62f43af9 src/os/bsd/vm/chaitin_bsd.cpp --- a/src/os/bsd/vm/chaitin_bsd.cpp Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,42 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" - -void PhaseRegAlloc::pd_preallocate_hook() { - // no action -} - -#ifdef ASSERT -void PhaseRegAlloc::pd_postallocate_verify_hook() { - // no action -} -#endif - - -// Reconciliation History -// chaitin_solaris.cpp 1.7 99/07/12 23:54:22 -// End diff -r e0fb8a213650 -r 836a62f43af9 src/os/bsd/vm/osThread_bsd.hpp --- a/src/os/bsd/vm/osThread_bsd.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/bsd/vm/osThread_bsd.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -94,7 +94,7 @@ // flags that support signal based suspend/resume on Bsd are in a // separate class to avoid confusion with many flags in OSThread that // are used by VM level suspend/resume. - os::Bsd::SuspendResume sr; + os::SuspendResume sr; // _ucontext and _siginfo are used by SR_handler() to save thread context, // and they will later be used to walk the stack or reposition thread PC. diff -r e0fb8a213650 -r 836a62f43af9 src/os/bsd/vm/os_bsd.cpp --- a/src/os/bsd/vm/os_bsd.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/bsd/vm/os_bsd.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -626,8 +626,6 @@ ////////////////////////////////////////////////////////////////////////////// // create new thread -static address highest_vm_reserved_address(); - // check if it's safe to start a new thread static bool _thread_safety_check(Thread* thread) { return true; @@ -935,10 +933,10 @@ return (1000 * 1000); } -// XXX: For now, code this as if BSD does not support vtime. -bool os::supports_vtime() { return false; } +bool os::supports_vtime() { return true; } bool os::enable_vtime() { return false; } bool os::vtime_enabled() { return false; } + double os::elapsedVTime() { // better than nothing, but not much return elapsedTime(); @@ -1230,10 +1228,6 @@ return retval; } -const char* os::get_current_directory(char *buf, int buflen) { - return getcwd(buf, buflen); -} - // check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr; @@ -1858,17 +1852,118 @@ // Bsd(POSIX) specific hand shaking semaphore. 
#ifdef __APPLE__ -static semaphore_t sig_sem; +typedef semaphore_t os_semaphore_t; #define SEM_INIT(sem, value) semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value) -#define SEM_WAIT(sem) semaphore_wait(sem); -#define SEM_POST(sem) semaphore_signal(sem); +#define SEM_WAIT(sem) semaphore_wait(sem) +#define SEM_POST(sem) semaphore_signal(sem) +#define SEM_DESTROY(sem) semaphore_destroy(mach_task_self(), sem) #else -static sem_t sig_sem; +typedef sem_t os_semaphore_t; #define SEM_INIT(sem, value) sem_init(&sem, 0, value) -#define SEM_WAIT(sem) sem_wait(&sem); -#define SEM_POST(sem) sem_post(&sem); +#define SEM_WAIT(sem) sem_wait(&sem) +#define SEM_POST(sem) sem_post(&sem) +#define SEM_DESTROY(sem) sem_destroy(&sem) #endif +class Semaphore : public StackObj { + public: + Semaphore(); + ~Semaphore(); + void signal(); + void wait(); + bool trywait(); + bool timedwait(unsigned int sec, int nsec); + private: + jlong currenttime() const; + os_semaphore_t _semaphore; +}; + +Semaphore::Semaphore() : _semaphore(0) { + SEM_INIT(_semaphore, 0); +} + +Semaphore::~Semaphore() { + SEM_DESTROY(_semaphore); +} + +void Semaphore::signal() { + SEM_POST(_semaphore); +} + +void Semaphore::wait() { + SEM_WAIT(_semaphore); +} + +jlong Semaphore::currenttime() const { + struct timeval tv; + gettimeofday(&tv, NULL); + return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000); +} + +#ifdef __APPLE__ +bool Semaphore::trywait() { + return timedwait(0, 0); +} + +bool Semaphore::timedwait(unsigned int sec, int nsec) { + kern_return_t kr = KERN_ABORTED; + mach_timespec_t waitspec; + waitspec.tv_sec = sec; + waitspec.tv_nsec = nsec; + + jlong starttime = currenttime(); + + kr = semaphore_timedwait(_semaphore, waitspec); + while (kr == KERN_ABORTED) { + jlong totalwait = (sec * NANOSECS_PER_SEC) + nsec; + + jlong current = currenttime(); + jlong passedtime = current - starttime; + + if (passedtime >= totalwait) { + waitspec.tv_sec = 0; + waitspec.tv_nsec = 0; + } else { + jlong waittime = totalwait - (current - starttime); + waitspec.tv_sec = waittime / NANOSECS_PER_SEC; + waitspec.tv_nsec = waittime % NANOSECS_PER_SEC; + } + + kr = semaphore_timedwait(_semaphore, waitspec); + } + + return kr == KERN_SUCCESS; +} + +#else + +bool Semaphore::trywait() { + return sem_trywait(&_semaphore) == 0; +} + +bool Semaphore::timedwait(unsigned int sec, int nsec) { + struct timespec ts; + jlong endtime = unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec); + + while (1) { + int result = sem_timedwait(&_semaphore, &ts); + if (result == 0) { + return true; + } else if (errno == EINTR) { + continue; + } else if (errno == ETIMEDOUT) { + return false; + } else { + return false; + } + } +} + +#endif // __APPLE__ + +static os_semaphore_t sig_sem; +static Semaphore sr_semaphore; + void os::signal_init_pd() { // Initialize signal structures ::memset((void*)pending_signals, 0, sizeof(pending_signals)); @@ -2080,9 +2175,10 @@ flags |= MAP_FIXED; } - // Map uncommitted pages PROT_READ and PROT_WRITE, change access - // to PROT_EXEC if executable when we commit the page. - addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE, + // Map reserved/uncommitted pages PROT_NONE so we fail early if we + // touch an uncommitted page. Otherwise, the read/write might + // succeed if we have enough swap space to back the physical page.
+ addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0); if (addr != MAP_FAILED) { @@ -2115,10 +2211,6 @@ return anon_munmap(addr, size); } -static address highest_vm_reserved_address() { - return _highest_vm_reserved_address; -} - static bool bsd_mprotect(char* addr, size_t size, int prot) { // Bsd wants the mprotect address argument to be page aligned. char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size()); @@ -2162,43 +2254,6 @@ return false; } -/* -* Set the coredump_filter bits to include largepages in core dump (bit 6) -* -* From the coredump_filter documentation: -* -* - (bit 0) anonymous private memory -* - (bit 1) anonymous shared memory -* - (bit 2) file-backed private memory -* - (bit 3) file-backed shared memory -* - (bit 4) ELF header pages in file-backed private memory areas (it is -* effective only if the bit 2 is cleared) -* - (bit 5) hugetlb private memory -* - (bit 6) hugetlb shared memory -*/ -static void set_coredump_filter(void) { - FILE *f; - long cdm; - - if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) { - return; - } - - if (fscanf(f, "%lx", &cdm) != 1) { - fclose(f); - return; - } - - rewind(f); - - if ((cdm & LARGEPAGES_BIT) == 0) { - cdm |= LARGEPAGES_BIT; - fprintf(f, "%#lx", cdm); - } - - fclose(f); -} - // Large page support static size_t _large_page_size = 0; @@ -2662,9 +2717,6 @@ static void resume_clear_context(OSThread *osthread) { osthread->set_ucontext(NULL); osthread->set_siginfo(NULL); - - // notify the suspend action is completed, we have now resumed - osthread->sr.clear_suspended(); } static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) { @@ -2684,7 +2736,7 @@ // its signal handlers run and prevents sigwait()'s use with the // mutex granting granting signal. // -// Currently only ever called on the VMThread +// Currently only ever called on the VMThread or JavaThread // static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) { // Save and restore errno to avoid confusing native code with EINTR @@ -2693,38 +2745,48 @@ Thread* thread = Thread::current(); OSThread* osthread = thread->osthread(); - assert(thread->is_VM_thread(), "Must be VMThread"); - // read current suspend action - int action = osthread->sr.suspend_action(); - if (action == os::Bsd::SuspendResume::SR_SUSPEND) { + assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); + + os::SuspendResume::State current = osthread->sr.state(); + if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { suspend_save_context(osthread, siginfo, context); - // Notify the suspend action is about to be completed. do_suspend() - // waits until SR_SUSPENDED is set and then returns. We will wait - // here for a resume signal and that completes the suspend-other - // action. 
do_suspend/do_resume is always called as a pair from - // the same thread - so there are no races - - // notify the caller - osthread->sr.set_suspended(); - - sigset_t suspend_set; // signals for sigsuspend() - - // get current set of blocked signals and unblock resume signal - pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); - sigdelset(&suspend_set, SR_signum); - - // wait here until we are resumed - do { - sigsuspend(&suspend_set); - // ignore all returns until we get a resume signal - } while (osthread->sr.suspend_action() != os::Bsd::SuspendResume::SR_CONTINUE); + // attempt to switch the state, we assume we had a SUSPEND_REQUEST + os::SuspendResume::State state = osthread->sr.suspended(); + if (state == os::SuspendResume::SR_SUSPENDED) { + sigset_t suspend_set; // signals for sigsuspend() + + // get current set of blocked signals and unblock resume signal + pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); + sigdelset(&suspend_set, SR_signum); + + sr_semaphore.signal(); + // wait here until we are resumed + while (1) { + sigsuspend(&suspend_set); + + os::SuspendResume::State result = osthread->sr.running(); + if (result == os::SuspendResume::SR_RUNNING) { + sr_semaphore.signal(); + break; + } else if (result != os::SuspendResume::SR_SUSPENDED) { + ShouldNotReachHere(); + } + } + + } else if (state == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else { + ShouldNotReachHere(); + } resume_clear_context(osthread); - + } else if (current == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) { + // ignore } else { - assert(action == os::Bsd::SuspendResume::SR_CONTINUE, "unexpected sr action"); - // nothing special to do - just leave the handler + // ignore } errno = old_errno; @@ -2768,42 +2830,82 @@ return 0; } +static int sr_notify(OSThread* osthread) { + int status = pthread_kill(osthread->pthread_id(), SR_signum); + assert_status(status == 0, status, "pthread_kill"); + return status; +} + +// "Randomly" selected value for how long we want to spin +// before bailing out on suspending a thread, also how often +// we send a signal to a thread we want to resume +static const int RANDOMLY_LARGE_INTEGER = 1000000; +static const int RANDOMLY_LARGE_INTEGER2 = 100; // returns true on success and false on error - really an error is fatal // but this seems the normal response to library errors static bool do_suspend(OSThread* osthread) { + assert(osthread->sr.is_running(), "thread should be running"); + assert(!sr_semaphore.trywait(), "semaphore has invalid state"); + // mark as suspended and send signal - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_SUSPEND); - int status = pthread_kill(osthread->pthread_id(), SR_signum); - assert_status(status == 0, status, "pthread_kill"); - - // check status and wait until notified of suspension - if (status == 0) { - for (int i = 0; !osthread->sr.is_suspended(); i++) { - os::yield_all(i); - } - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE); - return true; - } - else { - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE); + if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) { + // failed to switch, state wasn't running? 
+ ShouldNotReachHere(); return false; } + + if (sr_notify(osthread) != 0) { + ShouldNotReachHere(); + } + + // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED + while (true) { + if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { + break; + } else { + // timeout + os::SuspendResume::State cancelled = osthread->sr.cancel_suspend(); + if (cancelled == os::SuspendResume::SR_RUNNING) { + return false; + } else if (cancelled == os::SuspendResume::SR_SUSPENDED) { + // make sure that we consume the signal on the semaphore as well + sr_semaphore.wait(); + break; + } else { + ShouldNotReachHere(); + return false; + } + } + } + + guarantee(osthread->sr.is_suspended(), "Must be suspended"); + return true; } static void do_resume(OSThread* osthread) { assert(osthread->sr.is_suspended(), "thread should be suspended"); - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_CONTINUE); - - int status = pthread_kill(osthread->pthread_id(), SR_signum); - assert_status(status == 0, status, "pthread_kill"); - // check status and wait unit notified of resumption - if (status == 0) { - for (int i = 0; osthread->sr.is_suspended(); i++) { - os::yield_all(i); + assert(!sr_semaphore.trywait(), "invalid semaphore state"); + + if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) { + // failed to switch to WAKEUP_REQUEST + ShouldNotReachHere(); + return; + } + + while (true) { + if (sr_notify(osthread) == 0) { + if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { + if (osthread->sr.is_running()) { + return; + } + } + } else { + ShouldNotReachHere(); } } - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE); + + guarantee(osthread->sr.is_running(), "Must be running!"); } //////////////////////////////////////////////////////////////////////////////// @@ -3033,6 +3135,19 @@ sigAct.sa_sigaction = signalHandler; sigAct.sa_flags = SA_SIGINFO|SA_RESTART; } +#if __APPLE__ + // Needed for main thread as XNU (Mac OS X kernel) will only deliver SIGSEGV + // (which starts as SIGBUS) on main thread with faulting address inside "stack+guard pages" + // if the signal handler declares it will handle it on alternate stack. + // Notice we only declare we will handle it on alt stack, but we are not + // actually going to use real alt stack - this is just a workaround. 
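
do_resume above re-sends SR_signum in a loop rather than trusting a single pthread_kill: a second POSIX signal coalesces with one that is still pending, so one notification can effectively be lost. A stripped-down model of that notify-until-acknowledged loop; SIGUSR2 stands in for SR_signum, and a short nanosleep stands in for the 2 ms semaphore timed wait the patch uses:

#include <pthread.h>
#include <signal.h>
#include <time.h>

static volatile sig_atomic_t acked = 0;

// Stand-in for SR_handler's wakeup branch; the real code posts sr_semaphore.
static void wakeup_handler(int) { acked = 1; }

// Keep notifying until the target acknowledges: a dropped or coalesced
// signal costs only one short wait before the next attempt.
static void notify_until_acked(pthread_t target, int sig) {
  struct sigaction sa;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sa.sa_handler = wakeup_handler;
  sigaction(sig, &sa, NULL);                 // installed once, for the sketch

  const struct timespec two_ms = {0, 2 * 1000 * 1000};
  while (!acked) {
    if (pthread_kill(target, sig) != 0) {
      break;                                 // target gone; the patch treats this as fatal
    }
    nanosleep(&two_ms, NULL);                // the patch waits on sr_semaphore instead
  }
}
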
+ // Please see ux_exception.c, method catch_mach_exception_raise for details + // link http://www.opensource.apple.com/source/xnu/xnu-2050.18.24/bsd/uxkern/ux_exception.c + if (sig == SIGSEGV) { + sigAct.sa_flags |= SA_ONSTACK; + } +#endif + // Save flags, which are set by ours assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range"); sigflags[sig] = sigAct.sa_flags; @@ -3541,7 +3656,40 @@ return false; } +void os::SuspendedThreadTask::internal_do_task() { + if (do_suspend(_thread->osthread())) { + SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext()); + do_task(context); + do_resume(_thread->osthread()); + } +} + /// +class PcFetcher : public os::SuspendedThreadTask { +public: + PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} + ExtendedPC result(); +protected: + void do_task(const os::SuspendedThreadTaskContext& context); +private: + ExtendedPC _epc; +}; + +ExtendedPC PcFetcher::result() { + guarantee(is_done(), "task is not done yet."); + return _epc; +} + +void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { + Thread* thread = context.thread(); + OSThread* osthread = thread->osthread(); + if (osthread->ucontext() != NULL) { + _epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext()); + } else { + // NULL context is unexpected, double-check this is the VMThread + guarantee(thread->is_VM_thread(), "can only be called for VMThread"); + } +} // Suspends the target using the signal mechanism and then grabs the PC before // resuming the target. Used by the flat-profiler only @@ -3550,22 +3698,9 @@ assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); assert(thread->is_VM_thread(), "Can only be called for VMThread"); - ExtendedPC epc; - - OSThread* osthread = thread->osthread(); - if (do_suspend(osthread)) { - if (osthread->ucontext() != NULL) { - epc = os::Bsd::ucontext_get_pc(osthread->ucontext()); - } else { - // NULL context is unexpected, double-check this is the VMThread - guarantee(thread->is_VM_thread(), "can only be called for VMThread"); - } - do_resume(osthread); - } - // failure means pthread_kill failed for some reason - arguably this is - // a fatal problem, but such problems are ignored elsewhere - - return epc; + PcFetcher fetcher(thread); + fetcher.run(); + return fetcher.result(); } int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) @@ -4550,3 +4685,4 @@ return n; } + diff -r e0fb8a213650 -r 836a62f43af9 src/os/bsd/vm/os_bsd.hpp --- a/src/os/bsd/vm/os_bsd.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/bsd/vm/os_bsd.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -145,36 +145,6 @@ // BsdThreads work-around for 6292965 static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime); - - // Bsd suspend/resume support - this helper is a shadow of its former - // self now that low-level suspension is barely used, and old workarounds - // for BsdThreads are no longer needed. 
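
PcFetcher above is an instance of a small template-method pattern: os::SuspendedThreadTask::run() brackets the subclass hook with the suspend/resume handshake, so do_task() always observes a stopped, consistent target thread. A condensed illustration of the shape; SuspendedTaskSketch and its members are hypothetical stand-ins, with the platform handshake stubbed out:

class SuspendedTaskSketch {
 public:
  SuspendedTaskSketch() : _done(false) {}
  virtual ~SuspendedTaskSketch() {}

  // Template method: suspend, run the hook, resume. Subclasses never see a
  // target that is still moving.
  void run() {
    if (suspend_target()) {      // stand-in for do_suspend(osthread)
      do_task();                 // e.g. PcFetcher copies the PC out of the ucontext
      resume_target();           // stand-in for do_resume(osthread)
    }
    _done = true;
  }
  bool is_done() const { return _done; }

 protected:
  virtual void do_task() = 0;    // subclass-specific work, runs at most once

 private:
  bool suspend_target() { return true; }   // the real signal handshake, elided
  void resume_target()  {}
  bool _done;
};

The guarantee(is_done(), ...) in PcFetcher::result() then enforces that run() completed before _epc is read.
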
- class SuspendResume { - private: - volatile int _suspend_action; - volatile jint _state; - public: - // values for suspend_action: - enum { - SR_NONE = 0x00, - SR_SUSPEND = 0x01, // suspend request - SR_CONTINUE = 0x02, // resume request - SR_SUSPENDED = 0x20 // values for _state: + SR_NONE - }; - - SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; } - - int suspend_action() const { return _suspend_action; } - void set_suspend_action(int x) { _suspend_action = x; } - - // atomic updates for _state - inline void set_suspended(); - inline void clear_suspended(); - bool is_suspended() { return _state & SR_SUSPENDED; } - - #undef SR_SUSPENDED - }; - private: typedef int (*sched_getcpu_func_t)(void); typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen); @@ -250,7 +220,7 @@ int TryPark () ; int park (jlong millis) ; void SetAssociation (Thread * a) { _Assoc = a ; } -} ; +}; class PlatformParker : public CHeapObj { protected: @@ -268,6 +238,6 @@ status = pthread_mutex_init (_mutex, NULL); assert_status(status == 0, status, "mutex_init"); } -} ; +}; #endif // OS_BSD_VM_OS_BSD_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/os/bsd/vm/os_bsd.inline.hpp --- a/src/os/bsd/vm/os_bsd.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/bsd/vm/os_bsd.inline.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -286,20 +286,4 @@ return ::setsockopt(fd, level, optname, optval, optlen); } -inline void os::Bsd::SuspendResume::set_suspended() { - jint temp, temp2; - do { - temp = _state; - temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp); - } while (temp2 != temp); -} - -inline void os::Bsd::SuspendResume::clear_suspended() { - jint temp, temp2; - do { - temp = _state; - temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp); - } while (temp2 != temp); -} - #endif // OS_BSD_VM_OS_BSD_INLINE_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/os/linux/vm/chaitin_linux.cpp --- a/src/os/linux/vm/chaitin_linux.cpp Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,42 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" - -void PhaseRegAlloc::pd_preallocate_hook() { - // no action -} - -#ifdef ASSERT -void PhaseRegAlloc::pd_postallocate_verify_hook() { - // no action -} -#endif - - -// Reconciliation History -// chaitin_solaris.cpp 1.7 99/07/12 23:54:22 -// End diff -r e0fb8a213650 -r 836a62f43af9 src/os/linux/vm/osThread_linux.hpp --- a/src/os/linux/vm/osThread_linux.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/linux/vm/osThread_linux.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,7 +77,7 @@ // flags that support signal based suspend/resume on Linux are in a // separate class to avoid confusion with many flags in OSThread that // are used by VM level suspend/resume. - os::Linux::SuspendResume sr; + os::SuspendResume sr; // _ucontext and _siginfo are used by SR_handler() to save thread context, // and they will later be used to walk the stack or reposition thread PC. diff -r e0fb8a213650 -r 836a62f43af9 src/os/linux/vm/os_linux.cpp --- a/src/os/linux/vm/os_linux.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/linux/vm/os_linux.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -101,6 +101,12 @@ # include # include +// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling +// getrusage() is prepared to handle the associated failure. +#ifndef RUSAGE_THREAD +#define RUSAGE_THREAD (1) /* only the calling thread */ +#endif + #define MAX_PATH (2 * K) // for timer info max values which include all bits @@ -119,6 +125,7 @@ Mutex* os::Linux::_createThread_lock = NULL; pthread_t os::Linux::_main_thread; int os::Linux::_page_size = -1; +const int os::Linux::_vm_default_page_size = (8 * K); bool os::Linux::_is_floating_stack = false; bool os::Linux::_is_NPTL = false; bool os::Linux::_supports_fast_thread_cpu_time = false; @@ -144,6 +151,9 @@ /* Used to protect dlsym() calls */ static pthread_mutex_t dl_mutex; +// Declarations +static void unpackTime(timespec* absTime, bool isAbsolute, jlong time); + #ifdef JAVASE_EMBEDDED class MemNotifyThread: public Thread { friend class VMStructs; @@ -1335,15 +1345,19 @@ return (1000 * 1000); } -// For now, we say that linux does not support vtime. I have no idea -// whether it can actually be made to (DLD, 9/13/05). 
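
The RUSAGE_THREAD guard above matters because older kernels and headers predate per-thread rusage; when the constant is only a guess, getrusage() can fail at run time with EINVAL and the caller must fall back, which is what the rewritten elapsedVTime() just below does. A self-contained sketch of the same pattern; thread_cpu_seconds is a hypothetical helper, not part of the patch:

#include <sys/resource.h>
#include <sys/time.h>

#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD (1)   // guessed value; the runtime check below copes if wrong
#endif

// Per-thread user+system CPU seconds, or a negative value when the kernel
// rejects RUSAGE_THREAD (caller then falls back to wall-clock elapsed time).
static double thread_cpu_seconds() {
  struct rusage usage;
  if (getrusage(RUSAGE_THREAD, &usage) != 0) {
    return -1.0;
  }
  return (double)(usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) +
         (double)(usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / 1e6;
}
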
- -bool os::supports_vtime() { return false; } +bool os::supports_vtime() { return true; } bool os::enable_vtime() { return false; } bool os::vtime_enabled() { return false; } + double os::elapsedVTime() { - // better than nothing, but not much - return elapsedTime(); + struct rusage usage; + int retval = getrusage(RUSAGE_THREAD, &usage); + if (retval == 0) { + return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000); + } else { + // better than nothing, but not much + return elapsedTime(); + } } jlong os::javaTimeMillis() { @@ -1662,10 +1676,6 @@ return retval; } -const char* os::get_current_directory(char *buf, int buflen) { - return getcwd(buf, buflen); -} - // check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr; @@ -2400,6 +2410,57 @@ return CAST_FROM_FN_PTR(void*, UserHandler); } +class Semaphore : public StackObj { + public: + Semaphore(); + ~Semaphore(); + void signal(); + void wait(); + bool trywait(); + bool timedwait(unsigned int sec, int nsec); + private: + sem_t _semaphore; +}; + + +Semaphore::Semaphore() { + sem_init(&_semaphore, 0, 0); +} + +Semaphore::~Semaphore() { + sem_destroy(&_semaphore); +} + +void Semaphore::signal() { + sem_post(&_semaphore); +} + +void Semaphore::wait() { + sem_wait(&_semaphore); +} + +bool Semaphore::trywait() { + return sem_trywait(&_semaphore) == 0; +} + +bool Semaphore::timedwait(unsigned int sec, int nsec) { + struct timespec ts; + unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec); + + while (1) { + int result = sem_timedwait(&_semaphore, &ts); + if (result == 0) { + return true; + } else if (errno == EINTR) { + continue; + } else if (errno == ETIMEDOUT) { + return false; + } else { + return false; + } + } +} + extern "C" { typedef void (*sa_handler_t)(int); typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); @@ -2439,6 +2500,7 @@ // Linux(POSIX) specific hand shaking semaphore. static sem_t sig_sem; +static Semaphore sr_semaphore; void os::signal_init_pd() { // Initialize signal structures @@ -2906,9 +2968,10 @@ flags |= MAP_FIXED; } - // Map uncommitted pages PROT_READ and PROT_WRITE, change access - // to PROT_EXEC if executable when we commit the page. - addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE, + // Map reserved/uncommitted pages PROT_NONE so we fail early if we + // touch an uncommitted page. Otherwise, the read/write might + // succeed if we have enough swap space to back the physical page. + addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0); if (addr != MAP_FAILED) { @@ -3551,9 +3614,6 @@ static void resume_clear_context(OSThread *osthread) { osthread->set_ucontext(NULL); osthread->set_siginfo(NULL); - - // notify the suspend action is completed, we have now resumed - osthread->sr.clear_suspended(); } static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) { @@ -3573,7 +3633,7 @@ // its signal handlers run and prevents sigwait()'s use with the // mutex granting granting signal. 
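
Semaphore::timedwait() above leans on unpackTime() because sem_timedwait() takes an absolute CLOCK_REALTIME deadline, not a relative interval; and because the deadline is absolute, the wait can simply be restarted after EINTR without drifting. A sketch of that conversion and retry loop; deadline_in and timed_wait_ns are hypothetical names:

#include <semaphore.h>
#include <cerrno>
#include <time.h>

// Build an absolute CLOCK_REALTIME deadline 'ns' nanoseconds from now.
static void deadline_in(timespec* ts, long long ns) {
  clock_gettime(CLOCK_REALTIME, ts);
  ts->tv_sec  += ns / 1000000000LL;
  ts->tv_nsec += ns % 1000000000LL;
  if (ts->tv_nsec >= 1000000000L) { ts->tv_sec += 1; ts->tv_nsec -= 1000000000L; }
}

// Wait up to 'ns' nanoseconds. EINTR restarts the wait against the same
// absolute deadline; ETIMEDOUT (or anything else) reports false.
static bool timed_wait_ns(sem_t* sem, long long ns) {
  timespec ts;
  deadline_in(&ts, ns);
  for (;;) {
    int rc = sem_timedwait(sem, &ts);
    if (rc == 0) return true;
    if (errno == EINTR) continue;   // signal delivery interrupted the wait
    return false;
  }
}
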
// -// Currently only ever called on the VMThread +// Currently only ever called on the VMThread and JavaThreads (PC sampling) // static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) { // Save and restore errno to avoid confusing native code with EINTR @@ -3582,38 +3642,46 @@ Thread* thread = Thread::current(); OSThread* osthread = thread->osthread(); - assert(thread->is_VM_thread(), "Must be VMThread"); - // read current suspend action - int action = osthread->sr.suspend_action(); - if (action == os::Linux::SuspendResume::SR_SUSPEND) { + assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); + + os::SuspendResume::State current = osthread->sr.state(); + if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { suspend_save_context(osthread, siginfo, context); - // Notify the suspend action is about to be completed. do_suspend() - // waits until SR_SUSPENDED is set and then returns. We will wait - // here for a resume signal and that completes the suspend-other - // action. do_suspend/do_resume is always called as a pair from - // the same thread - so there are no races - - // notify the caller - osthread->sr.set_suspended(); - - sigset_t suspend_set; // signals for sigsuspend() - - // get current set of blocked signals and unblock resume signal - pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); - sigdelset(&suspend_set, SR_signum); - - // wait here until we are resumed - do { - sigsuspend(&suspend_set); - // ignore all returns until we get a resume signal - } while (osthread->sr.suspend_action() != os::Linux::SuspendResume::SR_CONTINUE); + // attempt to switch the state, we assume we had a SUSPEND_REQUEST + os::SuspendResume::State state = osthread->sr.suspended(); + if (state == os::SuspendResume::SR_SUSPENDED) { + sigset_t suspend_set; // signals for sigsuspend() + + // get current set of blocked signals and unblock resume signal + pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); + sigdelset(&suspend_set, SR_signum); + + sr_semaphore.signal(); + // wait here until we are resumed + while (1) { + sigsuspend(&suspend_set); + + os::SuspendResume::State result = osthread->sr.running(); + if (result == os::SuspendResume::SR_RUNNING) { + sr_semaphore.signal(); + break; + } + } + + } else if (state == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else { + ShouldNotReachHere(); + } resume_clear_context(osthread); - + } else if (current == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) { + // ignore } else { - assert(action == os::Linux::SuspendResume::SR_CONTINUE, "unexpected sr action"); - // nothing special to do - just leave the handler + // ignore } errno = old_errno; @@ -3657,42 +3725,82 @@ return 0; } +static int sr_notify(OSThread* osthread) { + int status = pthread_kill(osthread->pthread_id(), SR_signum); + assert_status(status == 0, status, "pthread_kill"); + return status; +} + +// "Randomly" selected value for how long we want to spin +// before bailing out on suspending a thread, also how often +// we send a signal to a thread we want to resume +static const int RANDOMLY_LARGE_INTEGER = 1000000; +static const int RANDOMLY_LARGE_INTEGER2 = 100; // returns true on success and false on error - really an error is fatal // but this seems the normal response to library errors static bool do_suspend(OSThread* osthread) { + assert(osthread->sr.is_running(), "thread should be running"); + assert(!sr_semaphore.trywait(), 
"semaphore has invalid state"); + // mark as suspended and send signal - osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_SUSPEND); - int status = pthread_kill(osthread->pthread_id(), SR_signum); - assert_status(status == 0, status, "pthread_kill"); - - // check status and wait until notified of suspension - if (status == 0) { - for (int i = 0; !osthread->sr.is_suspended(); i++) { - os::yield_all(i); + if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) { + // failed to switch, state wasn't running? + ShouldNotReachHere(); + return false; + } + + if (sr_notify(osthread) != 0) { + ShouldNotReachHere(); + } + + // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED + while (true) { + if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { + break; + } else { + // timeout + os::SuspendResume::State cancelled = osthread->sr.cancel_suspend(); + if (cancelled == os::SuspendResume::SR_RUNNING) { + return false; + } else if (cancelled == os::SuspendResume::SR_SUSPENDED) { + // make sure that we consume the signal on the semaphore as well + sr_semaphore.wait(); + break; + } else { + ShouldNotReachHere(); + return false; + } } - osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE); - return true; - } - else { - osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE); - return false; - } + } + + guarantee(osthread->sr.is_suspended(), "Must be suspended"); + return true; } static void do_resume(OSThread* osthread) { assert(osthread->sr.is_suspended(), "thread should be suspended"); - osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_CONTINUE); - - int status = pthread_kill(osthread->pthread_id(), SR_signum); - assert_status(status == 0, status, "pthread_kill"); - // check status and wait unit notified of resumption - if (status == 0) { - for (int i = 0; osthread->sr.is_suspended(); i++) { - os::yield_all(i); + assert(!sr_semaphore.trywait(), "invalid semaphore state"); + + if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) { + // failed to switch to WAKEUP_REQUEST + ShouldNotReachHere(); + return; + } + + while (true) { + if (sr_notify(osthread) == 0) { + if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { + if (osthread->sr.is_running()) { + return; + } + } + } else { + ShouldNotReachHere(); } } - osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE); + + guarantee(osthread->sr.is_running(), "Must be running!"); } //////////////////////////////////////////////////////////////////////////////// @@ -4249,6 +4357,15 @@ Linux::clock_init(); initial_time_count = os::elapsed_counter(); pthread_mutex_init(&dl_mutex, NULL); + + // If the pagesize of the VM is greater than 8K determine the appropriate + // number of initial guard pages. The user can change this with the + // command line arguments, if needed. + if (vm_page_size() > (int)Linux::vm_default_page_size()) { + StackYellowPages = 1; + StackRedPages = 1; + StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size(); + } } // To install functions for atexit system call @@ -4302,8 +4419,8 @@ // Add in 2*BytesPerWord times page size to account for VM stack during // class initialization depending on 32 or 64 bit VM. 
os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed, - (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ - 2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size()); + (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() + + (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size()); size_t threadStackSizeInBytes = ThreadStackSize * K; if (threadStackSizeInBytes != 0 && @@ -4455,6 +4572,40 @@ /// +void os::SuspendedThreadTask::internal_do_task() { + if (do_suspend(_thread->osthread())) { + SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext()); + do_task(context); + do_resume(_thread->osthread()); + } +} + +class PcFetcher : public os::SuspendedThreadTask { +public: + PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} + ExtendedPC result(); +protected: + void do_task(const os::SuspendedThreadTaskContext& context); +private: + ExtendedPC _epc; +}; + +ExtendedPC PcFetcher::result() { + guarantee(is_done(), "task is not done yet."); + return _epc; +} + +void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { + Thread* thread = context.thread(); + OSThread* osthread = thread->osthread(); + if (osthread->ucontext() != NULL) { + _epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext()); + } else { + // NULL context is unexpected, double-check this is the VMThread + guarantee(thread->is_VM_thread(), "can only be called for VMThread"); + } +} + // Suspends the target using the signal mechanism and then grabs the PC before // resuming the target. Used by the flat-profiler only ExtendedPC os::get_thread_pc(Thread* thread) { @@ -4462,22 +4613,9 @@ assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); assert(thread->is_VM_thread(), "Can only be called for VMThread"); - ExtendedPC epc; - - OSThread* osthread = thread->osthread(); - if (do_suspend(osthread)) { - if (osthread->ucontext() != NULL) { - epc = os::Linux::ucontext_get_pc(osthread->ucontext()); - } else { - // NULL context is unexpected, double-check this is the VMThread - guarantee(thread->is_VM_thread(), "can only be called for VMThread"); - } - do_resume(osthread); - } - // failure means pthread_kill failed for some reason - arguably this is - // a fatal problem, but such problems are ignored elsewhere - - return epc; + PcFetcher fetcher(thread); + fetcher.run(); + return fetcher.result(); } int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) @@ -5599,4 +5737,5 @@ new MemNotifyThread(fd); } } + #endif // JAVASE_EMBEDDED diff -r e0fb8a213650 -r 836a62f43af9 src/os/linux/vm/os_linux.hpp --- a/src/os/linux/vm/os_linux.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/linux/vm/os_linux.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -70,6 +70,7 @@ static pthread_t _main_thread; static Mutex* _createThread_lock; static int _page_size; + static const int _vm_default_page_size; static julong available_memory(); static julong physical_memory() { return _physical_memory; } @@ -116,6 +117,8 @@ static int page_size(void) { return _page_size; } static void set_page_size(int val) { _page_size = val; } + static int vm_default_page_size(void) { return _vm_default_page_size; } + static address ucontext_get_pc(ucontext_t* uc); static intptr_t* ucontext_get_sp(ucontext_t* uc); static intptr_t* ucontext_get_fp(ucontext_t* uc); @@ -207,35 +210,6 @@ // LinuxThreads work-around for 6292965 static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const 
struct timespec *_abstime); - - // Linux suspend/resume support - this helper is a shadow of its former - // self now that low-level suspension is barely used, and old workarounds - // for LinuxThreads are no longer needed. - class SuspendResume { - private: - volatile int _suspend_action; - volatile jint _state; - public: - // values for suspend_action: - enum { - SR_NONE = 0x00, - SR_SUSPEND = 0x01, // suspend request - SR_CONTINUE = 0x02, // resume request - SR_SUSPENDED = 0x20 // values for _state: + SR_NONE - }; - - SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; } - - int suspend_action() const { return _suspend_action; } - void set_suspend_action(int x) { _suspend_action = x; } - - // atomic updates for _state - inline void set_suspended(); - inline void clear_suspended(); - bool is_suspended() { return _state & SR_SUSPENDED; } - - }; - private: typedef int (*sched_getcpu_func_t)(void); typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen); @@ -330,6 +304,6 @@ status = pthread_mutex_init (_mutex, NULL); assert_status(status == 0, status, "mutex_init"); } -} ; +}; #endif // OS_LINUX_VM_OS_LINUX_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/os/linux/vm/os_linux.inline.hpp --- a/src/os/linux/vm/os_linux.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/linux/vm/os_linux.inline.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -288,20 +288,4 @@ return ::setsockopt(fd, level, optname, optval, optlen); } -inline void os::Linux::SuspendResume::set_suspended() { - jint temp, temp2; - do { - temp = _state; - temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp); - } while (temp2 != temp); -} - -inline void os::Linux::SuspendResume::clear_suspended() { - jint temp, temp2; - do { - temp = _state; - temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp); - } while (temp2 != temp); -} - #endif // OS_LINUX_VM_OS_LINUX_INLINE_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/os/posix/launcher/java_md.c --- a/src/os/posix/launcher/java_md.c Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1936 +0,0 @@ -/* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - - -#include "java.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef GAMMA -#include "manifest_info.h" -#include "version_comp.h" -#endif - -#if defined(__linux__) || defined(_ALLBSD_SOURCE) -#include -#else -#include -#endif - -#ifdef __APPLE__ -#define JVM_DLL "libjvm.dylib" -#define JAVA_DLL "libjava.dylib" -#define LD_LIBRARY_PATH "DYLD_LIBRARY_PATH" -#else -#define JVM_DLL "libjvm.so" -#define JAVA_DLL "libjava.so" -#define LD_LIBRARY_PATH "LD_LIBRARY_PATH" -#endif - -#ifndef GAMMA /* launcher.make defines ARCH */ -/* - * If a processor / os combination has the ability to run binaries of - * two data models and cohabitation of jre/jdk bits with both data - * models is supported, then DUAL_MODE is defined. When DUAL_MODE is - * defined, the architecture names for the narrow and wide version of - * the architecture are defined in LIBARCH64NAME and LIBARCH32NAME. Currently - * only Solaris on sparc/sparcv9 and i586/amd64 is DUAL_MODE; linux - * i586/amd64 could be defined as DUAL_MODE but that is not the - * current policy. - */ - -#ifndef LIBARCHNAME -# error "The macro LIBARCHNAME was not defined on the compile line" -#endif - -#ifdef __sun -# define DUAL_MODE -# ifndef LIBARCH32NAME -# error "The macro LIBARCH32NAME was not defined on the compile line" -# endif -# ifndef LIBARCH64NAME -# error "The macro LIBARCH64NAME was not defined on the compile line" -# endif -# include -# include -# include -#endif - -#endif /* ifndef GAMMA */ - -/* pointer to environment */ -extern char **environ; - -#ifndef GAMMA -/* - * A collection of useful strings. One should think of these as #define - * entries, but actual strings can be more efficient (with many compilers). - */ -#ifdef __linux__ -static const char *system_dir = "/usr/java"; -static const char *user_dir = "/java"; -#else /* Solaris */ -static const char *system_dir = "/usr/jdk"; -static const char *user_dir = "/jdk"; -#endif - -#endif /* ifndef GAMMA */ - -/* - * Flowchart of launcher execs and options processing on unix - * - * The selection of the proper vm shared library to open depends on - * several classes of command line options, including vm "flavor" - * options (-client, -server) and the data model options, -d32 and - * -d64, as well as a version specification which may have come from - * the command line or from the manifest of an executable jar file. - * The vm selection options are not passed to the running - * virtual machine; they must be screened out by the launcher. - * - * The version specification (if any) is processed first by the - * platform independent routine SelectVersion. This may result in - * the exec of the specified launcher version. - * - * Typically, the launcher execs at least once to ensure a suitable - * LD_LIBRARY_PATH is in effect for the process. The first exec - * screens out all the data model options; leaving the choice of data - * model implicit in the binary selected to run. However, in case no - * exec is done, the data model options are screened out before the vm - * is invoked. - * - * incoming argv ------------------------------ - * | | - * \|/ | - * CheckJVMType | - * (removes -client, -server, etc.) 
| - * \|/ - * CreateExecutionEnvironment - * (removes -d32 and -d64, - * determines desired data model, - * sets up LD_LIBRARY_PATH, - * and exec's) - * | - * -------------------------------------------- - * | - * \|/ - * exec child 1 incoming argv ----------------- - * | | - * \|/ | - * CheckJVMType | - * (removes -client, -server, etc.) | - * | \|/ - * | CreateExecutionEnvironment - * | (verifies desired data model - * | is running and acceptable - * | LD_LIBRARY_PATH; - * | no-op in child) - * | - * \|/ - * TranslateDashJArgs... - * (Prepare to pass args to vm) - * | - * | - * | - * \|/ - * ParseArguments - * (ignores -d32 and -d64, - * processes version options, - * creates argument list for vm, - * etc.) - * - */ - -static char *SetExecname(char **argv); -static char * GetExecname(); -static jboolean GetJVMPath(const char *jrepath, const char *jvmtype, - char *jvmpath, jint jvmpathsize, char * arch); -static jboolean GetJREPath(char *path, jint pathsize, char * arch, jboolean speculative); - -#ifndef GAMMA -const char * -GetArch() -{ - return LIBARCHNAME; -} -#endif /* ifndef GAMMA */ - -void -CreateExecutionEnvironment(int *_argcp, - char ***_argvp, - char jrepath[], - jint so_jrepath, - char jvmpath[], - jint so_jvmpath, - char **original_argv) { - /* - * First, determine if we are running the desired data model. If we - * are running the desired data model, all the error messages - * associated with calling GetJREPath, ReadKnownVMs, etc. should be - * output. However, if we are not running the desired data model, - * some of the errors should be suppressed since it is more - * informative to issue an error message based on whether or not the - * os/processor combination has dual mode capabilities. - */ - - char *execname = NULL; - int original_argc = *_argcp; - jboolean jvmpathExists; - - /* Compute the name of the executable */ - execname = SetExecname(*_argvp); - -#ifndef GAMMA - /* Set the LD_LIBRARY_PATH environment variable, check data model - flags, and exec process, if needed */ - { - char *arch = (char *)GetArch(); /* like sparc or sparcv9 */ - char * jvmtype = NULL; - int argc = *_argcp; - char **argv = original_argv; - - char *runpath = NULL; /* existing effective LD_LIBRARY_PATH - setting */ - - int running = /* What data model is being ILP32 => - 32 bit vm; LP64 => 64 bit vm */ -#ifdef _LP64 - 64; -#else - 32; -#endif - - int wanted = running; /* What data mode is being - asked for? Current model is - fine unless another model - is asked for */ - - char* new_runpath = NULL; /* desired new LD_LIBRARY_PATH string */ - char* newpath = NULL; /* path on new LD_LIBRARY_PATH */ - char* lastslash = NULL; - - char** newenvp = NULL; /* current environment */ - - char** newargv = NULL; - int newargc = 0; -#ifdef __sun - char* dmpath = NULL; /* data model specific LD_LIBRARY_PATH, - Solaris only */ -#endif - - /* - * Starting in 1.5, all unix platforms accept the -d32 and -d64 - * options. On platforms where only one data-model is supported - * (e.g. ia-64 Linux), using the flag for the other data model is - * an error and will terminate the program. 
- */ - - { /* open new scope to declare local variables */ - int i; - - newargv = (char **)JLI_MemAlloc((argc+1) * sizeof(*newargv)); - newargv[newargc++] = argv[0]; - - /* scan for data model arguments and remove from argument list; - last occurrence determines desired data model */ - for (i=1; i < argc; i++) { - - if (strcmp(argv[i], "-J-d64") == 0 || strcmp(argv[i], "-d64") == 0) { - wanted = 64; - continue; - } - if (strcmp(argv[i], "-J-d32") == 0 || strcmp(argv[i], "-d32") == 0) { - wanted = 32; - continue; - } - newargv[newargc++] = argv[i]; - -#ifdef JAVA_ARGS - if (argv[i][0] != '-') - continue; -#else - if (strcmp(argv[i], "-classpath") == 0 || strcmp(argv[i], "-cp") == 0) { - i++; - if (i >= argc) break; - newargv[newargc++] = argv[i]; - continue; - } - if (argv[i][0] != '-') { i++; break; } -#endif - } - - /* copy rest of args [i .. argc) */ - while (i < argc) { - newargv[newargc++] = argv[i++]; - } - newargv[newargc] = NULL; - - /* - * newargv has all proper arguments here - */ - - argc = newargc; - argv = newargv; - } - - /* If the data model is not changing, it is an error if the - jvmpath does not exist */ - if (wanted == running) { - /* Find out where the JRE is that we will be using. */ - if (!GetJREPath(jrepath, so_jrepath, arch, JNI_FALSE) ) { - fprintf(stderr, "Error: could not find Java 2 Runtime Environment.\n"); - exit(2); - } - - /* Find the specified JVM type */ - if (ReadKnownVMs(jrepath, arch, JNI_FALSE) < 1) { - fprintf(stderr, "Error: no known VMs. (check for corrupt jvm.cfg file)\n"); - exit(1); - } - - jvmpath[0] = '\0'; - jvmtype = CheckJvmType(_argcp, _argvp, JNI_FALSE); - - if (!GetJVMPath(jrepath, jvmtype, jvmpath, so_jvmpath, arch )) { - fprintf(stderr, "Error: no `%s' JVM at `%s'.\n", jvmtype, jvmpath); - exit(4); - } - } else { /* do the same speculatively or exit */ -#ifdef DUAL_MODE - if (running != wanted) { - /* Find out where the JRE is that we will be using. */ - if (!GetJREPath(jrepath, so_jrepath, ((wanted==64)?LIBARCH64NAME:LIBARCH32NAME), JNI_TRUE)) { - goto EndDataModelSpeculate; - } - - /* - * Read in jvm.cfg for target data model and process vm - * selection options. - */ - if (ReadKnownVMs(jrepath, ((wanted==64)?LIBARCH64NAME:LIBARCH32NAME), JNI_TRUE) < 1) { - goto EndDataModelSpeculate; - } - jvmpath[0] = '\0'; - jvmtype = CheckJvmType(_argcp, _argvp, JNI_TRUE); - /* exec child can do error checking on the existence of the path */ - jvmpathExists = GetJVMPath(jrepath, jvmtype, jvmpath, so_jvmpath, - ((wanted==64)?LIBARCH64NAME:LIBARCH32NAME)); - - } - EndDataModelSpeculate: /* give up and let other code report error message */ - ; -#else - fprintf(stderr, "Running a %d-bit JVM is not supported on this platform.\n", wanted); - exit(1); -#endif - } - - /* - * We will set the LD_LIBRARY_PATH as follows: - * - * o $JVMPATH (directory portion only) - * o $JRE/lib/$LIBARCHNAME - * o $JRE/../lib/$LIBARCHNAME - * - * followed by the user's previous effective LD_LIBRARY_PATH, if - * any. - */ - -#ifdef __sun - /* - * Starting in Solaris 7, ld.so.1 supports three LD_LIBRARY_PATH - * variables: - * - * 1. LD_LIBRARY_PATH -- used for 32 and 64 bit searches if - * data-model specific variables are not set. - * - * 2. LD_LIBRARY_PATH_64 -- overrides and replaces LD_LIBRARY_PATH - * for 64-bit binaries. - * - * 3. LD_LIBRARY_PATH_32 -- overrides and replaces LD_LIBRARY_PATH - * for 32-bit binaries. - * - * The vm uses LD_LIBRARY_PATH to set the java.library.path system - * property. 
To shield the vm from the complication of multiple - * LD_LIBRARY_PATH variables, if the appropriate data model - * specific variable is set, we will act as if LD_LIBRARY_PATH had - * the value of the data model specific variant and the data model - * specific variant will be unset. Note that the variable for the - * *wanted* data model must be used (if it is set), not simply the - * current running data model. - */ - - switch(wanted) { - case 0: - if(running == 32) { - dmpath = getenv("LD_LIBRARY_PATH_32"); - wanted = 32; - } - else { - dmpath = getenv("LD_LIBRARY_PATH_64"); - wanted = 64; - } - break; - - case 32: - dmpath = getenv("LD_LIBRARY_PATH_32"); - break; - - case 64: - dmpath = getenv("LD_LIBRARY_PATH_64"); - break; - - default: - fprintf(stderr, "Improper value at line %d.", __LINE__); - exit(1); /* unknown value in wanted */ - break; - } - - /* - * If dmpath is NULL, the relevant data model specific variable is - * not set and normal LD_LIBRARY_PATH should be used. - */ - if( dmpath == NULL) { - runpath = getenv("LD_LIBRARY_PATH"); - } - else { - runpath = dmpath; - } -#else - /* - * If not on Solaris, assume only a single LD_LIBRARY_PATH - * variable. - */ - runpath = getenv(LD_LIBRARY_PATH); -#endif /* __sun */ - -#if defined(__linux__) - /* - * On linux, if a binary is running as sgid or suid, glibc sets - * LD_LIBRARY_PATH to the empty string for security purposes. (In - * contrast, on Solaris the LD_LIBRARY_PATH variable for a - * privileged binary does not lose its settings; but the dynamic - * linker does apply more scrutiny to the path.) The launcher uses - * the value of LD_LIBRARY_PATH to prevent an exec loop. - * Therefore, if we are running sgid or suid, this function's - * setting of LD_LIBRARY_PATH will be ineffective and we should - * return from the function now. Getting the right libraries to - * be found must be handled through other mechanisms. - */ - if((getgid() != getegid()) || (getuid() != geteuid()) ) { - return; - } -#elif defined(_ALLBSD_SOURCE) - /* - * On BSD, if a binary is running as sgid or suid, libc sets - * LD_LIBRARY_PATH to the empty string for security purposes. (In - * contrast, on Solaris the LD_LIBRARY_PATH variable for a - * privileged binary does not lose its settings; but the dynamic - * linker does apply more scrutiny to the path.) The launcher uses - * the value of LD_LIBRARY_PATH to prevent an exec loop. - * Therefore, if we are running sgid or suid, this function's - * setting of LD_LIBRARY_PATH will be ineffective and we should - * return from the function now. Getting the right libraries to - * be found must be handled through other mechanisms. - */ - if(issetugid()) { - return; - } -#endif - - /* runpath contains current effective LD_LIBRARY_PATH setting */ - - jvmpath = JLI_StringDup(jvmpath); - new_runpath = JLI_MemAlloc( ((runpath!=NULL)?strlen(runpath):0) + - 2*strlen(jrepath) + 2*strlen(arch) + - strlen(jvmpath) + 52); - newpath = new_runpath + strlen(LD_LIBRARY_PATH "="); - - - /* - * Create desired LD_LIBRARY_PATH value for target data model. 
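
One guard in the passage above is easy to miss: under set-uid/set-gid, glibc and the BSD libc scrub LD_LIBRARY_PATH, and since the launcher detects a prior exec by recognizing its own prefix in that variable, a scrubbed value would re-exec forever; the code therefore returns early. The check, distilled (running_setugid is a hypothetical wrapper around the calls the launcher makes):

#include <unistd.h>

// True when the process runs with elevated (set-uid/set-gid) credentials,
// in which case the dynamic linker ignores LD_LIBRARY_PATH anyway.
static bool running_setugid() {
#if defined(_ALLBSD_SOURCE) || defined(__APPLE__)
  return issetugid() != 0;          // BSD provides a direct predicate
#else
  return getuid() != geteuid() || getgid() != getegid();
#endif
}

In that case library lookup has to come from rpaths or ld.so.conf rather than the environment, as the original comments note.
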
- */ - { - /* remove the name of the .so from the JVM path */ - lastslash = strrchr(jvmpath, '/'); - if (lastslash) - *lastslash = '\0'; - - - /* jvmpath, ((running != wanted)?((wanted==64)?"/"LIBARCH64NAME:"/.."):""), */ - - sprintf(new_runpath, LD_LIBRARY_PATH "=" - "%s:" - "%s/lib/%s:" - "%s/../lib/%s", - jvmpath, -#ifdef DUAL_MODE - jrepath, ((wanted==64)?LIBARCH64NAME:LIBARCH32NAME), - jrepath, ((wanted==64)?LIBARCH64NAME:LIBARCH32NAME) -#else - jrepath, arch, - jrepath, arch -#endif - ); - - - /* - * Check to make sure that the prefix of the current path is the - * desired environment variable setting. - */ - if (runpath != NULL && - strncmp(newpath, runpath, strlen(newpath))==0 && - (runpath[strlen(newpath)] == 0 || runpath[strlen(newpath)] == ':') && - (running == wanted) /* data model does not have to be changed */ -#ifdef __sun - && (dmpath == NULL) /* data model specific variables not set */ -#endif - ) { - - return; - - } - } - - /* - * Place the desired environment setting onto the prefix of - * LD_LIBRARY_PATH. Note that this prevents any possible infinite - * loop of execv() because we test for the prefix, above. - */ - if (runpath != 0) { - strcat(new_runpath, ":"); - strcat(new_runpath, runpath); - } - - if( putenv(new_runpath) != 0) { - exit(1); /* problem allocating memory; LD_LIBRARY_PATH not set - properly */ - } - - /* - * Unix systems document that they look at LD_LIBRARY_PATH only - * once at startup, so we have to re-exec the current executable - * to get the changed environment variable to have an effect. - */ - -#ifdef __sun - /* - * If dmpath is not NULL, remove the data model specific string - * in the environment for the exec'ed child. - */ - - if( dmpath != NULL) - (void)UnsetEnv((wanted==32)?"LD_LIBRARY_PATH_32":"LD_LIBRARY_PATH_64"); -#endif - - newenvp = environ; - - { - char *newexec = execname; -#ifdef DUAL_MODE - /* - * If the data model is being changed, the path to the - * executable must be updated accordingly; the executable name - * and directory the executable resides in are separate. In the - * case of 32 => 64, the new bits are assumed to reside in, e.g. - * "olddir/LIBARCH64NAME/execname"; in the case of 64 => 32, - * the bits are assumed to be in "olddir/../execname". For example, - * - * olddir/sparcv9/execname - * olddir/amd64/execname - * - * for Solaris SPARC and Linux amd64, respectively. - */ - - if (running != wanted) { - char *oldexec = strcpy(JLI_MemAlloc(strlen(execname) + 1), execname); - char *olddir = oldexec; - char *oldbase = strrchr(oldexec, '/'); - - - newexec = JLI_MemAlloc(strlen(execname) + 20); - *oldbase++ = 0; - sprintf(newexec, "%s/%s/%s", olddir, - ((wanted==64) ? 
LIBARCH64NAME : ".."), oldbase); - argv[0] = newexec; - } -#endif - - (void)fflush(stdout); - (void)fflush(stderr); - execve(newexec, argv, newenvp); - perror("execve()"); - - fprintf(stderr, "Error trying to exec %s.\n", newexec); - fprintf(stderr, "Check if file exists and permissions are set correctly.\n"); - -#ifdef DUAL_MODE - if (running != wanted) { - fprintf(stderr, "Failed to start a %d-bit JVM process from a %d-bit JVM.\n", - wanted, running); -# ifdef __sun - -# ifdef __sparc - fprintf(stderr, "Verify all necessary J2SE components have been installed.\n" ); - fprintf(stderr, - "(Solaris SPARC 64-bit components must be installed after 32-bit components.)\n" ); -# else - fprintf(stderr, "Either 64-bit processes are not supported by this platform\n"); - fprintf(stderr, "or the 64-bit components have not been installed.\n"); -# endif - } -# endif -#endif - - } - - exit(1); - } - -#else /* ifndef GAMMA */ - - /* - * gamma launcher is simpler in that it doesn't handle VM flavors, data - * model, LD_LIBRARY_PATH, etc. Assuming everything is set-up correctly - * all we need to do here is to return correct path names. See also - * GetJVMPath() and GetApplicationHome(). - */ - - { char *arch = (char *) ARCH; /* like sparc or sparcv9 */ - char *p; - - if (!GetJREPath(jrepath, so_jrepath, arch, JNI_FALSE) ) { - fprintf(stderr, "Error: could not find Java 2 Runtime Environment.\n"); - exit(2); - } - - if (!GetJVMPath(jrepath, NULL, jvmpath, so_jvmpath, arch )) { - fprintf(stderr, "Error: no JVM at `%s'.\n", jvmpath); - exit(4); - } - } - -#endif /* ifndef GAMMA */ -} - - -/* - * On Solaris VM choosing is done by the launcher (java.c). - */ -static jboolean -GetJVMPath(const char *jrepath, const char *jvmtype, - char *jvmpath, jint jvmpathsize, char * arch) -{ - struct stat s; - -#ifndef GAMMA - if (strchr(jvmtype, '/')) { - sprintf(jvmpath, "%s/" JVM_DLL, jvmtype); - } else { - sprintf(jvmpath, "%s/lib/%s/%s/" JVM_DLL, jrepath, arch, jvmtype); - } -#else - /* - * For gamma launcher, JVM is either built-in or in the same directory. - * Either way we return "/libjvm.so" where is the - * directory where gamma launcher is located. - */ - - char *p; - - snprintf(jvmpath, jvmpathsize, "%s", GetExecname()); - p = strrchr(jvmpath, '/'); - if (p) { - /* replace executable name with libjvm.so */ - snprintf(p + 1, jvmpathsize - (p + 1 - jvmpath), "%s", JVM_DLL); - } else { - /* this case shouldn't happen */ - snprintf(jvmpath, jvmpathsize, "%s", JVM_DLL); - } -#endif /* ifndef GAMMA */ - - if (_launcher_debug) - printf("Does `%s' exist ... ", jvmpath); - - if (stat(jvmpath, &s) == 0) { - if (_launcher_debug) - printf("yes.\n"); - return JNI_TRUE; - } else { - if (_launcher_debug) - printf("no.\n"); - return JNI_FALSE; - } -} - -/* - * Find path to JRE based on .exe's location or registry settings. - */ -static jboolean -GetJREPath(char *path, jint pathsize, char * arch, jboolean speculative) -{ - char libjava[MAXPATHLEN]; - - if (GetApplicationHome(path, pathsize)) { - - /* Is the JRE universal, i.e. no arch dir? */ - sprintf(libjava, "%s/jre/lib/" JAVA_DLL, path); - if (access(libjava, F_OK) == 0) { - strcat(path, "/jre"); - goto found; - } - - /* Is JRE co-located with the application? */ - sprintf(libjava, "%s/lib/%s/" JAVA_DLL, path, arch); - if (access(libjava, F_OK) == 0) { - goto found; - } - - /* Does the app ship a private JRE in /jre directory? 
*/ - sprintf(libjava, "%s/jre/lib/%s/" JAVA_DLL, path, arch); - if (access(libjava, F_OK) == 0) { - strcat(path, "/jre"); - goto found; - } - } - - if (!speculative) - fprintf(stderr, "Error: could not find " JAVA_DLL "\n"); - return JNI_FALSE; - - found: - if (_launcher_debug) - printf("JRE path is %s\n", path); - return JNI_TRUE; -} - -jboolean -LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn) -{ -#ifdef GAMMA - /* JVM is directly linked with gamma launcher; no dlopen() */ - ifn->CreateJavaVM = JNI_CreateJavaVM; - ifn->GetDefaultJavaVMInitArgs = JNI_GetDefaultJavaVMInitArgs; - return JNI_TRUE; -#else - Dl_info dlinfo; - void *libjvm; - - if (_launcher_debug) { - printf("JVM path is %s\n", jvmpath); - } - - libjvm = dlopen(jvmpath, RTLD_NOW + RTLD_GLOBAL); - if (libjvm == NULL) { -#if defined(__sparc) && !defined(_LP64) /* i.e. 32-bit sparc */ - FILE * fp; - Elf32_Ehdr elf_head; - int count; - int location; - - fp = fopen(jvmpath, "r"); - if(fp == NULL) - goto error; - - /* read in elf header */ - count = fread((void*)(&elf_head), sizeof(Elf32_Ehdr), 1, fp); - fclose(fp); - if(count < 1) - goto error; - - /* - * Check for running a server vm (compiled with -xarch=v8plus) - * on a stock v8 processor. In this case, the machine type in - * the elf header would not be included the architecture list - * provided by the isalist command, which is turn is gotten from - * sysinfo. This case cannot occur on 64-bit hardware and thus - * does not have to be checked for in binaries with an LP64 data - * model. - */ - if(elf_head.e_machine == EM_SPARC32PLUS) { - char buf[257]; /* recommended buffer size from sysinfo man - page */ - long length; - char* location; - - length = sysinfo(SI_ISALIST, buf, 257); - if(length > 0) { - location = strstr(buf, "sparcv8plus "); - if(location == NULL) { - fprintf(stderr, "SPARC V8 processor detected; Server compiler requires V9 or better.\n"); - fprintf(stderr, "Use Client compiler on V8 processors.\n"); - fprintf(stderr, "Could not create the Java virtual machine.\n"); - return JNI_FALSE; - } - } - } -#endif - fprintf(stderr, "dl failure on line %d", __LINE__); - goto error; - } - - ifn->CreateJavaVM = (CreateJavaVM_t) - dlsym(libjvm, "JNI_CreateJavaVM"); - if (ifn->CreateJavaVM == NULL) - goto error; - - ifn->GetDefaultJavaVMInitArgs = (GetDefaultJavaVMInitArgs_t) - dlsym(libjvm, "JNI_GetDefaultJavaVMInitArgs"); - if (ifn->GetDefaultJavaVMInitArgs == NULL) - goto error; - - return JNI_TRUE; - -error: - fprintf(stderr, "Error: failed %s, because %s\n", jvmpath, dlerror()); - return JNI_FALSE; -#endif /* ifndef GAMMA */ -} - -/* - * If app is "/foo/bin/javac", or "/foo/bin/sparcv9/javac" then put - * "/foo" into buf. 
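
LoadJavaVM above is the standard dlopen/dlsym bootstrap: open libjvm with RTLD_NOW|RTLD_GLOBAL and resolve the two invocation entry points into the function table. Its essential shape, with the SPARC ELF probing and most error paths trimmed; the typedefs are simplified from jni.h, where the first CreateJavaVM parameter is really JavaVM**:

#include <dlfcn.h>
#include <cstdio>

// Entry-point typedefs, simplified from the JNI invocation API.
typedef int (*CreateJavaVM_t)(void** pvm, void** penv, void* args);
typedef int (*GetDefaultArgs_t)(void* args);

struct InvocationFunctionsSketch {
  CreateJavaVM_t   CreateJavaVM;
  GetDefaultArgs_t GetDefaultJavaVMInitArgs;
};

static bool load_jvm(const char* jvmpath, InvocationFunctionsSketch* ifn) {
  // RTLD_GLOBAL so libjvm's symbols are visible to JNI libraries loaded later.
  void* libjvm = dlopen(jvmpath, RTLD_NOW | RTLD_GLOBAL);
  if (libjvm == NULL) {
    fprintf(stderr, "dlopen(%s) failed: %s\n", jvmpath, dlerror());
    return false;
  }
  ifn->CreateJavaVM = (CreateJavaVM_t)dlsym(libjvm, "JNI_CreateJavaVM");
  ifn->GetDefaultJavaVMInitArgs =
      (GetDefaultArgs_t)dlsym(libjvm, "JNI_GetDefaultJavaVMInitArgs");
  return ifn->CreateJavaVM != NULL && ifn->GetDefaultJavaVMInitArgs != NULL;
}
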
- */ -jboolean -GetApplicationHome(char *buf, jint bufsize) -{ -#if defined(__linux__) || defined(_ALLBSD_SOURCE) - char *execname = GetExecname(); - if (execname) { - strncpy(buf, execname, bufsize-1); - buf[bufsize-1] = '\0'; - } else { - return JNI_FALSE; - } -#else - Dl_info dlinfo; - - dladdr((void *)GetApplicationHome, &dlinfo); - if (realpath(dlinfo.dli_fname, buf) == NULL) { - fprintf(stderr, "Error: realpath(`%s') failed.\n", dlinfo.dli_fname); - return JNI_FALSE; - } -#endif - -#ifdef GAMMA - { - /* gamma launcher uses JAVA_HOME environment variable to find JDK/JRE */ - char* java_home_var = getenv("JAVA_HOME"); - if (java_home_var == NULL) { - printf("JAVA_HOME must point to a valid JDK/JRE to run gamma\n"); - return JNI_FALSE; - } - snprintf(buf, bufsize, "%s", java_home_var); - } -#else - if (strrchr(buf, '/') == 0) { - buf[0] = '\0'; - return JNI_FALSE; - } - *(strrchr(buf, '/')) = '\0'; /* executable file */ - if (strlen(buf) < 4 || strrchr(buf, '/') == 0) { - buf[0] = '\0'; - return JNI_FALSE; - } - if (strcmp("/bin", buf + strlen(buf) - 4) != 0) - *(strrchr(buf, '/')) = '\0'; /* sparcv9 or amd64 */ - if (strlen(buf) < 4 || strcmp("/bin", buf + strlen(buf) - 4) != 0) { - buf[0] = '\0'; - return JNI_FALSE; - } - *(strrchr(buf, '/')) = '\0'; /* bin */ -#endif /* ifndef GAMMA */ - - return JNI_TRUE; -} - - -/* - * Return true if the named program exists - */ -static int -ProgramExists(char *name) -{ - struct stat sb; - if (stat(name, &sb) != 0) return 0; - if (S_ISDIR(sb.st_mode)) return 0; - return (sb.st_mode & S_IEXEC) != 0; -} - - -/* - * Find a command in a directory, returning the path. - */ -static char * -Resolve(char *indir, char *cmd) -{ - char name[PATH_MAX + 2], *real; - - if ((strlen(indir) + strlen(cmd) + 1) > PATH_MAX) return 0; - sprintf(name, "%s%c%s", indir, FILE_SEPARATOR, cmd); - if (!ProgramExists(name)) return 0; - real = JLI_MemAlloc(PATH_MAX + 2); - if (!realpath(name, real)) - strcpy(real, name); - return real; -} - - -/* - * Find a path for the executable - */ -static char * -FindExecName(char *program) -{ - char cwdbuf[PATH_MAX+2]; - char *path; - char *tmp_path; - char *f; - char *result = NULL; - - /* absolute path? */ - if (*program == FILE_SEPARATOR || - (FILE_SEPARATOR=='\\' && strrchr(program, ':'))) - return Resolve("", program+1); - - /* relative path? */ - if (strrchr(program, FILE_SEPARATOR) != 0) { - char buf[PATH_MAX+2]; - return Resolve(getcwd(cwdbuf, sizeof(cwdbuf)), program); - } - - /* from search path? */ - path = getenv("PATH"); - if (!path || !*path) path = "."; - tmp_path = JLI_MemAlloc(strlen(path) + 2); - strcpy(tmp_path, path); - - for (f=tmp_path; *f && result==0; ) { - char *s = f; - while (*f && (*f != PATH_SEPARATOR)) ++f; - if (*f) *f++ = 0; - if (*s == FILE_SEPARATOR) - result = Resolve(s, program); - else { - /* relative path element */ - char dir[2*PATH_MAX]; - sprintf(dir, "%s%c%s", getcwd(cwdbuf, sizeof(cwdbuf)), - FILE_SEPARATOR, s); - result = Resolve(dir, program); - } - if (result != 0) break; - } - - JLI_MemFree(tmp_path); - return result; -} - - -/* Store the name of the executable once computed */ -static char *execname = NULL; - -/* - * Compute the name of the executable - * - * In order to re-exec securely we need the absolute path of the - * executable. On Solaris getexecname(3c) may not return an absolute - * path so we use dladdr to get the filename of the executable and - * then use realpath to derive an absolute path. 
From Solaris 9 - * onwards the filename returned in DL_info structure from dladdr is - * an absolute pathname so technically realpath isn't required. - * On Linux we read the executable name from /proc/self/exe. - * As a fallback, and for platforms other than Solaris and Linux, - * we use FindExecName to compute the executable name. - */ -static char * -SetExecname(char **argv) -{ - char* exec_path = NULL; - - if (execname != NULL) /* Already determined */ - return (execname); - -#if defined(__sun) - { - Dl_info dlinfo; - if (dladdr((void*)&SetExecname, &dlinfo)) { - char *resolved = (char*)JLI_MemAlloc(PATH_MAX+1); - if (resolved != NULL) { - exec_path = realpath(dlinfo.dli_fname, resolved); - if (exec_path == NULL) { - JLI_MemFree(resolved); - } - } - } - } -#elif defined(__linux__) - { - const char* self = "/proc/self/exe"; - char buf[PATH_MAX+1]; - int len = readlink(self, buf, PATH_MAX); - if (len >= 0) { - buf[len] = '\0'; /* readlink doesn't nul terminate */ - exec_path = JLI_StringDup(buf); - } - } -#else /* !__sun && !__linux */ - { - /* Not implemented */ - } -#endif - - if (exec_path == NULL) { - exec_path = FindExecName(argv[0]); - } - execname = exec_path; - return exec_path; -} - -/* - * Return the name of the executable. Used in java_md.c to find the JRE area. - */ -static char * -GetExecname() { - return execname; -} - -void ReportErrorMessage(char * message, jboolean always) { - if (always) { - fprintf(stderr, "%s\n", message); - } -} - -void ReportErrorMessage2(char * format, char * string, jboolean always) { - if (always) { - fprintf(stderr, format, string); - fprintf(stderr, "\n"); - } -} - -void ReportExceptionDescription(JNIEnv * env) { - (*env)->ExceptionDescribe(env); -} - -/* - * Return JNI_TRUE for an option string that has no effect but should - * _not_ be passed on to the vm; return JNI_FALSE otherwise. On - * Solaris SPARC, this screening needs to be done if: - * 1) LD_LIBRARY_PATH does _not_ need to be reset and - * 2) -d32 or -d64 is passed to a binary with a matching data model - * (the exec in SetLibraryPath removes -d options and points the - * exec to the proper binary). When this exec is not done, these options - * would end up getting passed onto the vm. - */ -jboolean RemovableMachineDependentOption(char * option) { - /* - * Unconditionally remove both -d32 and -d64 options since only - * the last such options has an effect; e.g. - * java -d32 -d64 -d32 -version - * is equivalent to - * java -d32 -version - */ - - if( (strcmp(option, "-d32") == 0 ) || - (strcmp(option, "-d64") == 0 )) - return JNI_TRUE; - else - return JNI_FALSE; -} - -void PrintMachineDependentOptions() { - fprintf(stdout, - " -d32 use a 32-bit data model if available\n" - "\n" - " -d64 use a 64-bit data model if available\n"); - return; -} - -#ifndef GAMMA -/* - * The following methods (down to ServerClassMachine()) answer - * the question about whether a machine is a "server-class" - * machine. A server-class machine is loosely defined as one - * with 2 or more processors and 2 gigabytes or more physical - * memory. The definition of a processor is a physical package, - * not a hyperthreaded chip masquerading as a multi-processor. - * The definition of memory is also somewhat fuzzy, since x86 - * machines seem not to report all the memory in their DIMMs, we - * think because of memory mapping of graphics cards, etc. 
- * - * This code is somewhat more confused with #ifdef's than we'd - * like because this file is used by both Solaris and Linux - * platforms, and so needs to be parameterized for SPARC and - * i586 hardware. The other Linux platforms (amd64 and ia64) - * don't even ask this question, because they only come with - * server JVMs. */ - -# define KB (1024UL) -# define MB (1024UL * KB) -# define GB (1024UL * MB) - -/* Compute physical memory by asking the OS */ -uint64_t -physical_memory(void) { - const uint64_t pages = (uint64_t) sysconf(_SC_PHYS_PAGES); - const uint64_t page_size = (uint64_t) sysconf(_SC_PAGESIZE); - const uint64_t result = pages * page_size; -# define UINT64_FORMAT "%" PRIu64 - - if (_launcher_debug) { - printf("pages: " UINT64_FORMAT - " page_size: " UINT64_FORMAT - " physical memory: " UINT64_FORMAT " (%.3fGB)\n", - pages, page_size, result, result / (double) GB); - } - return result; -} - -#if defined(__sun) && defined(__sparc) - -/* Methods for solaris-sparc: these are easy. */ - -/* Ask the OS how many processors there are. */ -unsigned long -physical_processors(void) { - const unsigned long sys_processors = sysconf(_SC_NPROCESSORS_CONF); - - if (_launcher_debug) { - printf("sysconf(_SC_NPROCESSORS_CONF): %lu\n", sys_processors); - } - return sys_processors; -} - -/* The solaris-sparc version of the "server-class" predicate. */ -jboolean -solaris_sparc_ServerClassMachine(void) { - jboolean result = JNI_FALSE; - /* How big is a server class machine? */ - const unsigned long server_processors = 2UL; - const uint64_t server_memory = 2UL * GB; - const uint64_t actual_memory = physical_memory(); - - /* Is this a server class machine? */ - if (actual_memory >= server_memory) { - const unsigned long actual_processors = physical_processors(); - if (actual_processors >= server_processors) { - result = JNI_TRUE; - } - } - if (_launcher_debug) { - printf("solaris_" LIBARCHNAME "_ServerClassMachine: %s\n", - (result == JNI_TRUE ? "JNI_TRUE" : "JNI_FALSE")); - } - return result; -} - -#endif /* __sun && __sparc */ - -#if defined(__sun) && defined(i586) - -/* - * A utility method for asking the CPU about itself. - * There's a corresponding version of linux-i586 - * because the compilers are different. - */ -void -get_cpuid(uint32_t arg, - uint32_t* eaxp, - uint32_t* ebxp, - uint32_t* ecxp, - uint32_t* edxp) { -#ifdef _LP64 - asm( - /* rbx is a callee-saved register */ - " movq %rbx, %r11 \n" - /* rdx and rcx are 3rd and 4th argument registers */ - " movq %rdx, %r10 \n" - " movq %rcx, %r9 \n" - " movl %edi, %eax \n" - " cpuid \n" - " movl %eax, (%rsi)\n" - " movl %ebx, (%r10)\n" - " movl %ecx, (%r9) \n" - " movl %edx, (%r8) \n" - /* Restore rbx */ - " movq %r11, %rbx"); -#else - /* EBX is a callee-saved register */ - asm(" pushl %ebx"); - /* Need ESI for storing through arguments */ - asm(" pushl %esi"); - asm(" movl 8(%ebp), %eax \n" - " cpuid \n" - " movl 12(%ebp), %esi \n" - " movl %eax, (%esi) \n" - " movl 16(%ebp), %esi \n" - " movl %ebx, (%esi) \n" - " movl 20(%ebp), %esi \n" - " movl %ecx, (%esi) \n" - " movl 24(%ebp), %esi \n" - " movl %edx, (%esi) "); - /* Restore ESI and EBX */ - asm(" popl %esi"); - /* Restore EBX */ - asm(" popl %ebx"); -#endif -} - -#endif /* __sun && i586 */ - -#if (defined(__linux__) || defined(_ALLBSD_SOURCE)) && defined(i586) - -/* - * A utility method for asking the CPU about itself. - * There's a corresponding version of solaris-i586 - * because the compilers are different. 
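
Setting aside the cpuid plumbing, the server-class predicate above boils down to two sysconf queries against hard-coded thresholds of 2 processors and 2 GB of physical memory. A portable condensation; note that sysconf counts logical processors, whereas the x86 paths here go on to discount hyperthreaded siblings via cpuid:

#include <unistd.h>
#include <cstdio>
#include <stdint.h>

// "Server-class" per the launcher: >= 2 CPUs and >= 2 GB physical memory.
static bool server_class_machine() {
  const uint64_t GB = 1024ULL * 1024ULL * 1024ULL;
  long cpus = sysconf(_SC_NPROCESSORS_CONF);
  uint64_t mem = (uint64_t)sysconf(_SC_PHYS_PAGES) * (uint64_t)sysconf(_SC_PAGESIZE);
  return cpus >= 2 && mem >= 2 * GB;
}

int main() {
  printf("server-class: %s\n", server_class_machine() ? "yes" : "no");
  return 0;
}
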
- */ -void -get_cpuid(uint32_t arg, - uint32_t* eaxp, - uint32_t* ebxp, - uint32_t* ecxp, - uint32_t* edxp) { -#ifdef _LP64 - __asm__ volatile (/* Instructions */ - " movl %4, %%eax \n" - " cpuid \n" - " movl %%eax, (%0)\n" - " movl %%ebx, (%1)\n" - " movl %%ecx, (%2)\n" - " movl %%edx, (%3)\n" - : /* Outputs */ - : /* Inputs */ - "r" (eaxp), - "r" (ebxp), - "r" (ecxp), - "r" (edxp), - "r" (arg) - : /* Clobbers */ - "%rax", "%rbx", "%rcx", "%rdx", "memory" - ); -#else - uint32_t value_of_eax = 0; - uint32_t value_of_ebx = 0; - uint32_t value_of_ecx = 0; - uint32_t value_of_edx = 0; - __asm__ volatile (/* Instructions */ - /* ebx is callee-save, so push it */ - " pushl %%ebx \n" - " movl %4, %%eax \n" - " cpuid \n" - " movl %%eax, %0 \n" - " movl %%ebx, %1 \n" - " movl %%ecx, %2 \n" - " movl %%edx, %3 \n" - /* restore ebx */ - " popl %%ebx \n" - - : /* Outputs */ - "=m" (value_of_eax), - "=m" (value_of_ebx), - "=m" (value_of_ecx), - "=m" (value_of_edx) - : /* Inputs */ - "m" (arg) - : /* Clobbers */ - "%eax", "%ecx", "%edx" - ); - *eaxp = value_of_eax; - *ebxp = value_of_ebx; - *ecxp = value_of_ecx; - *edxp = value_of_edx; -#endif -} - -#endif /* __linux__ && i586 */ - -#ifdef i586 -/* - * Routines shared by solaris-i586 and linux-i586. - */ - -enum HyperThreadingSupport_enum { - hts_supported = 1, - hts_too_soon_to_tell = 0, - hts_not_supported = -1, - hts_not_pentium4 = -2, - hts_not_intel = -3 -}; -typedef enum HyperThreadingSupport_enum HyperThreadingSupport; - -/* Determine if hyperthreading is supported */ -HyperThreadingSupport -hyperthreading_support(void) { - HyperThreadingSupport result = hts_too_soon_to_tell; - /* Bits 11 through 8 are the family processor id */ -# define FAMILY_ID_SHIFT 8 -# define FAMILY_ID_MASK 0xf - /* Bits 23 through 20 are the extended family processor id */ -# define EXT_FAMILY_ID_SHIFT 20 -# define EXT_FAMILY_ID_MASK 0xf - /* Pentium 4 family processor id */ -# define PENTIUM4_FAMILY_ID 0xf - /* Bit 28 indicates Hyper-Threading Technology support */ -# define HT_BIT_SHIFT 28 -# define HT_BIT_MASK 1 - uint32_t vendor_id[3] = { 0U, 0U, 0U }; - uint32_t value_of_eax = 0U; - uint32_t value_of_edx = 0U; - uint32_t dummy = 0U; - - /* Yes, this is supposed to be [0], [2], [1] */ - get_cpuid(0, &dummy, &vendor_id[0], &vendor_id[2], &vendor_id[1]); - if (_launcher_debug) { - printf("vendor: %c %c %c %c %c %c %c %c %c %c %c %c \n", - ((vendor_id[0] >> 0) & 0xff), - ((vendor_id[0] >> 8) & 0xff), - ((vendor_id[0] >> 16) & 0xff), - ((vendor_id[0] >> 24) & 0xff), - ((vendor_id[1] >> 0) & 0xff), - ((vendor_id[1] >> 8) & 0xff), - ((vendor_id[1] >> 16) & 0xff), - ((vendor_id[1] >> 24) & 0xff), - ((vendor_id[2] >> 0) & 0xff), - ((vendor_id[2] >> 8) & 0xff), - ((vendor_id[2] >> 16) & 0xff), - ((vendor_id[2] >> 24) & 0xff)); - } - get_cpuid(1, &value_of_eax, &dummy, &dummy, &value_of_edx); - if (_launcher_debug) { - printf("value_of_eax: 0x%x value_of_edx: 0x%x\n", - value_of_eax, value_of_edx); - } - if ((((value_of_eax >> FAMILY_ID_SHIFT) & FAMILY_ID_MASK) == PENTIUM4_FAMILY_ID) || - (((value_of_eax >> EXT_FAMILY_ID_SHIFT) & EXT_FAMILY_ID_MASK) != 0)) { - if ((((vendor_id[0] >> 0) & 0xff) == 'G') && - (((vendor_id[0] >> 8) & 0xff) == 'e') && - (((vendor_id[0] >> 16) & 0xff) == 'n') && - (((vendor_id[0] >> 24) & 0xff) == 'u') && - (((vendor_id[1] >> 0) & 0xff) == 'i') && - (((vendor_id[1] >> 8) & 0xff) == 'n') && - (((vendor_id[1] >> 16) & 0xff) == 'e') && - (((vendor_id[1] >> 24) & 0xff) == 'I') && - (((vendor_id[2] >> 0) & 0xff) == 'n') && - (((vendor_id[2] >> 8) & 0xff) == 't') &&
- (((vendor_id[2] >> 16) & 0xff) == 'e') && - (((vendor_id[2] >> 24) & 0xff) == 'l')) { - if (((value_of_edx >> HT_BIT_SHIFT) & HT_BIT_MASK) == HT_BIT_MASK) { - if (_launcher_debug) { - printf("Hyperthreading supported\n"); - } - result = hts_supported; - } else { - if (_launcher_debug) { - printf("Hyperthreading not supported\n"); - } - result = hts_not_supported; - } - } else { - if (_launcher_debug) { - printf("Not GenuineIntel\n"); - } - result = hts_not_intel; - } - } else { - if (_launcher_debug) { - printf("not Pentium 4 or extended\n"); - } - result = hts_not_pentium4; - } - return result; -} - -/* Determine how many logical processors there are per CPU */ -unsigned int -logical_processors_per_package(void) { - /* - * After CPUID with EAX==1, register EBX bits 23 through 16 - * indicate the number of logical processors per package - */ -# define NUM_LOGICAL_SHIFT 16 -# define NUM_LOGICAL_MASK 0xff - unsigned int result = 1U; - const HyperThreadingSupport hyperthreading = hyperthreading_support(); - - if (hyperthreading == hts_supported) { - uint32_t value_of_ebx = 0U; - uint32_t dummy = 0U; - - get_cpuid(1, &dummy, &value_of_ebx, &dummy, &dummy); - result = (value_of_ebx >> NUM_LOGICAL_SHIFT) & NUM_LOGICAL_MASK; - if (_launcher_debug) { - printf("logical processors per package: %u\n", result); - } - } - return result; -} - -/* Compute the number of physical processors, not logical processors */ -unsigned long -physical_processors(void) { - const long sys_processors = sysconf(_SC_NPROCESSORS_CONF); - unsigned long result = sys_processors; - - if (_launcher_debug) { - printf("sysconf(_SC_NPROCESSORS_CONF): %lu\n", sys_processors); - } - if (sys_processors > 1) { - unsigned int logical_processors = logical_processors_per_package(); - if (logical_processors > 1) { - result = (unsigned long) sys_processors / logical_processors; - } - } - if (_launcher_debug) { - printf("physical processors: %lu\n", result); - } - return result; -} - -#endif /* i586 */ - -#if defined(__sun) && defined(i586) - -/* The definition of a server-class machine for solaris-i586/amd64 */ -jboolean -solaris_i586_ServerClassMachine(void) { - jboolean result = JNI_FALSE; - /* How big is a server class machine? */ - const unsigned long server_processors = 2UL; - const uint64_t server_memory = 2UL * GB; - /* - * We seem not to get our full complement of memory. - * We allow some part (1/8?) of the memory to be "missing", - * based on the sizes of DIMMs, and maybe graphics cards. - */ - const uint64_t missing_memory = 256UL * MB; - const uint64_t actual_memory = physical_memory(); - - /* Is this a server class machine? */ - if (actual_memory >= (server_memory - missing_memory)) { - const unsigned long actual_processors = physical_processors(); - if (actual_processors >= server_processors) { - result = JNI_TRUE; - } - } - if (_launcher_debug) { - printf("solaris_" LIBARCHNAME "_ServerClassMachine: %s\n", - (result == JNI_TRUE ? "true" : "false")); - } - return result; -} - -#endif /* __sun && i586 */ - -#if defined(__linux__) && defined(i586) - -/* The definition of a server-class machine for linux-i586 */ -jboolean -linux_i586_ServerClassMachine(void) { - jboolean result = JNI_FALSE; - /* How big is a server class machine? */ - const unsigned long server_processors = 2UL; - const uint64_t server_memory = 2UL * GB; - /* - * We seem not to get our full complement of memory. - * We allow some part (1/8?) of the memory to be "missing", - * based on the sizes of DIMMs, and maybe graphics cards. 
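(Concretely, with the constants just below, the memory test passes at 2048 MB - 256 MB = 1792 MB, so a nominal 2 GB machine that loses up to 256 MB to device mappings still qualifies.)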
- */ - const uint64_t missing_memory = 256UL * MB; - const uint64_t actual_memory = physical_memory(); - - /* Is this a server class machine? */ - if (actual_memory >= (server_memory - missing_memory)) { - const unsigned long actual_processors = physical_processors(); - if (actual_processors >= server_processors) { - result = JNI_TRUE; - } - } - if (_launcher_debug) { - printf("linux_" LIBARCHNAME "_ServerClassMachine: %s\n", - (result == JNI_TRUE ? "true" : "false")); - } - return result; -} - -#endif /* __linux__ && i586 */ - -#if defined(_ALLBSD_SOURCE) && defined(i586) - -/* The definition of a server-class machine for bsd-i586 */ -jboolean -bsd_i586_ServerClassMachine(void) { - jboolean result = JNI_FALSE; - /* How big is a server class machine? */ - const unsigned long server_processors = 2UL; - const uint64_t server_memory = 2UL * GB; - /* - * We seem not to get our full complement of memory. - * We allow some part (1/8?) of the memory to be "missing", - * based on the sizes of DIMMs, and maybe graphics cards. - */ - const uint64_t missing_memory = 256UL * MB; - const uint64_t actual_memory = physical_memory(); - - /* Is this a server class machine? */ - if (actual_memory >= (server_memory - missing_memory)) { - const unsigned long actual_processors = physical_processors(); - if (actual_processors >= server_processors) { - result = JNI_TRUE; - } - } - if (_launcher_debug) { - printf("bsd_" LIBARCHNAME "_ServerClassMachine: %s\n", - (result == JNI_TRUE ? "true" : "false")); - } - return result; -} - -#endif /* _ALLBSD_SOURCE && i586 */ - -/* Dispatch to the platform-specific definition of "server-class" */ -jboolean -ServerClassMachine(void) { - jboolean result = JNI_FALSE; -#if defined(NEVER_ACT_AS_SERVER_CLASS_MACHINE) - result = JNI_FALSE; -#elif defined(ALWAYS_ACT_AS_SERVER_CLASS_MACHINE) - result = JNI_TRUE; -#elif defined(__sun) && defined(__sparc) - result = solaris_sparc_ServerClassMachine(); -#elif defined(__sun) && defined(i586) - result = solaris_i586_ServerClassMachine(); -#elif defined(__linux__) && defined(i586) - result = linux_i586_ServerClassMachine(); -#elif defined(_ALLBSD_SOURCE) && defined(i586) - result = bsd_i586_ServerClassMachine(); -#else - if (_launcher_debug) { - printf("ServerClassMachine: returns default value of %s\n", - (result == JNI_TRUE ? "true" : "false")); - } -#endif - return result; -} - -/* - * Since using the file system as a registry is a bit risky, perform - * additional sanity checks on the identified directory to validate - * it as a valid jre/sdk. - * - * Return 0 if the tests fail; otherwise return non-zero (true). - * - * Note that checking for anything more than the existence of an - * executable object at bin/java relative to the path being checked - * will break the regression tests. - */ -static int -CheckSanity(char *path, char *dir) -{ - char buffer[PATH_MAX]; - - if (strlen(path) + strlen(dir) + 11 > PATH_MAX) - return (0); /* Silently reject "impossibly" long paths */ - - (void)strcat(strcat(strcat(strcpy(buffer, path), "/"), dir), "/bin/java"); - return ((access(buffer, X_OK) == 0) ? 1 : 0); -} - -/* - * Determine if there is an acceptable JRE in the directory dirname. - * Upon locating the "best" one, return a fully qualified path to - * it. "Best" is defined as the most advanced JRE meeting the - * constraints contained in the manifest_info. If no JRE in this - * directory meets the constraints, return NULL. - * - * Note that we don't check for errors in reading the directory - * (which would be done by checking errno).
This is because it - * doesn't matter if we get an error reading the directory, or - * we just don't find anything interesting in the directory. We - * just return NULL in either case. - * - * The historical names of j2sdk and j2re were changed to jdk and - * jre respectively as part of the 1.5 rebranding effort. Since the - * former names are legacy on Linux, they must be recognized for - * all time. Fortunately, this is a minor cost. - */ -static char -*ProcessDir(manifest_info *info, char *dirname) -{ - DIR *dirp; - struct dirent *dp; - char *best = NULL; - int offset; - int best_offset = 0; - char *ret_str = NULL; - char buffer[PATH_MAX]; - - if ((dirp = opendir(dirname)) == NULL) - return (NULL); - - do { - if ((dp = readdir(dirp)) != NULL) { - offset = 0; - if ((strncmp(dp->d_name, "jre", 3) == 0) || - (strncmp(dp->d_name, "jdk", 3) == 0)) - offset = 3; - else if (strncmp(dp->d_name, "j2re", 4) == 0) - offset = 4; - else if (strncmp(dp->d_name, "j2sdk", 5) == 0) - offset = 5; - if (offset > 0) { - if ((JLI_AcceptableRelease(dp->d_name + offset, - info->jre_version)) && CheckSanity(dirname, dp->d_name)) - if ((best == NULL) || (JLI_ExactVersionId( - dp->d_name + offset, best + best_offset) > 0)) { - if (best != NULL) - JLI_MemFree(best); - best = JLI_StringDup(dp->d_name); - best_offset = offset; - } - } - } - } while (dp != NULL); - (void) closedir(dirp); - if (best == NULL) - return (NULL); - else { - ret_str = JLI_MemAlloc(strlen(dirname) + strlen(best) + 2); - ret_str = strcat(strcat(strcpy(ret_str, dirname), "/"), best); - JLI_MemFree(best); - return (ret_str); - } -} - -/* - * This is the global entry point. It examines the host for the optimal - * JRE to be used by scanning a set of directories. The set of directories - * is platform dependent and can be overridden by the environment - * variable JAVA_VERSION_PATH. - * - * This routine itself simply determines the set of appropriate - * directories before passing control on to ProcessDir(). - */ -char* -LocateJRE(manifest_info* info) -{ - char *path; - char *home; - char *target = NULL; - char *dp; - char *cp; - - /* - * Start by getting JAVA_VERSION_PATH - */ - if (info->jre_restrict_search) - path = JLI_StringDup(system_dir); - else if ((path = getenv("JAVA_VERSION_PATH")) != NULL) - path = JLI_StringDup(path); - else - if ((home = getenv("HOME")) != NULL) { - path = (char *)JLI_MemAlloc(strlen(home) + strlen(system_dir) + - strlen(user_dir) + 2); - path = strcat(strcat(strcat(strcpy(path, home), - user_dir), ":"), system_dir); - } else - path = JLI_StringDup(system_dir); - - /* - * Step through each directory on the path. Terminate the scan with - * the first directory with an acceptable JRE. - */ - cp = dp = path; - while (dp != NULL) { - cp = strchr(dp, (int)':'); - if (cp != NULL) - *cp = '\0'; - if ((target = ProcessDir(info, dp)) != NULL) - break; - dp = cp; - if (dp != NULL) - dp++; - } - JLI_MemFree(path); - return (target); -} - -/* - * Given a path to a jre to execute, this routine checks if this process - * is indeed that jre. If not, it exec's that jre. - * - * We want to actually check the paths rather than just the version string - * built into the executable, so that a given version specification (and - * JAVA_VERSION_PATH) will yield the exact same Java environment, regardless - * of the version of the arbitrary launcher we start with.
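The decision ExecJRE makes below boils down to a path-prefix test; a minimal illustration (the helper name and paths are invented for the example):

    #include <string.h>

    // 1 if the running launcher already lives under the selected JRE.
    static int already_selected(const char* wanted_dir, const char* exec_path) {
        return strncmp(wanted_dir, exec_path, strlen(wanted_dir)) == 0;
    }
    // already_selected("/usr/jdk/jdk1.5.0", "/usr/jdk/jdk1.5.0/bin/java") -> 1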
- */ -void -ExecJRE(char *jre, char **argv) -{ - char wanted[PATH_MAX]; - char *execname; - char *progname; - char *s; - - /* - * Resolve the real path to the directory containing the selected JRE. - */ - if (realpath(jre, wanted) == NULL) { - fprintf(stderr, "Unable to resolve %s\n", jre); - exit(1); - } - - /* - * Resolve the real path to the currently running launcher. - */ - execname = SetExecname(argv); - if (execname == NULL) { - fprintf(stderr, "Unable to resolve current executable\n"); - exit(1); - } - - /* - * If the path to the selected JRE directory is a match to the initial - * portion of the path to the currently executing JRE, we have a winner! - * If so, just return. - */ - if (strncmp(wanted, execname, strlen(wanted)) == 0) - return; /* I am the droid you were looking for */ - - /* - * If this isn't the selected version, exec the selected version. - */ -#ifdef JAVA_ARGS /* javac, jar and friends. */ - progname = "java"; -#else /* java, oldjava, javaw and friends */ -#ifdef PROGNAME - progname = PROGNAME; -#else - progname = *argv; - if ((s = strrchr(progname, FILE_SEPARATOR)) != 0) { - progname = s + 1; - } -#endif /* PROGNAME */ -#endif /* JAVA_ARGS */ - - /* - * This should never happen (because of the selection code in SelectJRE), - * but check for "impossibly" long path names just because buffer overruns - * can be so deadly. - */ - if (strlen(wanted) + strlen(progname) + 6 > PATH_MAX) { - fprintf(stderr, "Path length exceeds maximum length (PATH_MAX)\n"); - exit(1); - } - - /* - * Construct the path and exec it. - */ - (void)strcat(strcat(wanted, "/bin/"), progname); - argv[0] = progname; - if (_launcher_debug) { - int i; - printf("ReExec Command: %s (%s)\n", wanted, argv[0]); - printf("ReExec Args:"); - for (i = 1; argv[i] != NULL; i++) - printf(" %s", argv[i]); - printf("\n"); - } - (void)fflush(stdout); - (void)fflush(stderr); - execv(wanted, argv); - perror("execv()"); - fprintf(stderr, "Exec of %s failed\n", wanted); - exit(1); -} -#endif /* ifndef GAMMA */ - -/* - * "Borrowed" from Solaris 10 where the unsetenv() function is being added - * to libc thanks to SUSv3 (Single Unix Specification, version 3). As - * such, in the fullness of time this will appear in libc on all relevant - * Solaris/Linux platforms and maybe even the Windows platform. At that - * time, this stub can be removed. - * - * This implementation removes the environment locking for multithreaded - * applications. (We don't have access to these mutexes within libc and - * the launcher isn't multithreaded.) Note that what remains is platform - * independent, because it only relies on attributes that a POSIX environment - * defines. - * - * Returns 0 on success, -1 on failure. - * - * Also removed was the setting of errno. The only value of errno set - * was EINVAL ("Invalid Argument"). - */ - -/* - * s1 (environ entry) has the form name=value; - * s2 (name) is a bare name, not of the form name=value. - * If the names match, return 1, else return 0. - */ -static int -match_noeq(const char *s1, const char *s2) -{ - while (*s1 == *s2++) { - if (*s1++ == '=') - return (1); - } - if (*s1 == '=' && s2[-1] == '\0') - return (1); - return (0); -} - -/* - * added for SUSv3 standard - * - * Delete entry from environ. - * Do not free() memory! Other threads may be using it. - * Keep it around forever.
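A few illustrative cases of this helper pair (values invented for the example):

    // match_noeq("PATH=/bin",  "PATH") -> 1  (names match at the '=')
    // match_noeq("PATHX=/bin", "PATH") -> 0  (different name)
    // borrowed_unsetenv("PATH")        -> 0, and the entry is squeezed
    //                                     out of environ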
- */ -static int -borrowed_unsetenv(const char *name) -{ - long idx; /* index into environ */ - - if (name == NULL || *name == '\0' || - strchr(name, '=') != NULL) { - return (-1); - } - - for (idx = 0; environ[idx] != NULL; idx++) { - if (match_noeq(environ[idx], name)) - break; - } - if (environ[idx] == NULL) { - /* name not found but still a success */ - return (0); - } - /* squeeze up one entry */ - do { - environ[idx] = environ[idx+1]; - } while (environ[++idx] != NULL); - - return (0); -} -/* --- End of "borrowed" code --- */ - -/* - * Wrapper for unsetenv() function. - */ -int -UnsetEnv(char *name) -{ - return(borrowed_unsetenv(name)); -} - -/* --- Splash Screen shared library support --- */ - -static const char* SPLASHSCREEN_SO = "libsplashscreen.so"; - -static void* hSplashLib = NULL; - -void* SplashProcAddress(const char* name) { - if (!hSplashLib) { - hSplashLib = dlopen(SPLASHSCREEN_SO, RTLD_LAZY | RTLD_GLOBAL); - } - if (hSplashLib) { - void* sym = dlsym(hSplashLib, name); - return sym; - } else { - return NULL; - } -} - -void SplashFreeLibrary() { - if (hSplashLib) { - dlclose(hSplashLib); - hSplashLib = NULL; - } -} - -/* - * Block current thread and continue execution in a new thread - */ -int -ContinueInNewThread(int (JNICALL *continuation)(void *), jlong stack_size, void * args) { - int rslt; -#if defined(__linux__) || defined(_ALLBSD_SOURCE) - pthread_t tid; - pthread_attr_t attr; - pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); - - if (stack_size > 0) { - pthread_attr_setstacksize(&attr, stack_size); - } - - if (pthread_create(&tid, &attr, (void *(*)(void*))continuation, (void*)args) == 0) { - void * tmp; - pthread_join(tid, &tmp); - rslt = (int)(intptr_t)tmp; - } else { - /* - * Continue execution in current thread if for some reason (e.g. out of - * memory/LWP) a new thread can't be created. This will likely fail - * later in continuation as JNI_CreateJavaVM needs to create quite a - * few new threads, anyway, just give it a try.. - */ - rslt = continuation(args); - } - - pthread_attr_destroy(&attr); -#else - thread_t tid; - long flags = 0; - if (thr_create(NULL, stack_size, (void *(*)(void *))continuation, args, flags, &tid) == 0) { - void * tmp; - thr_join(tid, NULL, &tmp); - rslt = (int)(intptr_t)tmp; - } else { - /* See above. Continue in current thread if thr_create() failed */ - rslt = continuation(args); - } -#endif - return rslt; -} - -/* Coarse estimation of number of digits assuming the worst case is a 64-bit pid. */ -#define MAX_PID_STR_SZ 20 - -void SetJavaLauncherPlatformProps() { - /* Linux only */ -#ifdef __linux__ - const char *substr = "-Dsun.java.launcher.pid="; - char *pid_prop_str = (char *)JLI_MemAlloc(strlen(substr) + MAX_PID_STR_SZ + 1); - sprintf(pid_prop_str, "%s%d", substr, getpid()); - AddOption(pid_prop_str, NULL); -#endif -} diff -r e0fb8a213650 -r 836a62f43af9 src/os/posix/launcher/java_md.h --- a/src/os/posix/launcher/java_md.h Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,82 +0,0 @@ -/* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef JAVA_MD_H -#define JAVA_MD_H - -#include -#include -#include -#ifndef GAMMA -#include "manifest_info.h" -#endif -#include "jli_util.h" - -#define PATH_SEPARATOR ':' -#define FILESEP "/" -#define FILE_SEPARATOR '/' -#define IS_FILE_SEPARATOR(c) ((c) == '/') -#ifndef MAXNAMELEN -#define MAXNAMELEN PATH_MAX -#endif - -#ifdef JAVA_ARGS -/* - * ApplicationHome is prepended to each of these entries; the resulting - * strings are concatenated (separated by PATH_SEPARATOR) and used as the - * value of -cp option to the launcher. - */ -#ifndef APP_CLASSPATH -#define APP_CLASSPATH { "/lib/tools.jar", "/classes" } -#endif -#endif - -#ifdef HAVE_GETHRTIME -/* - * Support for doing cheap, accurate interval timing. - */ -#include -#define CounterGet() (gethrtime()/1000) -#define Counter2Micros(counts) (counts) -#else -#define CounterGet() (0) -#define Counter2Micros(counts) (1) -#endif /* HAVE_GETHRTIME */ - -#ifdef _LP64 -#define JLONG_FORMAT "%ld" -#else -#define JLONG_FORMAT "%lld" -#endif - -/* - * Function prototypes. - */ -#ifndef GAMMA -char *LocateJRE(manifest_info *info); -void ExecJRE(char *jre, char **argv); -#endif -int UnsetEnv(char *name); - -#endif diff -r e0fb8a213650 -r 836a62f43af9 src/os/posix/launcher/launcher.script --- a/src/os/posix/launcher/launcher.script Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,218 +0,0 @@ -#!/bin/sh - -# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. - - -# This script launches HotSpot. -# -# If the first parameter is either "-gdb" or "-gud", HotSpot will be -# launched inside gdb. "-gud" means "open an Emacs window and run gdb -# inside Emacs". -# -# If the first parameter is "-dbx", HotSpot will be launched inside dbx. 
-# -# If the first parameter is "-valgrind", HotSpot will be launched -# inside Valgrind (http://valgrind.kde.org) using the Memcheck skin, -# and with memory leak detection enabled. This currently (2005jan19) -# requires at least Valgrind 2.3.0. -Xmx16m will also be passed as -# the first parameter to HotSpot, since lowering HotSpot's memory -# consumption makes execution inside of Valgrind *a lot* faster. -# - - -# -# User changeable parameters ------------------------------------------------ -# - -# This is the name of the gdb binary to use -if [ ! "$GDB" ] -then - GDB=gdb -fi - -# This is the name of the dbx binary to use -if [ ! "$DBX" ] -then - DBX=dbx -fi - -# This is the name of the Valgrind binary to use -if [ ! "$VALGRIND" ] -then - VALGRIND=valgrind -fi - -# This is the name of Emacs for running GUD -EMACS=emacs - -# -# End of user changeable parameters ----------------------------------------- -# - -# Make sure the paths are fully specified, i.e. they must begin with /. -REL_MYDIR=`dirname $0` -MYDIR=`cd $REL_MYDIR && pwd` - -# Look whether the user wants to run inside gdb -case "$1" in - -gdb) - MODE=gdb - shift - ;; - -gud) - MODE=gud - shift - ;; - -dbx) - MODE=dbx - shift - ;; - -valgrind) - MODE=valgrind - shift - ;; - *) - MODE=run - ;; -esac - -JDK= -if [ "${ALT_JAVA_HOME}" = "" ]; then - . ${MYDIR}/jdkpath.sh -else - JDK=${ALT_JAVA_HOME%%/jre}; -fi - -if [ "${JDK}" = "" ]; then - echo Failed to find JDK. ALT_JAVA_HOME is not set or ./jdkpath.sh is empty or not found. - exit 1 -fi - -# We will set the LD_LIBRARY_PATH as follows: -# o $JVMPATH (directory portion only) -# o $JRE/lib/$ARCH -# followed by the user's previous effective LD_LIBRARY_PATH, if -# any. -JRE=$JDK/jre -JAVA_HOME=$JDK -export JAVA_HOME - -ARCH=@@LIBARCH@@ -SBP=${MYDIR}:${JRE}/lib/${ARCH} - - -# Set up a suitable LD_LIBRARY_PATH or DYLD_LIBRARY_PATH -OS=`uname -s` -if [ "${OS}" = "Darwin" ] -then - if [ -z "$DYLD_LIBRARY_PATH" ] - then - DYLD_LIBRARY_PATH="$SBP" - else - DYLD_LIBRARY_PATH="$SBP:$DYLD_LIBRARY_PATH" - fi - export DYLD_LIBRARY_PATH -else - # not 'Darwin' - if [ -z "$LD_LIBRARY_PATH" ] - then - LD_LIBRARY_PATH="$SBP" - else - LD_LIBRARY_PATH="$SBP:$LD_LIBRARY_PATH" - fi - export LD_LIBRARY_PATH -fi - -JPARMS="$@ $JAVA_ARGS"; - -# Locate the gamma development launcher -LAUNCHER=${MYDIR}/gamma -if [ ! -x $LAUNCHER ] ; then - echo Error: Cannot find the gamma development launcher \"$LAUNCHER\" - exit 1 -fi - -GDBSRCDIR=$MYDIR -BASEDIR=`cd $MYDIR/../../.. && pwd` - -init_gdb() { -# Create a gdb script in case we should run inside gdb - GDBSCR=/tmp/hsl.$$ - rm -f $GDBSCR - cat >>$GDBSCR <<EOF -cd `pwd` -handle SIGUSR1 nostop noprint -handle SIGUSR2 nostop noprint -set args $JPARMS -file $LAUNCHER -directory $GDBSRCDIR -EOF -} - -case "$MODE" in - gdb) - init_gdb - $GDB -x $GDBSCR - rm -f $GDBSCR - ;; - gud) - init_gdb -# First find out what emacs version we're using, so that we can -# use the new pretty gdb mode if emacs -version >= 22.1 - case `$EMACS -version 2> /dev/null` in - *GNU\ Emacs\ 2[23]*) - emacs_gud_cmd="gdba" - emacs_gud_args="--annotate=3" - ;; - *) - emacs_gud_cmd="gdb" - emacs_gud_args= - ;; - esac - $EMACS --eval "($emacs_gud_cmd \"$GDB $emacs_gud_args -x $GDBSCR\")"; - rm -f $GDBSCR - ;; - dbx) - $DBX -s $HOME/.dbxrc $LAUNCHER $JPARMS - ;; - valgrind) - echo Warning: Defaulting to 16Mb heap to make Valgrind run faster, use -Xmx for larger heap - echo - $VALGRIND --tool=memcheck --leak-check=yes --num-callers=50 $LAUNCHER -Xmx16m $JPARMS - ;; - run) - LD_PRELOAD=$PRELOADING exec $LAUNCHER $JPARMS - ;; - *) - echo Error: Internal error, unknown launch mode \"$MODE\" - exit 1 - ;; -esac -RETVAL=$?
-exit $RETVAL diff -r e0fb8a213650 -r 836a62f43af9 src/os/posix/vm/os_posix.cpp --- a/src/os/posix/vm/os_posix.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/posix/vm/os_posix.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -251,3 +251,11 @@ return true; #endif } + +const char* os::get_current_directory(char *buf, size_t buflen) { + return getcwd(buf, buflen); +} + +FILE* os::open(int fd, const char* mode) { + return ::fdopen(fd, mode); +} diff -r e0fb8a213650 -r 836a62f43af9 src/os/solaris/vm/chaitin_solaris.cpp --- a/src/os/solaris/vm/chaitin_solaris.cpp Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,46 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" - -void PhaseRegAlloc::pd_preallocate_hook() { - // no action -} - -#ifdef ASSERT -void PhaseRegAlloc::pd_postallocate_verify_hook() { - // no action -} -#endif - - -//Reconciliation History -// 1.1 99/02/12 15:35:26 chaitin_win32.cpp -// 1.2 99/02/18 15:38:56 chaitin_win32.cpp -// 1.4 99/03/09 10:37:48 chaitin_win32.cpp -// 1.6 99/03/25 11:07:44 chaitin_win32.cpp -// 1.8 99/06/22 16:38:58 chaitin_win32.cpp -//End diff -r e0fb8a213650 -r 836a62f43af9 src/os/solaris/vm/osThread_solaris.cpp --- a/src/os/solaris/vm/osThread_solaris.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/solaris/vm/osThread_solaris.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,10 +41,6 @@ _thread_id = 0; sigemptyset(&_caller_sigmask); - _current_callback = NULL; - _current_callback_lock = VM_Version::supports_compare_and_exchange() ? NULL - : new Mutex(Mutex::suspend_resume, "Callback_lock", true); - _saved_interrupt_thread_state = _thread_new; _vm_created_thread = false; } @@ -52,172 +48,6 @@ void OSThread::pd_destroy() { } -// Synchronous interrupt support -// -// _current_callback == NULL no pending callback -// == 1 callback_in_progress -// == other value pointer to the pending callback -// - -// CAS on v8 is implemented by using a global atomic_memory_operation_lock, -// which is shared by other atomic functions. 
It is OK for normal uses, but -// dangerous if used after some thread is suspended or if used in signal -// handlers. Instead here we use a special per-thread lock to synchronize -// updating _current_callback if we are running on v8. Note in general trying -// to grab locks after a thread is suspended is not safe, but it is safe for -// updating _current_callback, because synchronous interrupt callbacks are -// currently only used in: -// 1. GetThreadPC_Callback - used by WatcherThread to profile VM thread -// There is no overlap between the callbacks, which means we won't try to -// grab a thread's sync lock after the thread has been suspended while holding -// the same lock. - -// used after a thread is suspended -static intptr_t compare_and_exchange_current_callback ( - intptr_t callback, intptr_t *addr, intptr_t compare_value, Mutex *sync) { - if (VM_Version::supports_compare_and_exchange()) { - return Atomic::cmpxchg_ptr(callback, addr, compare_value); - } else { - MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag); - if (*addr == compare_value) { - *addr = callback; - return compare_value; - } else { - return callback; - } - } -} - -// used in signal handler -static intptr_t exchange_current_callback(intptr_t callback, intptr_t *addr, Mutex *sync) { - if (VM_Version::supports_compare_and_exchange()) { - return Atomic::xchg_ptr(callback, addr); - } else { - MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag); - intptr_t cb = *addr; - *addr = callback; - return cb; - } -} - -// one interrupt at a time. spin if _current_callback != NULL -int OSThread::set_interrupt_callback(Sync_Interrupt_Callback * cb) { - int count = 0; - while (compare_and_exchange_current_callback( - (intptr_t)cb, (intptr_t *)&_current_callback, (intptr_t)NULL, _current_callback_lock) != NULL) { - while (_current_callback != NULL) { - count++; -#ifdef ASSERT - if ((WarnOnStalledSpinLock > 0) && - (count % WarnOnStalledSpinLock == 0)) { - warning("_current_callback seems to be stalled: %p", _current_callback); - } -#endif - os::yield_all(count); - } - } - return 0; -} - -// reset _current_callback, spin if _current_callback is callback_in_progress -void OSThread::remove_interrupt_callback(Sync_Interrupt_Callback * cb) { - int count = 0; - while (compare_and_exchange_current_callback( - (intptr_t)NULL, (intptr_t *)&_current_callback, (intptr_t)cb, _current_callback_lock) != (intptr_t)cb) { -#ifdef ASSERT - intptr_t p = (intptr_t)_current_callback; - assert(p == (intptr_t)callback_in_progress || - p == (intptr_t)cb, "wrong _current_callback value"); -#endif - while (_current_callback != cb) { - count++; -#ifdef ASSERT - if ((WarnOnStalledSpinLock > 0) && - (count % WarnOnStalledSpinLock == 0)) { - warning("_current_callback seems to be stalled: %p", _current_callback); - } -#endif - os::yield_all(count); - } - } -} - -void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args) { - Sync_Interrupt_Callback * cb; - cb = (Sync_Interrupt_Callback *)exchange_current_callback( - (intptr_t)callback_in_progress, (intptr_t *)&_current_callback, _current_callback_lock); - - if (cb == NULL) { - // signal is delivered too late (thread is masking interrupt signal??). - // there is nothing we need to do because requesting thread has given up. 
- } else if ((intptr_t)cb == (intptr_t)callback_in_progress) { - fatal("invalid _current_callback state"); - } else { - assert(cb->target()->osthread() == this, "wrong target"); - cb->execute(args); - cb->leave_callback(); // notify the requester - } - - // restore original _current_callback value - intptr_t p; - p = exchange_current_callback((intptr_t)cb, (intptr_t *)&_current_callback, _current_callback_lock); - assert(p == (intptr_t)callback_in_progress, "just checking"); -} - -// Called by the requesting thread to send a signal to target thread and -// execute "this" callback from the signal handler. -int OSThread::Sync_Interrupt_Callback::interrupt(Thread * target, int timeout) { - // Let signals to the vm_thread go even if the Threads_lock is not acquired - assert(Threads_lock->owned_by_self() || (target == VMThread::vm_thread()), - "must have threads lock to call this"); - - OSThread * osthread = target->osthread(); - - // may block if target thread already has a pending callback - osthread->set_interrupt_callback(this); - - _target = target; - - int rslt = thr_kill(osthread->thread_id(), os::Solaris::SIGasync()); - assert(rslt == 0, "thr_kill != 0"); - - bool status = false; - jlong t1 = os::javaTimeMillis(); - { // don't use safepoint check because we might be the watcher thread. - MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag); - while (!is_done()) { - status = _sync->wait(Mutex::_no_safepoint_check_flag, timeout); - - // status == true if timed out - if (status) break; - - // update timeout - jlong t2 = os::javaTimeMillis(); - timeout -= t2 - t1; - t1 = t2; - } - } - - // reset current_callback - osthread->remove_interrupt_callback(this); - - return status; -} - -void OSThread::Sync_Interrupt_Callback::leave_callback() { - if (!_sync->owned_by_self()) { - // notify requesting thread - MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag); - _is_done = true; - _sync->notify_all(); - } else { - // Current thread is interrupted while it is holding the _sync lock, trying - // to grab it again will deadlock. The requester will timeout anyway, - // so just return. - _is_done = true; - } -} - // copied from synchronizer.cpp void OSThread::handle_spinlock_contention(int tries) { @@ -229,3 +59,7 @@ os::yield(); // Yield to threads of same or higher priority } } + +void OSThread::SR_handler(Thread* thread, ucontext_t* uc) { + os::Solaris::SR_handler(thread, uc); +} diff -r e0fb8a213650 -r 836a62f43af9 src/os/solaris/vm/osThread_solaris.hpp --- a/src/os/solaris/vm/osThread_solaris.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/solaris/vm/osThread_solaris.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -72,61 +72,15 @@ // *************************************************************** public: - - class InterruptArguments : StackObj { - private: - Thread* _thread; // the thread to signal was dispatched to - ucontext_t* _ucontext; // the machine context at the time of the signal - - public: - InterruptArguments(Thread* thread, ucontext_t* ucontext) { - _thread = thread; - _ucontext = ucontext; - } - - Thread* thread() const { return _thread; } - ucontext_t* ucontext() const { return _ucontext; } - }; - - // There are currently no asynchronous callbacks - and we'd better not - // support them in the future either, as they need to be deallocated from - // the interrupt handler, which is not safe; they also require locks to - // protect the callback queue. - - class Sync_Interrupt_Callback : private StackObj { - protected: - volatile bool _is_done; - Monitor* _sync; - Thread* _target; - public: - Sync_Interrupt_Callback(Monitor * sync) { - _is_done = false; _target = NULL; _sync = sync; - } - - bool is_done() const { return _is_done; } - Thread* target() const { return _target; } - - int interrupt(Thread * target, int timeout); - - // override to implement the callback. - virtual void execute(InterruptArguments *args) = 0; - - void leave_callback(); - }; + os::SuspendResume sr; private: - - Sync_Interrupt_Callback * volatile _current_callback; - enum { - callback_in_progress = 1 - }; - Mutex * _current_callback_lock; // only used on v8 + ucontext_t* _ucontext; public: - - int set_interrupt_callback (Sync_Interrupt_Callback * cb); - void remove_interrupt_callback(Sync_Interrupt_Callback * cb); - void do_interrupt_callbacks_at_interrupt(InterruptArguments *args); + ucontext_t* ucontext() const { return _ucontext; } + void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; } + static void SR_handler(Thread* thread, ucontext_t* uc); // *************************************************************** // java.lang.Thread.interrupt state. diff -r e0fb8a213650 -r 836a62f43af9 src/os/solaris/vm/os_share_solaris.hpp --- a/src/os/solaris/vm/os_share_solaris.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/solaris/vm/os_share_solaris.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,28 +27,6 @@ // Defines the interfaces to Solaris operating systems that vary across platforms - -// This is a simple callback that just fetches a PC for an interrupted thread. -// The thread need not be suspended and the fetched PC is just a hint. -// Returned PC and nPC are not necessarily consecutive. -// This one is currently used for profiling the VMThread ONLY! 
- -// Must be synchronous -class GetThreadPC_Callback : public OSThread::Sync_Interrupt_Callback { - private: - ExtendedPC _addr; - - public: - - GetThreadPC_Callback(Monitor *sync) : - OSThread::Sync_Interrupt_Callback(sync) { } - ExtendedPC addr() const { return _addr; } - - void set_addr(ExtendedPC addr) { _addr = addr; } - - void execute(OSThread::InterruptArguments *args); -}; - // misc extern "C" { void signalHandler(int, siginfo_t*, void*); diff -r e0fb8a213650 -r 836a62f43af9 src/os/solaris/vm/os_solaris.cpp --- a/src/os/solaris/vm/os_solaris.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/solaris/vm/os_solaris.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -240,6 +240,8 @@ static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } } +static void unpackTime(timespec* absTime, bool isAbsolute, jlong time); + // Thread Local Storage // This is common to all Solaris platforms so it is defined here, // in this common file. @@ -824,7 +826,7 @@ // allocate new buffer and initialize info = (Dl_serinfo*)malloc(_info.dls_size); if (info == NULL) { - vm_exit_out_of_memory(_info.dls_size, + vm_exit_out_of_memory(_info.dls_size, OOM_MALLOC_ERROR, "init_system_properties_values info"); } info->dls_size = _info.dls_size; @@ -866,7 +868,7 @@ common_path = malloc(bufsize); if (common_path == NULL) { free(info); - vm_exit_out_of_memory(bufsize, + vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR, "init_system_properties_values common_path"); } sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch); @@ -879,7 +881,7 @@ if (library_path == NULL) { free(info); free(common_path); - vm_exit_out_of_memory(bufsize, + vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR, "init_system_properties_values library_path"); } library_path[0] = '\0'; @@ -1623,7 +1625,8 @@ // %%% this is used only in threadLocalStorage.cpp if (thr_setspecific((thread_key_t)index, value)) { if (errno == ENOMEM) { - vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space"); + vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR, + "thr_setspecific: out of swap space"); } else { fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed " "(%s)", strerror(errno))); @@ -1915,10 +1918,6 @@ return retval; } -const char* os::get_current_directory(char *buf, int buflen) { - return getcwd(buf, buflen); -} - // check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr; @@ -2583,6 +2582,57 @@ return CAST_FROM_FN_PTR(void*, UserHandler); } +class Semaphore : public StackObj { + public: + Semaphore(); + ~Semaphore(); + void signal(); + void wait(); + bool trywait(); + bool timedwait(unsigned int sec, int nsec); + private: + sema_t _semaphore; +}; + + +Semaphore::Semaphore() { + sema_init(&_semaphore, 0, NULL, NULL); +} + +Semaphore::~Semaphore() { + sema_destroy(&_semaphore); +} + +void Semaphore::signal() { + sema_post(&_semaphore); +} + +void Semaphore::wait() { + sema_wait(&_semaphore); +} + +bool Semaphore::trywait() { + return sema_trywait(&_semaphore) == 0; +} + +bool Semaphore::timedwait(unsigned int sec, int nsec) { + struct timespec ts; + unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec); + + while (1) { + int result = sema_timedwait(&_semaphore, &ts); + if (result == 0) { + return true; + } else if (errno == EINTR) { + continue; + } else if (errno == ETIME) { + return false; + } else { + return false; + } + } +} + extern "C" { typedef void (*sa_handler_t)(int); typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); @@ 
-4167,6 +4217,68 @@ schedctl_start(schedctl_init()); } +static void resume_clear_context(OSThread *osthread) { + osthread->set_ucontext(NULL); +} + +static void suspend_save_context(OSThread *osthread, ucontext_t* context) { + osthread->set_ucontext(context); +} + +static Semaphore sr_semaphore; + +void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) { + // Save and restore errno to avoid confusing native code with EINTR + // after sigsuspend. + int old_errno = errno; + + OSThread* osthread = thread->osthread(); + assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); + + os::SuspendResume::State current = osthread->sr.state(); + if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { + suspend_save_context(osthread, uc); + + // attempt to switch the state, we assume we had a SUSPEND_REQUEST + os::SuspendResume::State state = osthread->sr.suspended(); + if (state == os::SuspendResume::SR_SUSPENDED) { + sigset_t suspend_set; // signals for sigsuspend() + + // get current set of blocked signals and unblock resume signal + thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set); + sigdelset(&suspend_set, os::Solaris::SIGasync()); + + sr_semaphore.signal(); + // wait here until we are resumed + while (1) { + sigsuspend(&suspend_set); + + os::SuspendResume::State result = osthread->sr.running(); + if (result == os::SuspendResume::SR_RUNNING) { + sr_semaphore.signal(); + break; + } + } + + } else if (state == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else { + ShouldNotReachHere(); + } + + resume_clear_context(osthread); + } else if (current == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) { + // ignore + } else { + // ignore + } + + errno = old_errno; +} + + void os::interrupt(Thread* thread) { assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer"); @@ -4250,6 +4362,116 @@ return buf[0] == 'y' || buf[0] == 'Y'; } +static int sr_notify(OSThread* osthread) { + int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync()); + assert_status(status == 0, status, "thr_kill"); + return status; +} + +// "Randomly" selected value for how long we want to spin +// before bailing out on suspending a thread, also how often +// we send a signal to a thread we want to resume +static const int RANDOMLY_LARGE_INTEGER = 1000000; +static const int RANDOMLY_LARGE_INTEGER2 = 100; + +static bool do_suspend(OSThread* osthread) { + assert(osthread->sr.is_running(), "thread should be running"); + assert(!sr_semaphore.trywait(), "semaphore has invalid state"); + + // mark as suspended and send signal + if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) { + // failed to switch, state wasn't running? 
+ ShouldNotReachHere(); + return false; + } + + if (sr_notify(osthread) != 0) { + ShouldNotReachHere(); + } + + // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED + while (true) { + if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) { + break; + } else { + // timeout + os::SuspendResume::State cancelled = osthread->sr.cancel_suspend(); + if (cancelled == os::SuspendResume::SR_RUNNING) { + return false; + } else if (cancelled == os::SuspendResume::SR_SUSPENDED) { + // make sure that we consume the signal on the semaphore as well + sr_semaphore.wait(); + break; + } else { + ShouldNotReachHere(); + return false; + } + } + } + + guarantee(osthread->sr.is_suspended(), "Must be suspended"); + return true; +} + +static void do_resume(OSThread* osthread) { + assert(osthread->sr.is_suspended(), "thread should be suspended"); + assert(!sr_semaphore.trywait(), "invalid semaphore state"); + + if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) { + // failed to switch to WAKEUP_REQUEST + ShouldNotReachHere(); + return; + } + + while (true) { + if (sr_notify(osthread) == 0) { + if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { + if (osthread->sr.is_running()) { + return; + } + } + } else { + ShouldNotReachHere(); + } + } + + guarantee(osthread->sr.is_running(), "Must be running!"); +} + +void os::SuspendedThreadTask::internal_do_task() { + if (do_suspend(_thread->osthread())) { + SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext()); + do_task(context); + do_resume(_thread->osthread()); + } +} + +class PcFetcher : public os::SuspendedThreadTask { +public: + PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} + ExtendedPC result(); +protected: + void do_task(const os::SuspendedThreadTaskContext& context); +private: + ExtendedPC _epc; +}; + +ExtendedPC PcFetcher::result() { + guarantee(is_done(), "task is not done yet."); + return _epc; +} + +void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { + Thread* thread = context.thread(); + OSThread* osthread = thread->osthread(); + if (osthread->ucontext() != NULL) { + _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext()); + } else { + // NULL context is unexpected, double-check this is the VMThread + guarantee(thread->is_VM_thread(), "can only be called for VMThread"); + } +} + // A lightweight implementation that does not suspend the target thread and // thus returns only a hint. Used for profiling only! 
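For context, the pattern these pieces implement: a caller subclasses os::SuspendedThreadTask and calls run(), which suspends the target thread via the SR signal handshake, invokes do_task() with the saved ucontext, and then resumes the target. A schematic subclass (the name here is invented; PcFetcher above is the real example):

    class RegSampler : public os::SuspendedThreadTask {
     public:
      RegSampler(Thread* t) : os::SuspendedThreadTask(t) {}
     protected:
      void do_task(const os::SuspendedThreadTaskContext& context) {
        // Runs while the target is stopped in SR_handler; the interrupted
        // register state is available through context.ucontext().
      }
    };
    // usage: RegSampler s(thread); s.run();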
ExtendedPC os::get_thread_pc(Thread* thread) { @@ -4257,21 +4479,9 @@ assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock"); // For now, is only used to profile the VM Thread assert(thread->is_VM_thread(), "Can only be called for VMThread"); - ExtendedPC epc; - - GetThreadPC_Callback cb(ProfileVM_lock); - OSThread *osthread = thread->osthread(); - const int time_to_wait = 400; // 400ms wait for initial response - int status = cb.interrupt(thread, time_to_wait); - - if (cb.is_done() ) { - epc = cb.addr(); - } else { - DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status", - osthread->thread_id(), status);); - // epc is already NULL - } - return epc; + PcFetcher fetcher(thread); + fetcher.run(); + return fetcher.result(); } diff -r e0fb8a213650 -r 836a62f43af9 src/os/solaris/vm/os_solaris.hpp --- a/src/os/solaris/vm/os_solaris.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/solaris/vm/os_solaris.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -127,7 +127,6 @@ static void set_SIGinterrupt(int newsig) { _SIGinterrupt = newsig; } static void set_SIGasync(int newsig) { _SIGasync = newsig; } - public: // Large Page Support--ISM. static bool largepage_range(char* addr, size_t size); @@ -145,6 +144,7 @@ static intptr_t* ucontext_get_sp(ucontext_t* uc); // ucontext_get_fp() is only used by Solaris X86 (see note below) static intptr_t* ucontext_get_fp(ucontext_t* uc); + static address ucontext_get_pc(ucontext_t* uc); // For Analyzer Forte AsyncGetCallTrace profiling support: // Parameter ret_fp is only used by Solaris X86. @@ -157,6 +157,8 @@ static void hotspot_sigmask(Thread* thread); + // SR_handler + static void SR_handler(Thread* thread, ucontext_t* uc); protected: // Solaris-specific interface goes here static julong available_memory(); diff -r e0fb8a213650 -r 836a62f43af9 src/os/windows/launcher/java_md.c --- a/src/os/windows/launcher/java_md.c Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1507 +0,0 @@ -/* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "java.h" -#ifndef GAMMA -#include "version_comp.h" -#endif - -#define JVM_DLL "jvm.dll" -#define JAVA_DLL "java.dll" -#define CRT_DLL "msvcr71.dll" - -/* - * Prototypes. - */ -static jboolean GetPublicJREHome(char *path, jint pathsize); -static jboolean GetJVMPath(const char *jrepath, const char *jvmtype, - char *jvmpath, jint jvmpathsize); -static jboolean GetJREPath(char *path, jint pathsize); -static void EnsureJreInstallation(const char *jrepath); - -/* We support warmup for the UI stack, which is performed in parallel - * with VM initialization. - * This helps to improve startup of UI applications, as the warmup phase - * might be long due to initialization of OS or hardware resources. - * It is not CPU bound and therefore it does not interfere with VM init. - * Such warmup only makes sense for UI apps, and therefore it needs - * to be explicitly requested by passing the -Dsun.awt.warmup=true property - * (this is always the case for plugin/javaws). - * - * The implementation launches a new thread after the VM starts and uses it - * to perform the warmup code (platform dependent). - * This thread is later reused as the AWT toolkit thread, as graphics toolkits - * often assume that they are used from the same thread they were launched on. - * - * At the moment we only support warmup for D3D. It is only possible on Windows, - * and only if other flags do not prohibit it (e.g. OpenGL support requested). - */ -#undef ENABLE_AWT_PRELOAD -#ifndef JAVA_ARGS /* turn off AWT preloading for javac, jar, etc */ - #ifdef _X86_ /* for now disable AWT preloading for 64bit */ - #define ENABLE_AWT_PRELOAD - #endif -#endif - -#ifdef ENABLE_AWT_PRELOAD -/* "AWT was preloaded" flag; - * Turned on by AWTPreload(). - */ -int awtPreloaded = 0; - -/* Calls a function with the name specified. - * The function must be int(*fn)(void). - */ -int AWTPreload(const char *funcName); -/* Stops AWT preloading. */ -void AWTPreloadStop(); - -/* D3D preloading */ -/* -1: not initialized; 0: OFF, 1: ON */ -int awtPreloadD3D = -1; -/* Command line parameter to switch D3D preloading on. */ -#define PARAM_PRELOAD_D3D "-Dsun.awt.warmup" -/* D3D/OpenGL management parameters (may disable D3D preloading) */ -#define PARAM_NODDRAW "-Dsun.java2d.noddraw" -#define PARAM_D3D "-Dsun.java2d.d3d" -#define PARAM_OPENGL "-Dsun.java2d.opengl" -/* function in awt.dll (src/windows/native/sun/java2d/d3d/D3DPipelineManager.cpp) */ -#define D3D_PRELOAD_FUNC "preloadD3D" - - -/* Extracts the value of a parameter with the specified name - * from a command-line argument (returns a pointer into the argument). - * Returns NULL if the argument does not contain the parameter. - * e.g.: - * GetParamValue("theParam", "theParam=value") returns pointer to "value". - */ -const char * GetParamValue(const char *paramName, const char *arg) { - int nameLen = strlen(paramName); - if (strncmp(paramName, arg, nameLen) == 0) { - // arg[nameLen] is valid (may contain final NULL) - if (arg[nameLen] == '=') { - return arg + nameLen + 1; - } - } - return NULL; -} - -/* Checks if a command-line argument contains the specified property - * and analyzes it as a boolean property (true/false). - * Returns -1 if the argument does not contain the parameter; - * Returns 1 if the argument contains the parameter and its value is "true"; - * Returns 0 if the argument contains the parameter and its value is "false".
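For example (illustrative values; the parameter names are the PARAM_* macros defined above): GetBoolParamValue("-Dsun.java2d.d3d", "-Dsun.java2d.d3d=false") returns 0, while GetBoolParamValue("-Dsun.java2d.d3d", "-Dsun.java2d.opengl=true") returns -1.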
- */ -int GetBoolParamValue(const char *paramName, const char *arg) { - const char * paramValue = GetParamValue(paramName, arg); - if (paramValue != NULL) { - if (stricmp(paramValue, "true") == 0) { - return 1; - } - if (stricmp(paramValue, "false") == 0) { - return 0; - } - } - return -1; -} -#endif /* ENABLE_AWT_PRELOAD */ - - -const char * -GetArch() -{ - -#ifdef _M_AMD64 - return "amd64"; -#elif defined(_M_IA64) - return "ia64"; -#else - return "i386"; -#endif -} - -/* - * - */ -void -CreateExecutionEnvironment(int *_argc, - char ***_argv, - char jrepath[], - jint so_jrepath, - char jvmpath[], - jint so_jvmpath, - char **original_argv) { -#ifndef GAMMA - char * jvmtype; - - /* Find out where the JRE is that we will be using. */ - if (!GetJREPath(jrepath, so_jrepath)) { - ReportErrorMessage("Error: could not find Java SE Runtime Environment.", - JNI_TRUE); - exit(2); - } - - /* Do this before we read jvm.cfg */ - EnsureJreInstallation(jrepath); - - /* Find the specified JVM type */ - if (ReadKnownVMs(jrepath, (char*)GetArch(), JNI_FALSE) < 1) { - ReportErrorMessage("Error: no known VMs. (check for corrupt jvm.cfg file)", - JNI_TRUE); - exit(1); - } - jvmtype = CheckJvmType(_argc, _argv, JNI_FALSE); - - jvmpath[0] = '\0'; - if (!GetJVMPath(jrepath, jvmtype, jvmpath, so_jvmpath)) { - char * message=NULL; - const char * format = "Error: no `%s' JVM at `%s'."; - message = (char *)JLI_MemAlloc((strlen(format)+strlen(jvmtype)+ - strlen(jvmpath)) * sizeof(char)); - sprintf(message,format, jvmtype, jvmpath); - ReportErrorMessage(message, JNI_TRUE); - exit(4); - } - /* If we got here, jvmpath has been correctly initialized. */ - -#else /* ifndef GAMMA */ - - /* - * gamma launcher is simpler in that it doesn't handle VM flavors, data - * model, etc. Assuming everything is set up correctly, - * all we need to do here is to return correct path names. See also - * GetJVMPath() and GetApplicationHome(). - */ - - { - if (!GetJREPath(jrepath, so_jrepath) ) { - ReportErrorMessage("Error: could not find Java SE Runtime Environment.", - JNI_TRUE); - exit(2); - } - - if (!GetJVMPath(jrepath, NULL, jvmpath, so_jvmpath)) { - char * message=NULL; - const char * format = "Error: no JVM at `%s'."; - message = (char *)JLI_MemAlloc((strlen(format)+ - strlen(jvmpath)) * sizeof(char)); - sprintf(message, format, jvmpath); - ReportErrorMessage(message, JNI_TRUE); - exit(4); - } - } - -#endif /* ifndef GAMMA */ - -} - - -static jboolean -LoadMSVCRT() -{ - // Only do this once - static int loaded = 0; - char crtpath[MAXPATHLEN]; - - if (!loaded) { - /* - * The Microsoft C Runtime Library needs to be loaded first. A copy is - * assumed to be present in the "JRE path" directory. If it is not found - * there (or "JRE path" fails to resolve), skip the explicit load and let - * nature take its course, which is likely to be a failure to execute. - */ - if (GetJREPath(crtpath, MAXPATHLEN)) { - (void)strcat(crtpath, "\\bin\\" CRT_DLL); /* Add crt dll */ - if (_launcher_debug) { - printf("CRT path is %s\n", crtpath); - } - if (_access(crtpath, 0) == 0) { - if (LoadLibrary(crtpath) == 0) { - ReportErrorMessage2("Error loading: %s", crtpath, JNI_TRUE); - return JNI_FALSE; - } - } - } - loaded = 1; - } - return JNI_TRUE; -} - -/* - * The preJVMStart is a function in the jkernel.dll, which - * performs the final step of synthesizing back the decomposed - * modules (partial install) to the full JRE. Any tool which - * uses the JRE must perform this step to ensure the complete synthesis.
- * The EnsureJreInstallation function calls preJVMStart based on - * the conditions outlined below, noting that the operation - * will fail silently if any of the conditions are not met. - * NOTE: this call must be made before jvm.dll is loaded, or jvm.cfg - * is read, since jvm.cfg will be modified by the preJVMStart. - * 1. Check that we are on a supported platform. - * 2. Find the location of the JRE or the Kernel JRE. - * 3. Check the existence of JREHOME/lib/bundles. - * 4. Check for jkernel.dll and invoke the entry point. - */ -typedef VOID (WINAPI *PREJVMSTART)(); - -static void -EnsureJreInstallation(const char* jrepath) -{ - HINSTANCE handle; - char tmpbuf[MAXPATHLEN]; - PREJVMSTART PreJVMStart; - struct stat s; - - /* 32-bit Windows only, please */ - if (strcmp(GetArch(), "i386") != 0 ) { - if (_launcher_debug) { - printf("EnsureJreInstallation:unsupported platform\n"); - } - return; - } - /* Does our bundle directory exist ? */ - strcpy(tmpbuf, jrepath); - strcat(tmpbuf, "\\lib\\bundles"); - if (stat(tmpbuf, &s) != 0) { - if (_launcher_debug) { - printf("EnsureJreInstallation:<%s>:not found\n", tmpbuf); - } - return; - } - /* Does our jkernel dll exist ? */ - strcpy(tmpbuf, jrepath); - strcat(tmpbuf, "\\bin\\jkernel.dll"); - if (stat(tmpbuf, &s) != 0) { - if (_launcher_debug) { - printf("EnsureJreInstallation:<%s>:not found\n", tmpbuf); - } - return; - } - /* The Microsoft C Runtime Library needs to be loaded first. */ - if (!LoadMSVCRT()) { - if (_launcher_debug) { - printf("EnsureJreInstallation:could not load C runtime DLL\n"); - } - return; - } - /* Load the jkernel.dll */ - if ((handle = LoadLibrary(tmpbuf)) == 0) { - if (_launcher_debug) { - printf("EnsureJreInstallation:%s:load failed\n", tmpbuf); - } - return; - } - /* Get the function address */ - PreJVMStart = (PREJVMSTART)GetProcAddress(handle, "preJVMStart"); - if (PreJVMStart == NULL) { - if (_launcher_debug) { - printf("EnsureJreInstallation:preJVMStart:function lookup failed\n"); - } - FreeLibrary(handle); - return; - } - PreJVMStart(); - if (_launcher_debug) { - printf("EnsureJreInstallation:preJVMStart:called\n"); - } - FreeLibrary(handle); - return; -} - -/* - * Find path to JRE based on .exe's location or registry settings. - */ -jboolean -GetJREPath(char *path, jint pathsize) -{ - char javadll[MAXPATHLEN]; - struct stat s; - - if (GetApplicationHome(path, pathsize)) { - /* Is JRE co-located with the application? */ - sprintf(javadll, "%s\\bin\\" JAVA_DLL, path); - if (stat(javadll, &s) == 0) { - goto found; - } - - /* Does this app ship a private JRE in \jre directory? */ - sprintf(javadll, "%s\\jre\\bin\\" JAVA_DLL, path); - if (stat(javadll, &s) == 0) { - strcat(path, "\\jre"); - goto found; - } - } - -#ifndef GAMMA - /* Look for a public JRE on this machine. */ - if (GetPublicJREHome(path, pathsize)) { - goto found; - } -#endif - - fprintf(stderr, "Error: could not find " JAVA_DLL "\n"); - return JNI_FALSE; - - found: - if (_launcher_debug) - printf("JRE path is %s\n", path); - return JNI_TRUE; -} - -/* - * Given a JRE location and a JVM type, construct what the name of the - * JVM shared library will be. Return true if such a library - * exists, false otherwise.
- */ -static jboolean -GetJVMPath(const char *jrepath, const char *jvmtype, - char *jvmpath, jint jvmpathsize) -{ - struct stat s; - -#ifndef GAMMA - if (strchr(jvmtype, '/') || strchr(jvmtype, '\\')) { - sprintf(jvmpath, "%s\\" JVM_DLL, jvmtype); - } else { - sprintf(jvmpath, "%s\\bin\\%s\\" JVM_DLL, jrepath, jvmtype); - } -#else - /* - * For the gamma launcher, the JVM is either built in or in the same directory. - * Either way we return "<launcher dir>\jvm.dll", where <launcher dir> is the - * directory where the gamma launcher is located. - */ - - char *p; - GetModuleFileName(0, jvmpath, jvmpathsize); - - p = strrchr(jvmpath, '\\'); - if (p) { - /* replace the executable name with the JVM DLL name (jvm.dll) */ - snprintf(p + 1, jvmpathsize - (p + 1 - jvmpath), "%s", JVM_DLL); - } else { - /* this case shouldn't happen */ - snprintf(jvmpath, jvmpathsize, "%s", JVM_DLL); - } -#endif /* ifndef GAMMA */ - - if (stat(jvmpath, &s) == 0) { - return JNI_TRUE; - } else { - return JNI_FALSE; - } -} - -/* - * Load a jvm from "jvmpath" and initialize the invocation functions. - */ -jboolean -LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn) -{ -#ifdef GAMMA - /* JVM is directly linked with gamma launcher; no LoadLibrary() */ - ifn->CreateJavaVM = JNI_CreateJavaVM; - ifn->GetDefaultJavaVMInitArgs = JNI_GetDefaultJavaVMInitArgs; - return JNI_TRUE; -#else - HINSTANCE handle; - - if (_launcher_debug) { - printf("JVM path is %s\n", jvmpath); - } - - /* The Microsoft C Runtime Library needs to be loaded first. */ - LoadMSVCRT(); - - /* Load the Java VM DLL */ - if ((handle = LoadLibrary(jvmpath)) == 0) { - ReportErrorMessage2("Error loading: %s", (char *)jvmpath, JNI_TRUE); - return JNI_FALSE; - } - - /* Now get the function addresses */ - ifn->CreateJavaVM = - (void *)GetProcAddress(handle, "JNI_CreateJavaVM"); - ifn->GetDefaultJavaVMInitArgs = - (void *)GetProcAddress(handle, "JNI_GetDefaultJavaVMInitArgs"); - if (ifn->CreateJavaVM == 0 || ifn->GetDefaultJavaVMInitArgs == 0) { - ReportErrorMessage2("Error: can't find JNI interfaces in: %s", - (char *)jvmpath, JNI_TRUE); - return JNI_FALSE; - } - - return JNI_TRUE; -#endif /* ifndef GAMMA */ -} - -/* - * If app is "c:\foo\bin\javac", then put "c:\foo" into buf. - */ -jboolean -GetApplicationHome(char *buf, jint bufsize) -{ -#ifndef GAMMA - char *cp; - GetModuleFileName(0, buf, bufsize); - *strrchr(buf, '\\') = '\0'; /* remove .exe file name */ - if ((cp = strrchr(buf, '\\')) == 0) { - /* This happens if the application is in a drive root, and - * there is no bin directory.
*/ - buf[0] = '\0'; - return JNI_FALSE; - } - *cp = '\0'; /* remove the bin\ part */ - return JNI_TRUE; - -#else /* ifndef GAMMA */ - - char env[MAXPATHLEN + 1]; - - /* The gamma launcher uses the ALT_JAVA_HOME environment variable or the jdkpath.txt file to find the JDK/JRE */ - - if (getenv("ALT_JAVA_HOME") != NULL) { - snprintf(buf, bufsize, "%s", getenv("ALT_JAVA_HOME")); - } - else { - char path[MAXPATHLEN + 1]; - char* p; - int len; - FILE* fp; - - // find the path to the current executable - len = GetModuleFileName(NULL, path, MAXPATHLEN + 1); - if (len == 0 || len > MAXPATHLEN) { - printf("Could not get directory of current executable.\n"); - return JNI_FALSE; - } - // remove the last path component ("hotspot.exe") - p = strrchr(path, '\\'); - if (p == NULL) { - printf("Could not parse directory of current executable.\n"); - return JNI_FALSE; - } - *p = '\0'; - - // open jdkpath.txt and read JAVA_HOME from it - if (strlen(path) + strlen("\\jdkpath.txt") + 1 >= MAXPATHLEN) { - printf("Path too long: %s\n", path); - return JNI_FALSE; - } - strcat(path, "\\jdkpath.txt"); - fp = fopen(path, "r"); - if (fp == NULL) { - printf("Could not open file %s to get path to JDK.\n", path); - return JNI_FALSE; - } - - if (fgets(buf, bufsize, fp) == NULL) { - printf("Could not read from file %s to get path to JDK.\n", path); - fclose(fp); - return JNI_FALSE; - } - // trim the buffer - p = buf + strlen(buf) - 1; - while(isspace(*p)) { - *p = '\0'; - p--; - } - fclose(fp); - } - - _snprintf(env, MAXPATHLEN, "JAVA_HOME=%s", buf); - _putenv(env); - - return JNI_TRUE; -#endif /* ifndef GAMMA */ -} - -#ifdef JAVAW -__declspec(dllimport) char **__initenv; - -int WINAPI -WinMain(HINSTANCE inst, HINSTANCE previnst, LPSTR cmdline, int cmdshow) -{ - int ret; - - __initenv = _environ; - ret = main(__argc, __argv); - - return ret; -} -#endif - -#ifndef GAMMA - -/* - * Helpers to look in the registry for a public JRE. - */ - /* Same for 1.5.0, 1.5.1, 1.5.2 etc. */ -#define DOTRELEASE JDK_MAJOR_VERSION "." JDK_MINOR_VERSION -#define JRE_KEY "Software\\JavaSoft\\Java Runtime Environment" - -static jboolean -GetStringFromRegistry(HKEY key, const char *name, char *buf, jint bufsize) -{ - DWORD type, size; - - if (RegQueryValueEx(key, name, 0, &type, 0, &size) == 0 - && type == REG_SZ - && (size < (unsigned int)bufsize)) { - if (RegQueryValueEx(key, name, 0, 0, buf, &size) == 0) { - return JNI_TRUE; - } - } - return JNI_FALSE; -} - -static jboolean -GetPublicJREHome(char *buf, jint bufsize) -{ - HKEY key, subkey; - char version[MAXPATHLEN]; - - /* - * Note: There is a very similar implementation of the following - * registry reading code in the Windows java control panel (javacp.cpl). - * If there are bugs here, a similar bug probably exists there. Hence, - * changes here require inspection there. - */ - - /* Find the current version of the JRE */ - if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, JRE_KEY, 0, KEY_READ, &key) != 0) { - fprintf(stderr, "Error opening registry key '" JRE_KEY "'\n"); - return JNI_FALSE; - } - - if (!GetStringFromRegistry(key, "CurrentVersion", - version, sizeof(version))) { - fprintf(stderr, "Failed reading value of registry key:\n\t" - JRE_KEY "\\CurrentVersion\n"); - RegCloseKey(key); - return JNI_FALSE; - } - - if (strcmp(version, DOTRELEASE) != 0) { - fprintf(stderr, "Registry key '" JRE_KEY "\\CurrentVersion'\nhas " - "value '%s', but '" DOTRELEASE "' is required.\n", version); - RegCloseKey(key); - return JNI_FALSE; - } - - /* Find directory where the current version is installed.
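
GetPublicJREHome() above layers version checks over one primitive: read a REG_SZ value with type and size validation, as GetStringFromRegistry() does. A standalone sketch of that primitive (ANSI registry API, link against advapi32; the key and value names mirror the ones used here but serve only as an example, and production code should also NUL-terminate the result defensively):

    #include <windows.h>
    #include <stdio.h>

    /* Read a REG_SZ value into buf; returns 1 on success, 0 otherwise. */
    static int read_reg_sz(HKEY root, const char *subkey, const char *name,
                           char *buf, DWORD bufsize) {
        HKEY key;
        DWORD type, size = bufsize;
        int ok = 0;
        if (RegOpenKeyExA(root, subkey, 0, KEY_READ, &key) != ERROR_SUCCESS)
            return 0;
        if (RegQueryValueExA(key, name, NULL, &type, (LPBYTE)buf, &size) == ERROR_SUCCESS
            && type == REG_SZ)
            ok = 1;   /* value fit into buf and really is a string */
        RegCloseKey(key);
        return ok;
    }

    int main(void) {
        char version[MAX_PATH];
        if (read_reg_sz(HKEY_LOCAL_MACHINE,
                        "Software\\JavaSoft\\Java Runtime Environment",
                        "CurrentVersion", version, sizeof(version)))
            printf("CurrentVersion = %s\n", version);
        return 0;
    }
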
*/ - if (RegOpenKeyEx(key, version, 0, KEY_READ, &subkey) != 0) { - fprintf(stderr, "Error opening registry key '" - JRE_KEY "\\%s'\n", version); - RegCloseKey(key); - return JNI_FALSE; - } - - if (!GetStringFromRegistry(subkey, "JavaHome", buf, bufsize)) { - fprintf(stderr, "Failed reading value of registry key:\n\t" - JRE_KEY "\\%s\\JavaHome\n", version); - RegCloseKey(key); - RegCloseKey(subkey); - return JNI_FALSE; - } - - if (_launcher_debug) { - char micro[MAXPATHLEN]; - if (!GetStringFromRegistry(subkey, "MicroVersion", micro, - sizeof(micro))) { - printf("Warning: Can't read MicroVersion\n"); - micro[0] = '\0'; - } - printf("Version major.minor.micro = %s.%s\n", version, micro); - } - - RegCloseKey(key); - RegCloseKey(subkey); - return JNI_TRUE; -} - -#endif /* ifndef GAMMA */ - -/* - * Support for doing cheap, accurate interval timing. - */ -static jboolean counterAvailable = JNI_FALSE; -static jboolean counterInitialized = JNI_FALSE; -static LARGE_INTEGER counterFrequency; - -jlong CounterGet() -{ - LARGE_INTEGER count; - - if (!counterInitialized) { - counterAvailable = QueryPerformanceFrequency(&counterFrequency); - counterInitialized = JNI_TRUE; - } - if (!counterAvailable) { - return 0; - } - QueryPerformanceCounter(&count); - return (jlong)(count.QuadPart); -} - -jlong Counter2Micros(jlong counts) -{ - if (!counterAvailable || !counterInitialized) { - return 0; - } - return (counts * 1000 * 1000)/counterFrequency.QuadPart; -} - -void ReportErrorMessage(char * message, jboolean always) { -#ifdef JAVAW - if (message != NULL) { - MessageBox(NULL, message, "Java Virtual Machine Launcher", - (MB_OK|MB_ICONSTOP|MB_APPLMODAL)); - } -#else - if (always) { - fprintf(stderr, "%s\n", message); - } -#endif -} - -void ReportErrorMessage2(char * format, char * string, jboolean always) { - /* - * The format argument must be a printf format string with one %s - * argument, which is passed the string argument. - */ -#ifdef JAVAW - size_t size; - char * message; - size = strlen(format) + strlen(string); - message = (char*)JLI_MemAlloc(size*sizeof(char)); - sprintf(message, (const char *)format, string); - - if (message != NULL) { - MessageBox(NULL, message, "Java Virtual Machine Launcher", - (MB_OK|MB_ICONSTOP|MB_APPLMODAL)); - JLI_MemFree(message); - } -#else - if (always) { - fprintf(stderr, (const char *)format, string); - fprintf(stderr, "\n"); - } -#endif -} - -/* - * As ReportErrorMessage2 (above) except the system message (if any) - * associated with this error is written to a second %s format specifier - * in the format argument. 
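
The CounterGet()/Counter2Micros() pair above is the usual QueryPerformanceCounter idiom: sample the frequency once, then scale raw counts to microseconds. A self-contained sketch of the same arithmetic (the Sleep() call is a stand-in for whatever is being timed):

    #include <windows.h>
    #include <stdio.h>

    int main(void) {
        LARGE_INTEGER freq, t0, t1;
        long long us;
        if (!QueryPerformanceFrequency(&freq))
            return 1;                 /* no high-resolution counter available */
        QueryPerformanceCounter(&t0);
        Sleep(20);                    /* stand-in for the work being timed */
        QueryPerformanceCounter(&t1);
        /* counts * 1e6 / frequency = microseconds, as in Counter2Micros() */
        us = (t1.QuadPart - t0.QuadPart) * 1000000LL / freq.QuadPart;
        printf("elapsed: %lld us\n", us);
        return 0;
    }
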
- */ -void ReportSysErrorMessage2(char * format, char * string, jboolean always) { - int save_errno = errno; - DWORD errval; - int freeit = 0; - char *errtext = NULL; - - if ((errval = GetLastError()) != 0) { /* Platform SDK / DOS Error */ - int n = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM| - FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_ALLOCATE_BUFFER, - NULL, errval, 0, (LPTSTR)&errtext, 0, NULL); - if (errtext == NULL || n == 0) { /* Paranoia check */ - errtext = ""; - n = 0; - } else { - freeit = 1; - if (n > 2) { /* Drop final CR, LF */ - if (errtext[n - 1] == '\n') n--; - if (errtext[n - 1] == '\r') n--; - errtext[n] = '\0'; - } - } - } else /* C runtime error that has no corresponding DOS error code */ - errtext = strerror(save_errno); - -#ifdef JAVAW - { - size_t size; - char * message; - size = strlen(format) + strlen(string) + strlen(errtext); - message = (char*)JLI_MemAlloc(size*sizeof(char)); - sprintf(message, (const char *)format, string, errtext); - - if (message != NULL) { - MessageBox(NULL, message, "Java Virtual Machine Launcher", - (MB_OK|MB_ICONSTOP|MB_APPLMODAL)); - JLI_MemFree(message); - } - } -#else - if (always) { - fprintf(stderr, (const char *)format, string, errtext); - fprintf(stderr, "\n"); - } -#endif - if (freeit) - (void)LocalFree((HLOCAL)errtext); -} - -void ReportExceptionDescription(JNIEnv * env) { -#ifdef JAVAW - /* - * This code should be replaced by code which opens a window with - * the exception detail message. - */ - (*env)->ExceptionDescribe(env); -#else - (*env)->ExceptionDescribe(env); -#endif -} - - -/* - * Return JNI_TRUE for an option string that has no effect but should - * _not_ be passed on to the vm; return JNI_FALSE otherwise. On - * windows, there are no options that should be screened in this - * manner. - */ -jboolean RemovableMachineDependentOption(char * option) { -#ifdef ENABLE_AWT_PRELOAD - if (awtPreloadD3D < 0) { - /* Tests the command line parameter only if not set yet. */ - if (GetBoolParamValue(PARAM_PRELOAD_D3D, option) == 1) { - awtPreloadD3D = 1; - } - } - if (awtPreloadD3D != 0) { - /* Don't test the command line parameters if already disabled. */ - if (GetBoolParamValue(PARAM_NODDRAW, option) == 1 - || GetBoolParamValue(PARAM_D3D, option) == 0 - || GetBoolParamValue(PARAM_OPENGL, option) == 1) - { - awtPreloadD3D = 0; - } - } -#endif /* ENABLE_AWT_PRELOAD */ - - return JNI_FALSE; -} - -void PrintMachineDependentOptions() { - return; -} - -#ifndef GAMMA - -jboolean -ServerClassMachine() { - jboolean result = JNI_FALSE; -#if defined(NEVER_ACT_AS_SERVER_CLASS_MACHINE) - result = JNI_FALSE; -#elif defined(ALWAYS_ACT_AS_SERVER_CLASS_MACHINE) - result = JNI_TRUE; -#endif - return result; -} - -/* - * Determine if there is an acceptable JRE in the registry directory top_key. - * Upon locating the "best" one, return a fully qualified path to it. - * "Best" is defined as the most advanced JRE meeting the constraints - * contained in the manifest_info. If no JRE in this directory meets the - * constraints, return NULL. - * - * It doesn't matter if we get an error reading the registry, or we just - * don't find anything interesting in the directory. We just return NULL - * in either case. - */ -static char * -ProcessDir(manifest_info* info, HKEY top_key) { - DWORD index = 0; - HKEY ver_key; - char name[MAXNAMELEN]; - int len; - char *best = NULL; - - /* - * Enumerate "/SOFTWARE/JavaSoft/Java Runtime Environment" - * searching for the best available version. 
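
ProcessDir() below is built on RegEnumKey(): walk the subkeys by increasing index until the call stops returning ERROR_SUCCESS. A minimal standalone sketch of that loop shape (same registry key as elsewhere in this file, used purely as an example):

    #include <windows.h>
    #include <stdio.h>

    int main(void) {
        HKEY key;
        char name[MAX_PATH];
        DWORD index = 0;
        if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
                          "Software\\JavaSoft\\Java Runtime Environment",
                          0, KEY_READ, &key) != ERROR_SUCCESS)
            return 1;
        /* enumeration ends when the index runs past the last subkey */
        while (RegEnumKeyA(key, index, name, sizeof(name)) == ERROR_SUCCESS) {
            printf("subkey %lu: %s\n", (unsigned long)index, name);
            index++;
        }
        RegCloseKey(key);
        return 0;
    }
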
- */ - while (RegEnumKey(top_key, index, name, MAXNAMELEN) == ERROR_SUCCESS) { - index++; - if (JLI_AcceptableRelease(name, info->jre_version)) - if ((best == NULL) || (JLI_ExactVersionId(name, best) > 0)) { - if (best != NULL) - JLI_MemFree(best); - best = JLI_StringDup(name); - } - } - - /* - * Extract "JavaHome" from the "best" registry directory and return - * that path. If no appropriate version was located, or there is an - * error in extracting the "JavaHome" string, return null. - */ - if (best == NULL) - return (NULL); - else { - if (RegOpenKeyEx(top_key, best, 0, KEY_READ, &ver_key) - != ERROR_SUCCESS) { - JLI_MemFree(best); - if (ver_key != NULL) - RegCloseKey(ver_key); - return (NULL); - } - JLI_MemFree(best); - len = MAXNAMELEN; - if (RegQueryValueEx(ver_key, "JavaHome", NULL, NULL, (LPBYTE)name, &len) - != ERROR_SUCCESS) { - if (ver_key != NULL) - RegCloseKey(ver_key); - return (NULL); - } - if (ver_key != NULL) - RegCloseKey(ver_key); - return (JLI_StringDup(name)); - } -} - -/* - * This is the global entry point. It examines the host for the optimal - * JRE to be used by scanning a set of registry entries. This set of entries - * is hardwired on Windows as "Software\JavaSoft\Java Runtime Environment" - * under the set of roots "{ HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE }". - * - * This routine simply opens each of these registry directories before passing - * control on to ProcessDir(). - */ -char * -LocateJRE(manifest_info* info) { - HKEY key = NULL; - char *path; - int key_index; - HKEY root_keys[2] = { HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE }; - - for (key_index = 0; key_index <= 1; key_index++) { - if (RegOpenKeyEx(root_keys[key_index], JRE_KEY, 0, KEY_READ, &key) - == ERROR_SUCCESS) - if ((path = ProcessDir(info, key)) != NULL) { - if (key != NULL) - RegCloseKey(key); - return (path); - } - if (key != NULL) - RegCloseKey(key); - } - return NULL; -} - - -/* - * Local helper routine to isolate a single token (option or argument) - * from the command line. - * - * This routine accepts a pointer to a character pointer. The first - * token (as defined by MSDN command-line argument syntax) is isolated - * from that string. - * - * Upon return, the input character pointer pointed to by the parameter s - * is updated to point to the remaining, unscanned portion of the string, - * or to a null character if the entire string has been consumed. - * - * This function returns a pointer to a null-terminated string which - * contains the isolated first token, or to the null character if no - * token could be isolated. - * - * Note the side effect of modifying the input string s by the insertion - * of a null character, making it two strings. - * - * See "Parsing C Command-Line Arguments" in the MSDN Library for the - * parsing rule details. The rule summary from that specification is: - * - * * Arguments are delimited by white space, which is either a space or a tab. - * - * * A string surrounded by double quotation marks is interpreted as a single - * argument, regardless of white space contained within. A quoted string can - * be embedded in an argument. Note that the caret (^) is not recognized as - * an escape character or delimiter. - * - * * A double quotation mark preceded by a backslash, \", is interpreted as a - * literal double quotation mark ("). - * - * * Backslashes are interpreted literally, unless they immediately precede a - * double quotation mark.
- * - * * If an even number of backslashes is followed by a double quotation mark, - * then one backslash (\) is placed in the argv array for every pair of - * backslashes (\\), and the double quotation mark (") is interpreted as a - * string delimiter. - * - * * If an odd number of backslashes is followed by a double quotation mark, - * then one backslash (\) is placed in the argv array for every pair of - * backslashes (\\) and the double quotation mark is interpreted as an - * escape sequence by the remaining backslash, causing a literal double - * quotation mark (") to be placed in argv. - */ -static char* -nextarg(char** s) { - char *p = *s; - char *head; - int slashes = 0; - int inquote = 0; - - /* - * Strip leading whitespace, which MSDN defines as only space or tab. - * (Hence, no locale specific "isspace" here.) - */ - while (*p != (char)0 && (*p == ' ' || *p == '\t')) - p++; - head = p; /* Save the start of the token to return */ - - /* - * Isolate a token from the command line. - */ - while (*p != (char)0 && (inquote || !(*p == ' ' || *p == '\t'))) { - if (*p == '\\' && *(p+1) == '"' && slashes % 2 == 0) - p++; - else if (*p == '"') - inquote = !inquote; - slashes = (*p++ == '\\') ? slashes + 1 : 0; - } - - /* - * If the token isolated isn't already terminated in a "char zero", - * then replace the whitespace character with one and move to the - * next character. - */ - if (*p != (char)0) - *p++ = (char)0; - - /* - * Update the parameter to point to the head of the remaining string - * reflecting the command line and return a pointer to the leading - * token which was isolated from the command line. - */ - *s = p; - return (head); -} - -/* - * Local helper routine to return a string equivalent to the input string - * s, but with quotes removed so the result is a string as would be found - * in argv[]. The returned string should be freed by a call to JLI_MemFree(). - * - * The rules for quoting (and escaped quotes) are: - * - * 1 A double quotation mark preceded by a backslash, \", is interpreted as a - * literal double quotation mark ("). - * - * 2 Backslashes are interpreted literally, unless they immediately precede a - * double quotation mark. - * - * 3 If an even number of backslashes is followed by a double quotation mark, - * then one backslash (\) is placed in the argv array for every pair of - * backslashes (\\), and the double quotation mark (") is interpreted as a - * string delimiter. - * - * 4 If an odd number of backslashes is followed by a double quotation mark, - * then one backslash (\) is placed in the argv array for every pair of - * backslashes (\\) and the double quotation mark is interpreted as an - * escape sequence by the remaining backslash, causing a literal double - * quotation mark (") to be placed in argv. - */ -static char* -unquote(const char *s) { - const char *p = s; /* Pointer to the tail of the original string */ - char *un = (char*)JLI_MemAlloc(strlen(s) + 1); /* Ptr to unquoted string */ - char *pun = un; /* Pointer to the tail of the unquoted string */ - - while (*p != '\0') { - if (*p == '"') { - p++; - } else if (*p == '\\') { - const char *q = p + strspn(p,"\\"); - if (*q == '"') - do { - *pun++ = '\\'; - p += 2; - } while (*p == '\\' && p < q); - else - while (p < q) - *pun++ = *p++; - } else { - *pun++ = *p++; - } - } - *pun = '\0'; - return un; -} - -/* - * Given a path to a jre to execute, this routine checks if this process - * is indeed that jre. If not, it exec's that jre. 
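
The quoting rules that unquote() above implements by hand are the same ones the shell export CommandLineToArgvW() applies, which makes it a convenient cross-check. A small probe (wide-character API, link against shell32; the sample command line is arbitrary):

    #include <windows.h>
    #include <shellapi.h>
    #include <stdio.h>

    int main(void) {
        /* source text: prog "a b" c\\"d e" f\"g
         * - an even run of backslashes before a quote halves; the quote delimits
         * - an odd run before a quote yields a literal quote in the argument */
        const wchar_t *cmdline = L"prog \"a b\" c\\\\\"d e\" f\\\"g";
        int argc, i;
        LPWSTR *argv = CommandLineToArgvW(cmdline, &argc);
        if (argv == NULL)
            return 1;
        for (i = 0; i < argc; i++)
            wprintf(L"argv[%d] = <%ls>\n", i, argv[i]);
        LocalFree(argv);   /* CommandLineToArgvW allocates a single block */
        return 0;
    }
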
- * - * We want to actually check the paths rather than just the version string - * built into the executable, so that a given version specification will yield - * the exact same Java environment, regardless of the version of the arbitrary - * launcher we start with. - */ -void -ExecJRE(char *jre, char **argv) { - int len; - char *progname; - char path[MAXPATHLEN + 1]; - - /* - * Determine the executable we are building (or in the rare case, running). - */ -#ifdef JAVA_ARGS /* javac, jar and friends. */ - progname = "java"; -#else /* java, oldjava, javaw and friends */ -#ifdef PROGNAME - progname = PROGNAME; -#else - { - char *s; - progname = *argv; - if ((s = strrchr(progname, FILE_SEPARATOR)) != 0) { - progname = s + 1; - } - } -#endif /* PROGNAME */ -#endif /* JAVA_ARGS */ - - /* - * Resolve the real path to the currently running launcher. - */ - len = GetModuleFileName(NULL, path, MAXPATHLEN + 1); - if (len == 0 || len > MAXPATHLEN) { - ReportSysErrorMessage2( - "Unable to resolve path to current %s executable: %s", - progname, JNI_TRUE); - exit(1); - } - - if (_launcher_debug) { - printf("ExecJRE: old: %s\n", path); - printf("ExecJRE: new: %s\n", jre); - } - - /* - * If the path to the selected JRE directory is a match to the initial - * portion of the path to the currently executing JRE, we have a winner! - * If so, just return. (strnicmp() is the Windows equiv. of strncasecmp().) - */ - if (strnicmp(jre, path, strlen(jre)) == 0) - return; /* I am the droid you were looking for */ - - /* - * If this isn't the selected version, exec the selected version. - */ - (void)strcat(strcat(strcpy(path, jre), "\\bin\\"), progname); - (void)strcat(path, ".exe"); - - /* - * Although Windows has an execv() entrypoint, it doesn't actually - * overlay a process: it can only create a new process and terminate - * the old process. Therefore, any processes waiting on the initial - * process wake up when they shouldn't. Hence, a chain of pseudo-zombie - * processes must be retained to maintain the proper wait semantics. - * Fortunately the image size of the launcher isn't too large at this - * time. - * - * If it weren't for this semantic flaw, the code below would be ... - * - * execv(path, argv); - * ReportErrorMessage2("Exec of %s failed\n", path, JNI_TRUE); - * exit(1); - * - * The incorrect exec semantics could be addressed by: - * - * exit((int)spawnv(_P_WAIT, path, argv)); - * - * Unfortunately, a bug in the Windows spawn/exec implementation prevents - * this from completely working. All the Windows POSIX process creation - * interfaces are implemented as wrappers around the native Windows - * function CreateProcess(). CreateProcess() takes a single string - * to specify command line options and arguments, so the POSIX routine - * wrappers build a single string from the argv[] array and in the - * process, any quoting information is lost. - * - * The solution to this is to get the original command line, to process it - * to remove the new multiple JRE options (if any) as was done for argv - * in the common SelectVersion() routine and finally to pass it directly - * to the native CreateProcess() Windows process control interface.
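
Stripped of the command-line rewriting, the block that follows is the classic Windows spawn-and-wait skeleton: CreateProcess, WaitForSingleObject, GetExitCodeProcess, then close both handles. A standalone sketch of the same shape (the child command is a throwaway example):

    #include <windows.h>
    #include <string.h>
    #include <stdio.h>

    int main(void) {
        STARTUPINFOA si;
        PROCESS_INFORMATION pi;
        DWORD exit_code = 1;
        char cmdline[] = "cmd.exe /c exit 3";  /* writable, per the API contract */

        memset(&si, 0, sizeof(si));
        si.cb = sizeof(si);
        memset(&pi, 0, sizeof(pi));

        if (!CreateProcessA(NULL, cmdline, NULL, NULL, TRUE, 0, NULL, NULL,
                            &si, &pi))
            return 1;
        /* approximate UNIX spawn semantics: block until the child exits */
        if (WaitForSingleObject(pi.hProcess, INFINITE) != WAIT_FAILED)
            GetExitCodeProcess(pi.hProcess, &exit_code);
        CloseHandle(pi.hThread);
        CloseHandle(pi.hProcess);
        printf("child exit code: %lu\n", (unsigned long)exit_code);
        return 0;
    }
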
- */ - { - char *cmdline; - char *p; - char *np; - char *ocl; - char *ccl; - char *unquoted; - DWORD exitCode; - STARTUPINFO si; - PROCESS_INFORMATION pi; - - /* - * The following code block gets and processes the original command - * line, replacing the argv[0] equivalent in the command line with - * the path to the new executable and removing the appropriate - * Multiple JRE support options. Note that similar logic exists - * in the platform independent SelectVersion routine, but is - * replicated here due to the syntax of CreateProcess(). - * - * The magic "+ 4" characters added to the command line length are - * 2 possible quotes around the path (argv[0]), a space after the - * path and a terminating null character. - */ - ocl = GetCommandLine(); - np = ccl = JLI_StringDup(ocl); - p = nextarg(&np); /* Discard argv[0] */ - cmdline = (char *)JLI_MemAlloc(strlen(path) + strlen(np) + 4); - if (strchr(path, (int)' ') == NULL && strchr(path, (int)'\t') == NULL) - cmdline = strcpy(cmdline, path); - else - cmdline = strcat(strcat(strcpy(cmdline, "\""), path), "\""); - - while (*np != (char)0) { /* While more command-line */ - p = nextarg(&np); - if (*p != (char)0) { /* If a token was isolated */ - unquoted = unquote(p); - if (*unquoted == '-') { /* Looks like an option */ - if (strcmp(unquoted, "-classpath") == 0 || - strcmp(unquoted, "-cp") == 0) { /* Unique cp syntax */ - cmdline = strcat(strcat(cmdline, " "), p); - p = nextarg(&np); - if (*p != (char)0) /* If a token was isolated */ - cmdline = strcat(strcat(cmdline, " "), p); - } else if (strncmp(unquoted, "-version:", 9) != 0 && - strcmp(unquoted, "-jre-restrict-search") != 0 && - strcmp(unquoted, "-no-jre-restrict-search") != 0) { - cmdline = strcat(strcat(cmdline, " "), p); - } - } else { /* End of options */ - cmdline = strcat(strcat(cmdline, " "), p); - cmdline = strcat(strcat(cmdline, " "), np); - JLI_MemFree((void *)unquoted); - break; - } - JLI_MemFree((void *)unquoted); - } - } - JLI_MemFree((void *)ccl); - - if (_launcher_debug) { - np = ccl = JLI_StringDup(cmdline); - p = nextarg(&np); - printf("ReExec Command: %s (%s)\n", path, p); - printf("ReExec Args: %s\n", np); - JLI_MemFree((void *)ccl); - } - (void)fflush(stdout); - (void)fflush(stderr); - - /* - * The following code is modeled after an approach presented in the - * Microsoft Technical Article "Moving Unix Applications to - * Windows NT" (March 6, 1994) and "Creating Processes" on MSDN - * (February 2005). It approximates UNIX spawn semantics with - * the parent waiting for termination of the child. - */ - memset(&si, 0, sizeof(si)); - si.cb = sizeof(STARTUPINFO); - memset(&pi, 0, sizeof(pi)); - - if (!CreateProcess((LPCTSTR)path, /* executable name */ - (LPTSTR)cmdline, /* command line */ - (LPSECURITY_ATTRIBUTES)NULL, /* process security attr. */ - (LPSECURITY_ATTRIBUTES)NULL, /* thread security attr. */ - (BOOL)TRUE, /* inherits system handles */ - (DWORD)0, /* creation flags */ - (LPVOID)NULL, /* environment block */ - (LPCTSTR)NULL, /* current directory */ - (LPSTARTUPINFO)&si, /* (in) startup information */ - (LPPROCESS_INFORMATION)&pi)) { /* (out) process information */ - ReportSysErrorMessage2("CreateProcess(%s, ...)
failed: %s", - path, JNI_TRUE); - exit(1); - } - - if (WaitForSingleObject(pi.hProcess, INFINITE) != WAIT_FAILED) { - if (GetExitCodeProcess(pi.hProcess, &exitCode) == FALSE) - exitCode = 1; - } else { - ReportErrorMessage("WaitForSingleObject() failed.", JNI_TRUE); - exitCode = 1; - } - - CloseHandle(pi.hThread); - CloseHandle(pi.hProcess); - - exit(exitCode); - } - -} - -#endif /* ifndef GAMMA */ - - -/* - * Wrapper for platform dependent unsetenv function. - */ -int -UnsetEnv(char *name) -{ - int ret; - char *buf = JLI_MemAlloc(strlen(name) + 2); - buf = strcat(strcpy(buf, name), "="); - ret = _putenv(buf); - JLI_MemFree(buf); - return (ret); -} - -/* --- Splash Screen shared library support --- */ - -static const char* SPLASHSCREEN_SO = "\\bin\\splashscreen.dll"; - -static HMODULE hSplashLib = NULL; - -void* SplashProcAddress(const char* name) { - char libraryPath[MAXPATHLEN]; /* some extra space for strcat'ing SPLASHSCREEN_SO */ - - if (!GetJREPath(libraryPath, MAXPATHLEN)) { - return NULL; - } - if (strlen(libraryPath)+strlen(SPLASHSCREEN_SO) >= MAXPATHLEN) { - return NULL; - } - strcat(libraryPath, SPLASHSCREEN_SO); - - if (!hSplashLib) { - hSplashLib = LoadLibrary(libraryPath); - } - if (hSplashLib) { - return GetProcAddress(hSplashLib, name); - } else { - return NULL; - } -} - -void SplashFreeLibrary() { - if (hSplashLib) { - FreeLibrary(hSplashLib); - hSplashLib = NULL; - } -} - -/* - * Block current thread and continue execution in a new thread - */ -int -ContinueInNewThread(int (JNICALL *continuation)(void *), jlong stack_size, void * args) { - int rslt = 0; - unsigned thread_id; - -#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION -#define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) -#endif - - /* - * STACK_SIZE_PARAM_IS_A_RESERVATION is what we want, but it's not - * supported on older version of Windows. Try first with the flag; and - * if that fails try again without the flag. See MSDN document or HotSpot - * source (os_win32.cpp) for details. - */ - HANDLE thread_handle = - (HANDLE)_beginthreadex(NULL, - (unsigned)stack_size, - continuation, - args, - STACK_SIZE_PARAM_IS_A_RESERVATION, - &thread_id); - if (thread_handle == NULL) { - thread_handle = - (HANDLE)_beginthreadex(NULL, - (unsigned)stack_size, - continuation, - args, - 0, - &thread_id); - } - - /* AWT preloading (AFTER main thread start) */ -#ifdef ENABLE_AWT_PRELOAD - /* D3D preloading */ - if (awtPreloadD3D != 0) { - char *envValue; - /* D3D routines checks env.var J2D_D3D if no appropriate - * command line params was specified - */ - envValue = getenv("J2D_D3D"); - if (envValue != NULL && stricmp(envValue, "false") == 0) { - awtPreloadD3D = 0; - } - /* Test that AWT preloading isn't disabled by J2D_D3D_PRELOAD env.var */ - envValue = getenv("J2D_D3D_PRELOAD"); - if (envValue != NULL && stricmp(envValue, "false") == 0) { - awtPreloadD3D = 0; - } - if (awtPreloadD3D < 0) { - /* If awtPreloadD3D is still undefined (-1), test - * if it is turned on by J2D_D3D_PRELOAD env.var. - * By default it's turned OFF. 
- */ - awtPreloadD3D = 0; - if (envValue != NULL && stricmp(envValue, "true") == 0) { - awtPreloadD3D = 1; - } - } - } - if (awtPreloadD3D) { - AWTPreload(D3D_PRELOAD_FUNC); - } -#endif /* ENABLE_AWT_PRELOAD */ - - if (thread_handle) { - WaitForSingleObject(thread_handle, INFINITE); - GetExitCodeThread(thread_handle, &rslt); - CloseHandle(thread_handle); - } else { - rslt = continuation(args); - } - -#ifdef ENABLE_AWT_PRELOAD - if (awtPreloaded) { - AWTPreloadStop(); - } -#endif /* ENABLE_AWT_PRELOAD */ - - return rslt; -} - -/* Linux only; empty on Windows. */ -void SetJavaLauncherPlatformProps() {} - - -//============================== -// AWT preloading -#ifdef ENABLE_AWT_PRELOAD - -typedef int FnPreloadStart(void); -typedef void FnPreloadStop(void); -static FnPreloadStop *fnPreloadStop = NULL; -static HMODULE hPreloadAwt = NULL; - -/* - * Starts AWT preloading - */ -int AWTPreload(const char *funcName) - { - int result = -1; - - // load the AWT library once (in case several preload functions need to be called) - if (hPreloadAwt == NULL) { - // awt.dll is not loaded yet - char libraryPath[MAXPATHLEN]; - int jrePathLen = 0; - HMODULE hJava = NULL; - HMODULE hVerify = NULL; - - while (1) { - // awt.dll depends on jvm.dll & java.dll; - // jvm.dll is already loaded, so we need only java.dll; - // java.dll depends on MSVCRT lib & verify.dll. - if (!GetJREPath(libraryPath, MAXPATHLEN)) { - break; - } - - // save path length - jrePathLen = strlen(libraryPath); - - // load msvcrt 1st - LoadMSVCRT(); - - // load verify.dll - strcat(libraryPath, "\\bin\\verify.dll"); - hVerify = LoadLibrary(libraryPath); - if (hVerify == NULL) { - break; - } - - // restore jrePath - libraryPath[jrePathLen] = 0; - // load java.dll - strcat(libraryPath, "\\bin\\" JAVA_DLL); - hJava = LoadLibrary(libraryPath); - if (hJava == NULL) { - break; - } - - // restore jrePath - libraryPath[jrePathLen] = 0; - // load awt.dll - strcat(libraryPath, "\\bin\\awt.dll"); - hPreloadAwt = LoadLibrary(libraryPath); - if (hPreloadAwt == NULL) { - break; - } - - // get "preloadStop" func ptr - fnPreloadStop = (FnPreloadStop *)GetProcAddress(hPreloadAwt, "preloadStop"); - - break; - } - } - - if (hPreloadAwt != NULL) { - FnPreloadStart *fnInit = (FnPreloadStart *)GetProcAddress(hPreloadAwt, funcName); - if (fnInit != NULL) { - // don't forget to stop preloading - awtPreloaded = 1; - - result = fnInit(); - } - } - - return result; -} - -/* - * Terminates AWT preloading - */ -void AWTPreloadStop() { - if (fnPreloadStop != NULL) { - fnPreloadStop(); - } -} - -#endif /* ENABLE_AWT_PRELOAD */ diff -r e0fb8a213650 -r 836a62f43af9 src/os/windows/launcher/java_md.h --- a/src/os/windows/launcher/java_md.h Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,83 +0,0 @@ -/* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code).
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef JAVA_MD_H -#define JAVA_MD_H - -#include -#include -#include -#ifndef GAMMA -#include "manifest_info.h" -#endif -#include "jli_util.h" - -#ifdef GAMMA -#define stricmp _stricmp -#define strnicmp _strnicmp -#define snprintf _snprintf -#define strdup _strdup -#endif - -#define PATH_SEPARATOR ';' -#define FILESEP "\\" -#define FILE_SEPARATOR '\\' -#define IS_FILE_SEPARATOR(c) ((c) == '\\' || (c) == '/') -#define MAXPATHLEN MAX_PATH -#define MAXNAMELEN MAX_PATH - -#ifdef JAVA_ARGS -/* - * ApplicationHome is prepended to each of these entries; the resulting - * strings are concatenated (separated by PATH_SEPARATOR) and used as the - * value of -cp option to the launcher. - */ -#ifndef APP_CLASSPATH -#define APP_CLASSPATH { "\\lib\\tools.jar", "\\classes" } -#endif -#endif - -/* - * Support for doing cheap, accurate interval timing. - */ -extern jlong CounterGet(void); -extern jlong Counter2Micros(jlong counts); - -#ifdef JAVAW -#define main _main -extern int _main(int argc, char **argv); -#endif - -#define JLONG_FORMAT "%I64d" - -/* - * Function prototypes. - */ -#ifndef GAMMA -char *LocateJRE(manifest_info *info); -void ExecJRE(char *jre, char **argv); -#endif -int UnsetEnv(char *name); - -#endif diff -r e0fb8a213650 -r 836a62f43af9 src/os/windows/vm/chaitin_windows.cpp --- a/src/os/windows/vm/chaitin_windows.cpp Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,78 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" - -// Disallow the use of the frame pointer (EBP) for implicit null exceptions -// on win95/98. If we do not do this, the OS gets confused and gives a stack -// error. 
-void PhaseRegAlloc::pd_preallocate_hook() { -#ifndef _WIN64 - if (ImplicitNullChecks && !os::win32::is_nt()) { - for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) { - Block *block = _cfg._blocks[block_num]; - - Node *block_end = block->end(); - if (block_end->is_MachNullCheck() && - block_end->as_Mach()->ideal_Opcode() != Op_Con) { - // The last instruction in the block is an implicit null check. - // Fix its input so that it does not load into the frame pointer. - _matcher.pd_implicit_null_fixup(block_end->in(1)->as_Mach(), - block_end->as_MachNullCheck()->_vidx); - } - } - } -#else - // WIN64==itanium on XP -#endif -} - -#ifdef ASSERT -// Verify that no implicit null check uses the frame pointer (EBP) as -// its register on win95/98. Use of the frame pointer in an implicit -// null check confuses the OS, yielding a stack error. -void PhaseRegAlloc::pd_postallocate_verify_hook() { -#ifndef _WIN64 - if (ImplicitNullChecks && !os::win32::is_nt()) { - for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) { - Block *block = _cfg._blocks[block_num]; - - Node *block_end = block->_nodes[block->_nodes.size()-1]; - if (block_end->is_MachNullCheck() && block_end->as_Mach()->ideal_Opcode() != Op_Con) { - // The last instruction in the block is an implicit - // null check. Verify that this instruction does not - // use the frame pointer. - int reg = get_reg_first(block_end->in(1)->in(block_end->as_MachNullCheck()->_vidx)); - assert(reg != EBP_num, - "implicit null check using frame pointer on win95/98"); - } - } - } -#else - // WIN64==itanium on XP -#endif -} -#endif diff -r e0fb8a213650 -r 836a62f43af9 src/os/windows/vm/os_windows.cpp --- a/src/os/windows/vm/os_windows.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os/windows/vm/os_windows.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -813,15 +813,21 @@ return result; } -// For now, we say that Windows does not support vtime. I have no idea -// whether it can actually be made to (DLD, 9/13/05). - -bool os::supports_vtime() { return false; } +bool os::supports_vtime() { return true; } bool os::enable_vtime() { return false; } bool os::vtime_enabled() { return false; } + double os::elapsedVTime() { - // better than nothing, but not much - return elapsedTime(); + FILETIME created; + FILETIME exited; + FILETIME kernel; + FILETIME user; + if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { + // the resolution of windows_to_java_time() should be sufficient (ms) + return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; + } else { + return elapsedTime(); + } } jlong os::javaTimeMillis() { @@ -944,6 +950,8 @@ MINIDUMP_TYPE dumpType; static const char* cwd; +// Default is to always create dump for debug builds, on product builds only dump on server versions of Windows. +#ifndef ASSERT // If running on a client version of Windows and user has not explicitly enabled dumping if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) { VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false); @@ -953,6 +961,12 @@ VMError::report_coredump_status("Minidump has been disabled from the command line", false); return; } +#else + if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) { + VMError::report_coredump_status("Minidump has been disabled from the command line", false); + return; + } +#endif dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0); @@ -1004,7 +1018,21 @@ // the dump types we really want. 
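
For reference on the os_windows.cpp hunk above: GetThreadTimes() reports kernel and user CPU time as FILETIMEs in 100 ns units, which the patch converts to milliseconds through HotSpot's windows_to_java_time(). The raw conversion, as a standalone sketch outside the VM:

    #include <windows.h>
    #include <stdio.h>

    /* A FILETIME is a 64-bit count of 100 ns intervals. */
    static double filetime_seconds(const FILETIME *ft) {
        ULARGE_INTEGER v;
        v.LowPart = ft->dwLowDateTime;
        v.HighPart = ft->dwHighDateTime;
        return (double)v.QuadPart * 100e-9;
    }

    int main(void) {
        FILETIME created, exited, kernel, user;
        volatile long i, x = 0;
        for (i = 0; i < 50000000; i++) x += i;   /* burn some CPU time */
        if (GetThreadTimes(GetCurrentThread(), &created, &exited,
                           &kernel, &user) == 0)
            return 1;
        printf("vtime = %.3f s\n",
               filetime_seconds(&kernel) + filetime_seconds(&user));
        return 0;
    }
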
If the first call fails, let's fall back to just using MiniDumpWithFullMemory. if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false && _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) { - VMError::report_coredump_status("Call to MiniDumpWriteDump() failed", false); + DWORD error = GetLastError(); + LPTSTR msgbuf = NULL; + + if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) { + + jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf); + LocalFree(msgbuf); + } else { + // Call to FormatMessage failed, just include the result from GetLastError + jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error); + } + VMError::report_coredump_status(buffer, false); } else { VMError::report_coredump_status(buffer, true); } @@ -1221,8 +1249,10 @@ // Needs to be in os specific directory because windows requires another // header file -const char* os::get_current_directory(char *buf, int buflen) { - return _getcwd(buf, buflen); +const char* os::get_current_directory(char *buf, size_t buflen) { + int n = static_cast<int>(buflen); + if (buflen > INT_MAX) n = INT_MAX; + return _getcwd(buf, n); } //----------------------------------------------------------- @@ -3322,7 +3352,7 @@ assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back } -class HighResolutionInterval { +class HighResolutionInterval : public CHeapObj { // The default timer resolution seems to be 10 milliseconds. // (Where is this written down?) // If someone wants to sleep for only a fraction of the default, @@ -4115,6 +4145,10 @@ return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); } +FILE* os::open(int fd, const char* mode) { + return ::_fdopen(fd, mode); +} + // Is a (classpath) directory empty? bool os::dir_is_empty(const char* path) { WIN32_FIND_DATA fd; @@ -5031,6 +5065,71 @@ return ::setsockopt(fd, level, optname, optval, optlen); } +// WINDOWS CONTEXT Flags for THREAD_SAMPLING +#if defined(IA32) +# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) +#elif defined (AMD64) +# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) +#endif + +// returns true if thread could be suspended, +// false otherwise +static bool do_suspend(HANDLE* h) { + if (h != NULL) { + if (SuspendThread(*h) != ~0) { + return true; + } + } + return false; +} + +// resume the thread +// calling resume on an active thread is a no-op +static void do_resume(HANDLE* h) { + if (h != NULL) { + ResumeThread(*h); + } +} + +// retrieve a suspend/resume context capable handle +// from the tid. Caller validates handle return value.
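
The sampling support added above follows the canonical Windows suspend-sample-resume sequence. A standalone sketch of that sequence against a freshly created spinner thread (the target must not be the calling thread, or SuspendThread would stall the sampler itself):

    #include <windows.h>
    #include <stdio.h>

    /* Suspend a thread, capture its context, resume it.
     * Returns 1 and fills *ctxt on success. */
    static int sample_thread(DWORD tid, CONTEXT *ctxt) {
        HANDLE h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT |
                              THREAD_QUERY_INFORMATION, FALSE, tid);
        int ok = 0;
        if (h == NULL)
            return 0;
        if (SuspendThread(h) != (DWORD)-1) {       /* (DWORD)-1 means failure */
            ctxt->ContextFlags = CONTEXT_FULL;
            ok = GetThreadContext(h, ctxt) != 0;
            ResumeThread(h);                       /* always rebalance */
        }
        CloseHandle(h);
        return ok;
    }

    static DWORD WINAPI spinner(LPVOID arg) {
        (void)arg;
        for (;;) Sleep(1);
    }

    int main(void) {
        DWORD tid;
        CONTEXT ctxt;
        HANDLE t = CreateThread(NULL, 0, spinner, NULL, 0, &tid);
        if (t == NULL)
            return 1;
        Sleep(10);   /* let the spinner get going */
        if (sample_thread(tid, &ctxt))
            printf("sampled thread %lu\n", (unsigned long)tid);
        TerminateThread(t, 0);   /* demo teardown only; unsafe in real code */
        CloseHandle(t);
        return 0;
    }
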
+void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) { + if (h != NULL) { + *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid); + } +} + +// +// Thread sampling implementation +// +void os::SuspendedThreadTask::internal_do_task() { + CONTEXT ctxt; + HANDLE h = NULL; + + // get context capable handle for thread + get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id()); + + // sanity + if (h == NULL || h == INVALID_HANDLE_VALUE) { + return; + } + + // suspend the thread + if (do_suspend(&h)) { + ctxt.ContextFlags = sampling_context_flags; + // get thread context + GetThreadContext(h, &ctxt); + SuspendedThreadTaskContext context(_thread, &ctxt); + // pass context to Thread Sampling impl + do_task(context); + // resume thread + do_resume(&h); + } + + // close handle + CloseHandle(h); +} + // Kernel32 API typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void); diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,10 +30,16 @@ // currently interrupted by SIGPROF bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) { + assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} - assert(Thread::current() == this, "caller must be current thread"); +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) { assert(this->is_Java_thread(), "must be JavaThread"); - JavaThread* jt = (JavaThread *)this; // If we have a last_Java_frame, then we should use it even if diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,13 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, + bool isInJava); + +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava); +public: + // These routines are only used on cpu architectures that // have separate register stacks (Itanium). 
static bool register_stack_overflow() { return false; } diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp --- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -178,7 +178,7 @@ // JVM needs to know exact stack location, abort if it fails if (rslt != 0) { if (rslt == ENOMEM) { - vm_exit_out_of_memory(0, "pthread_getattr_np"); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np"); } else { fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt)); } diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/linux_x86/vm/linux_x86_32.s --- a/src/os_cpu/linux_x86/vm/linux_x86_32.s Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/linux_x86/vm/linux_x86_32.s Wed Jun 19 10:45:56 2013 +0200 @@ -241,7 +241,7 @@ jbe 2f # <= 32 dwords rep; smovl jmp 4f - .=.+8 + .space 8 2: subl %esi,%edi .p2align 4,,15 3: movl (%esi),%edx @@ -378,7 +378,7 @@ rep; smovl jmp 4f # copy aligned dwords - .=.+5 + .space 5 2: subl %esi,%edi .p2align 4,,15 3: movl (%esi),%edx @@ -454,7 +454,7 @@ popl %edi popl %esi ret - .=.+10 + .space 10 2: subl %esi,%edi jmp 4f .p2align 4,,15 diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/linux_x86/vm/os_linux_x86.cpp --- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -93,6 +93,10 @@ register void *esp; __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp)); return (address) ((char*)esp + sizeof(long)*2); +#elif defined(__clang__) + intptr_t* esp; + __asm__ __volatile__ ("mov %%"SPELL_REG_SP", %0":"=r"(esp):); + return (address) esp; #else register void *esp __asm__ (SPELL_REG_SP); return (address) esp; @@ -175,6 +179,9 @@ #ifdef SPARC_WORKS register intptr_t **ebp; __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp)); +#elif defined(__clang__) + intptr_t **ebp; + __asm__ __volatile__ ("mov %%"SPELL_REG_FP", %0":"=r"(ebp):); #else register intptr_t **ebp __asm__ (SPELL_REG_FP); #endif @@ -710,7 +717,7 @@ // JVM needs to know exact stack location, abort if it fails if (rslt != 0) { if (rslt == ENOMEM) { - vm_exit_out_of_memory(0, "pthread_getattr_np"); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np"); } else { fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt)); } diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/linux_x86/vm/thread_linux_x86.cpp --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,8 +32,15 @@ void* ucontext, bool isInJava) { assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) { assert(this->is_Java_thread(), "must be JavaThread"); - JavaThread* jt = (JavaThread *)this; // If we have a last_Java_frame, then we should use it even if diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/linux_x86/vm/thread_linux_x86.hpp --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,11 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava); +public: + // These routines are only used on cpu architectures that // have separate register stacks (Itanium). static bool register_stack_overflow() { return false; } diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/linux_zero/vm/os_linux_zero.cpp --- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -313,7 +313,7 @@ int res = pthread_getattr_np(pthread_self(), &attr); if (res != 0) { if (res == ENOMEM) { - vm_exit_out_of_memory(0, "pthread_getattr_np"); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np"); } else { fatal(err_msg("pthread_getattr_np failed with errno = %d", res)); diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp --- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -194,6 +194,11 @@ return NULL; } +address os::Solaris::ucontext_get_pc(ucontext_t *uc) { + return (address) uc->uc_mcontext.gregs[REG_PC]; +} + + // For Forte Analyzer AsyncGetCallTrace profiling support - thread // is currently interrupted by SIGPROF. // @@ -265,22 +270,6 @@ } } - -void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) { - Thread* thread = args->thread(); - ucontext_t* uc = args->ucontext(); - intptr_t* sp; - - assert(ProfileVM && thread->is_VM_thread(), "just checking"); - - // Skip the mcontext corruption verification. 
If if occasionally - // things get corrupt, it is ok for profiling - we will just get an unresolved - // function name - ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]); - _addr = new_addr; -} - - static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) { char lwpstatusfile[PROCFILE_LENGTH]; int lwpfd, err; @@ -358,13 +347,8 @@ guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs"); if (sig == os::Solaris::SIGasync()) { - if (thread) { - OSThread::InterruptArguments args(thread, uc); - thread->osthread()->do_interrupt_callbacks_at_interrupt(&args); - return true; - } else if (vmthread) { - OSThread::InterruptArguments args(vmthread, uc); - vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args); + if (thread || vmthread) { + OSThread::SR_handler(t, uc); return true; } else if (os::Solaris::chained_handler(sig, info, ucVoid)) { return true; @@ -591,7 +575,7 @@ // on the thread stack, which could get a mapping error when touched. address addr = (address) info->si_addr; if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) { - vm_exit_out_of_memory(0, "Out of swap space to map in thread stack."); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack."); } VMError err(t, sig, pc, info, ucVoid); diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp --- a/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,11 +36,21 @@ void* ucontext, bool isInJava) { assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava, true); +} + +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + // get ucontext somehow + return pd_get_top_frame(fr_addr, ucontext, isInJava, false); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, + void* ucontext, bool isInJava, bool makeWalkable) { assert(this->is_Java_thread(), "must be JavaThread"); JavaThread* jt = (JavaThread *)this; - if (!isInJava) { + if (!isInJava && makeWalkable) { // make_walkable flushes register windows and grabs last_Java_pc // which can not be done if the ucontext sp matches last_Java_sp // stack walking utilities assume last_Java_pc set if marked flushed diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp --- a/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -93,6 +93,11 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava, bool makeWalkable); +public: + // These routines are only used on cpu architectures that // have separate register stacks (Itanium). static bool register_stack_overflow() { return false; } diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp --- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -183,6 +183,10 @@ return (intptr_t*)uc->uc_mcontext.gregs[REG_FP]; } +address os::Solaris::ucontext_get_pc(ucontext_t *uc) { + return (address) uc->uc_mcontext.gregs[REG_PC]; +} + // For Forte Analyzer AsyncGetCallTrace profiling support - thread // is currently interrupted by SIGPROF. // @@ -252,22 +256,6 @@ } } -// This is a simple callback that just fetches a PC for an interrupted thread. -// The thread need not be suspended and the fetched PC is just a hint. -// This one is currently used for profiling the VMThread ONLY! - -// Must be synchronous -void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) { - Thread* thread = args->thread(); - ucontext_t* uc = args->ucontext(); - intptr_t* sp; - - assert(ProfileVM && thread->is_VM_thread(), "just checking"); - - ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]); - _addr = new_addr; -} - static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) { char lwpstatusfile[PROCFILE_LENGTH]; int lwpfd, err; @@ -419,14 +407,8 @@ guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs"); if (sig == os::Solaris::SIGasync()) { - if(thread){ - OSThread::InterruptArguments args(thread, uc); - thread->osthread()->do_interrupt_callbacks_at_interrupt(&args); - return true; - } - else if(vmthread){ - OSThread::InterruptArguments args(vmthread, uc); - vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args); + if(thread || vmthread){ + OSThread::SR_handler(t, uc); return true; } else if (os::Solaris::chained_handler(sig, info, ucVoid)) { return true; @@ -745,7 +727,7 @@ // on the thread stack, which could get a mapping error when touched. address addr = (address) info->si_addr; if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) { - vm_exit_out_of_memory(0, "Out of swap space to map in thread stack."); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack."); } VMError err(t, sig, pc, info, ucVoid); diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,8 +30,17 @@ // currently interrupted by SIGPROF bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) { + assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} - assert(Thread::current() == this, "caller must be current thread"); +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, + void* ucontext, bool isInJava) { + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, + void* ucontext, bool isInJava) { assert(this->is_Java_thread(), "must be JavaThread"); JavaThread* jt = (JavaThread *)this; diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,6 +54,12 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, + bool isInJava); +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, + bool isInJava); +public: // These routines are only used on cpu architectures that // have separate register stacks (Itanium). diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/windows_x86/vm/thread_windows_x86.cpp --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,15 @@ void* ucontext, bool isInJava) { assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) { + assert(this->is_Java_thread(), "must be JavaThread"); JavaThread* jt = (JavaThread *)this; @@ -87,4 +96,3 @@ } void JavaThread::cache_global_variables() { } - diff -r e0fb8a213650 -r 836a62f43af9 src/os_cpu/windows_x86/vm/thread_windows_x86.hpp --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,6 +58,12 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); + +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava); + + public: // These routines are only used on cpu architectures that // have separate register stacks (Itanium). static bool register_stack_overflow() { return false; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/tools/ProjectCreator/BuildConfig.java --- a/src/share/tools/ProjectCreator/BuildConfig.java Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/tools/ProjectCreator/BuildConfig.java Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,25 +65,7 @@ String sourceBase = getFieldString(null, "SourceBase"); String buildSpace = getFieldString(null, "BuildSpace"); String outDir = buildBase; - String value = System.getenv("OUT_DIR"); - if (value != null) { - outDir = value; - } - - int lastDirectorySeparator = Math.max(outDir.lastIndexOf("/"), outDir.lastIndexOf("\\")); - if (lastDirectorySeparator >= 0) { - outDir = outDir.substring(0, lastDirectorySeparator); - } - - outDir += Util.sep + build + Util.sep + "jre" + Util.sep + "bin"; - if (flavour.equals("graal")) { - outDir += Util.sep + "graal"; - } else if (flavour.equals("compiler1")) { - outDir += Util.sep + "client"; - } else { - outDir += Util.sep + "server"; - } - buildBase = outDir; + String jdkTargetRoot = getFieldString(null, "JdkTargetRoot"); put("Id", flavourBuild); put("OutputDir", outDir); @@ -91,6 +73,7 @@ put("BuildBase", buildBase); put("BuildSpace", buildSpace); put("OutputDll", outDir + Util.sep + outDll); + put("JdkTargetRoot", jdkTargetRoot); context = new String [] {flavourBuild, flavour, build, null}; } @@ -181,7 +164,7 @@ sysDefines.add("_WINDOWS"); sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\""); sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\""); - sysDefines.add("INCLUDE_TRACE"); + sysDefines.add("INCLUDE_TRACE=1"); sysDefines.add("_JNI_IMPLEMENTATION_"); if (vars.get("PlatformName").equals("Win32")) { sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\""); diff -r e0fb8a213650 -r 836a62f43af9 src/share/tools/ProjectCreator/WinGammaPlatformVC10.java --- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Wed Jun 19 10:45:56 2013 +0200 @@ -98,11 +98,6 @@ tagV(cfg.getV("LinkerFlags")); endTag(); - startTag("PostBuildEvent"); - tagData("Message", BuildConfig.getFieldString(null, "PostbuildDescription")); - tagData("Command", cfg.expandFormat(BuildConfig.getFieldString(null, "PostbuildCommand").replace("\t", "\r\n"))); - endTag(); - startTag("PreLinkEvent"); tagData("Message", BuildConfig.getFieldString(null, "PrelinkDescription")); tagData("Command", cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand").replace("\t", "\r\n"))); @@ -141,7 +136,9 @@ for (BuildConfig cfg : allConfigs) { startTag(cfg, "PropertyGroup"); - tagData("LocalDebuggerCommand", "$(TargetDir)/hotspot.exe"); + tagData("LocalDebuggerCommand", 
cfg.get("JdkTargetRoot") + "\\bin\\java.exe"); + tagData("LocalDebuggerCommandArguments", "-XXaltjvm=$(TargetDir) -Dsun.java.launcher=gamma"); + tagData("LocalDebuggerEnvironment", "JAVA_HOME=" + cfg.get("JdkTargetRoot")); endTag(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/tools/hsdis/hsdis.c --- a/src/share/tools/hsdis/hsdis.c Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/tools/hsdis/hsdis.c Wed Jun 19 10:45:56 2013 +0200 @@ -27,6 +27,7 @@ HotSpot PrintAssembly option. */ +#include /* required by bfd.h */ #include #include #include diff -r e0fb8a213650 -r 836a62f43af9 src/share/tools/launcher/java.c --- a/src/share/tools/launcher/java.c Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,2080 +0,0 @@ -/* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/* - * Gamma (Hotspot internal engineering test) launcher based on 6.0u22 JDK, - * search "GAMMA" for gamma specific changes. - * - * GAMMA: gamma launcher is much simpler than regular java launcher in that - * JVM is either statically linked in or it is installed in the - * same directory where the launcher exists, so we don't have to - * worry about choosing the right JVM based on command line flag, jar - * file and/or ergonomics. Instead of removing unused logic from the - * source, it is commented out with #ifndef GAMMA; hopefully it'll be easier - * to maintain this file in sync with the regular JDK launcher. - */ - -/* - * Shared source for 'java' command line tool. - * - * If JAVA_ARGS is defined, then it acts as a launcher for applications. For - * instance, the JDK command line tools such as javac and javadoc (see - * makefiles for more details) are built with this program. Any arguments - * prefixed with '-J' will be passed directly to the 'java' command. - */ - -#ifdef GAMMA -# ifdef JAVA_ARGS -# error Do NOT define JAVA_ARGS when building gamma launcher -# endif -# if !defined(LINK_INTO_AOUT) && !defined(LINK_INTO_LIBJVM) -# error Either LINK_INTO_AOUT or LINK_INTO_LIBJVM must be defined -# endif -#endif - -/* - * One job of the launcher is to remove command line options which the - * vm does not understand and will not process. These options include - * options which select which style of vm is run (e.g. -client and - * -server) as well as options which select the data model to use. - * Additionally, for tools which invoke an underlying vm "-J-foo" - * options are turned into "-foo" options to the vm. This option
This option - * filtering is handled in a number of places in the launcher, some of - * it in machine-dependent code. In this file, the function - * CheckJVMType removes vm style options and TranslateApplicationArgs - * removes "-J" prefixes. On unix platforms, the - * CreateExecutionEnvironment function from the unix java_md.c file - * processes and removes -d options. However, in case - * CreateExecutionEnvironment does not need to exec because - * LD_LIBRARY_PATH is set acceptably and the data model does not need - * to be changed, ParseArguments will screen out the redundant -d - * options and prevent them from being passed to the vm; this is done - * by using the machine-dependent call - * RemovableMachineDependentOption. - */ - -#include -#include -#include - -#include -#include -#include "java.h" -#ifndef GAMMA -#include "manifest_info.h" -#include "version_comp.h" -#include "splashscreen.h" -#endif -#include "wildcard.h" - -#ifndef FULL_VERSION -#define FULL_VERSION JDK_MAJOR_VERSION "." JDK_MINOR_VERSION -#endif - -/* - * The following environment variable is used to influence the behavior - * of the jre exec'd through the SelectVersion routine. The command line - * options which specify the version are not passed to the exec'd version, - * because that jre may be an older version which wouldn't recognize them. - * This environment variable is known to this (and later) version and serves - * to suppress the version selection code. This is not only for efficiency, - * but also for correctness, since any command line options have been - * removed which would cause any value found in the manifest to be used. - * This would be incorrect because the command line options are defined - * to take precedence. - * - * The value associated with this environment variable is the MainClass - * name from within the executable jar file (if any). This is strictly a - * performance enhancement to avoid re-reading the jar file manifest. - * - * A NOTE TO DEVELOPERS: For performance reasons it is important that - * the program image remain relatively small until after SelectVersion - * CreateExecutionEnvironment have finished their possibly recursive - * processing. Watch everything, but resist all temptations to use Java - * interfaces. - */ -#define ENV_ENTRY "_JAVA_VERSION_SET" - -#ifndef GAMMA -#define SPLASH_FILE_ENV_ENTRY "_JAVA_SPLASH_FILE" -#define SPLASH_JAR_ENV_ENTRY "_JAVA_SPLASH_JAR" -#endif - -static jboolean printVersion = JNI_FALSE; /* print and exit */ -static jboolean showVersion = JNI_FALSE; /* print but continue */ -static char *progname; -jboolean _launcher_debug = JNI_FALSE; - -#ifndef GAMMA -/* - * Entries for splash screen environment variables. - * putenv is performed in SelectVersion. We need - * them in memory until UnsetEnv, so they are made static - * global instead of auto local. - */ -static char* splash_file_entry = NULL; -static char* splash_jar_entry = NULL; -#endif - -/* - * List of VM options to be specified when the VM is created. - */ -static JavaVMOption *options; -static int numOptions, maxOptions; - -/* - * Prototypes for functions internal to launcher. 
- */ -static void SetClassPath(const char *s); -static void SelectVersion(int argc, char **argv, char **main_class); -static jboolean ParseArguments(int *pargc, char ***pargv, char **pjarfile, - char **pclassname, int *pret, const char *jvmpath); -static jboolean InitializeJVM(JavaVM **pvm, JNIEnv **penv, - InvocationFunctions *ifn); -static jstring NewPlatformString(JNIEnv *env, char *s); -static jobjectArray NewPlatformStringArray(JNIEnv *env, char **strv, int strc); -static jclass LoadClass(JNIEnv *env, char *name); -static jstring GetMainClassName(JNIEnv *env, char *jarname); -static void SetJavaCommandLineProp(char* classname, char* jarfile, int argc, char** argv); -static void SetJavaLauncherProp(void); - -#ifdef JAVA_ARGS -static void TranslateApplicationArgs(int *pargc, char ***pargv); -static jboolean AddApplicationOptions(void); -#endif - -static void PrintJavaVersion(JNIEnv *env); -static void PrintUsage(void); -static jint PrintXUsage(const char *jvmpath); - -static void SetPaths(int argc, char **argv); - -#ifndef GAMMA - -/* Maximum supported entries from jvm.cfg. */ -#define INIT_MAX_KNOWN_VMS 10 -/* Values for vmdesc.flag */ -#define VM_UNKNOWN -1 -#define VM_KNOWN 0 -#define VM_ALIASED_TO 1 -#define VM_WARN 2 -#define VM_ERROR 3 -#define VM_IF_SERVER_CLASS 4 -#define VM_IGNORE 5 -struct vmdesc { - char *name; - int flag; - char *alias; - char *server_class; -}; -static struct vmdesc *knownVMs = NULL; -static int knownVMsCount = 0; -static int knownVMsLimit = 0; - -static void GrowKnownVMs(); -static int KnownVMIndex(const char* name); -static void FreeKnownVMs(); -static void ShowSplashScreen(); - -#endif /* ifndef GAMMA */ - -jboolean ServerClassMachine(); - -/* flag which if set suppresses error messages from the launcher */ -static int noExitErrorMessage = 0; - -/* - * Running Java code in primordial thread caused many problems. We will - * create a new thread to invoke JVM. See 6316197 for more information. - */ -static jlong threadStackSize = 0; /* stack size of the new thread */ - -int JNICALL JavaMain(void * args); /* entry point */ - -struct JavaMainArgs { - int argc; - char ** argv; - char * jarfile; - char * classname; - InvocationFunctions ifn; -}; - -/* - * Entry point. - */ -int -main(int argc, char ** argv) -{ - char *jarfile = 0; - char *classname = 0; - char *s = 0; - char *main_class = NULL; - int ret; - InvocationFunctions ifn; - jlong start, end; - char jrepath[MAXPATHLEN], jvmpath[MAXPATHLEN]; - char ** original_argv = argv; - - if (getenv("_JAVA_LAUNCHER_DEBUG") != 0) { - _launcher_debug = JNI_TRUE; - printf("----_JAVA_LAUNCHER_DEBUG----\n"); - } - -#ifndef GAMMA - /* - * Make sure the specified version of the JRE is running. - * - * There are three things to note about the SelectVersion() routine: - * 1) If the version running isn't correct, this routine doesn't - * return (either the correct version has been exec'd or an error - * was issued). - * 2) Argc and Argv in this scope are *not* altered by this routine. - * It is the responsibility of subsequent code to ignore the - * arguments handled by this routine. - * 3) As a side-effect, the variable "main_class" is guaranteed to - * be set (if it should ever be set). This isn't exactly the - * poster child for structured programming, but it is a small - * price to pay for not processing a jar file operand twice. - * (Note: This side effect has been disabled. See comment on - * bugid 5030265 below.) 
- */ - SelectVersion(argc, argv, &main_class); -#endif /* ifndef GAMMA */ - - /* copy original argv */ - { - int i; - original_argv = (char**)JLI_MemAlloc(sizeof(char*)*(argc+1)); - for(i = 0; i < argc+1; i++) - original_argv[i] = argv[i]; - } - - CreateExecutionEnvironment(&argc, &argv, - jrepath, sizeof(jrepath), - jvmpath, sizeof(jvmpath), - original_argv); - - printf("Using java runtime at: %s\n", jrepath); - - ifn.CreateJavaVM = 0; - ifn.GetDefaultJavaVMInitArgs = 0; - - if (_launcher_debug) - start = CounterGet(); - if (!LoadJavaVM(jvmpath, &ifn)) { - exit(6); - } - if (_launcher_debug) { - end = CounterGet(); - printf("%ld micro seconds to LoadJavaVM\n", - (long)(jint)Counter2Micros(end-start)); - } - -#ifdef JAVA_ARGS /* javac, jar and friends. */ - progname = "java"; -#else /* java, oldjava, javaw and friends */ -#ifdef PROGNAME - progname = PROGNAME; -#else - progname = *argv; - if ((s = strrchr(progname, FILE_SEPARATOR)) != 0) { - progname = s + 1; - } -#endif /* PROGNAME */ -#endif /* JAVA_ARGS */ - ++argv; - --argc; - -#ifdef JAVA_ARGS - /* Preprocess wrapper arguments */ - TranslateApplicationArgs(&argc, &argv); - if (!AddApplicationOptions()) { - exit(1); - } -#endif - - /* Set default CLASSPATH */ - if ((s = getenv("CLASSPATH")) == 0) { - s = "."; - } -#ifndef JAVA_ARGS - SetClassPath(s); -#endif - - /* - * Parse command line options; if the return value of - * ParseArguments is false, the program should exit. - */ - if (!ParseArguments(&argc, &argv, &jarfile, &classname, &ret, jvmpath)) { - exit(ret); - } - - /* Override class path if -jar flag was specified */ - if (jarfile != 0) { - SetClassPath(jarfile); - } - - /* set the -Dsun.java.command pseudo property */ - SetJavaCommandLineProp(classname, jarfile, argc, argv); - - /* Set the -Dsun.java.launcher pseudo property */ - SetJavaLauncherProp(); - - /* set the -Dsun.java.launcher.* platform properties */ - SetJavaLauncherPlatformProps(); - -#ifndef GAMMA - /* Show the splash screen if needed */ - ShowSplashScreen(); -#endif - - /* - * Done with all command line processing and potential re-execs so - * clean up the environment. - */ - (void)UnsetEnv(ENV_ENTRY); -#ifndef GAMMA - (void)UnsetEnv(SPLASH_FILE_ENV_ENTRY); - (void)UnsetEnv(SPLASH_JAR_ENV_ENTRY); - - JLI_MemFree(splash_jar_entry); - JLI_MemFree(splash_file_entry); -#endif - - /* - * If user doesn't specify stack size, check if VM has a preference. - * Note that HotSpot no longer supports JNI_VERSION_1_1 but it will - * return its default stack size through the init args structure. 
- */ - if (threadStackSize == 0) { - struct JDK1_1InitArgs args1_1; - memset((void*)&args1_1, 0, sizeof(args1_1)); - args1_1.version = JNI_VERSION_1_1; - ifn.GetDefaultJavaVMInitArgs(&args1_1); /* ignore return value */ - if (args1_1.javaStackSize > 0) { - threadStackSize = args1_1.javaStackSize; - } - } - - { /* Create a new thread to create JVM and invoke main method */ - struct JavaMainArgs args; - - args.argc = argc; - args.argv = argv; - args.jarfile = jarfile; - args.classname = classname; - args.ifn = ifn; - - return ContinueInNewThread(JavaMain, threadStackSize, (void*)&args); - } -} - -int JNICALL -JavaMain(void * _args) -{ - struct JavaMainArgs *args = (struct JavaMainArgs *)_args; - int argc = args->argc; - char **argv = args->argv; - char *jarfile = args->jarfile; - char *classname = args->classname; - InvocationFunctions ifn = args->ifn; - - JavaVM *vm = 0; - JNIEnv *env = 0; - jstring mainClassName; - jclass mainClass; - jmethodID mainID; - jobjectArray mainArgs; - int ret = 0; - jlong start, end; - - /* - * Error message to print or display; by default the message will - * only be displayed in a window. - */ - char * message = "Fatal exception occurred. Program will exit."; - jboolean messageDest = JNI_FALSE; - - /* Initialize the virtual machine */ - - if (_launcher_debug) - start = CounterGet(); - if (!InitializeJVM(&vm, &env, &ifn)) { - ReportErrorMessage("Could not create the Java virtual machine.", - JNI_TRUE); - exit(1); - } - - if (printVersion || showVersion) { - PrintJavaVersion(env); - if ((*env)->ExceptionOccurred(env)) { - ReportExceptionDescription(env); - goto leave; - } - if (printVersion) { - ret = 0; - message = NULL; - goto leave; - } - if (showVersion) { - fprintf(stderr, "\n"); - } - } - - /* If the user specified neither a class name nor a JAR file */ - if (jarfile == 0 && classname == 0) { - PrintUsage(); - message = NULL; - goto leave; - } - -#ifndef GAMMA - FreeKnownVMs(); /* after last possible PrintUsage() */ -#endif - - if (_launcher_debug) { - end = CounterGet(); - printf("%ld micro seconds to InitializeJVM\n", - (long)(jint)Counter2Micros(end-start)); - } - - /* At this stage, argc/argv have the applications' arguments */ - if (_launcher_debug) { - int i = 0; - printf("Main-Class is '%s'\n", classname ? classname : ""); - printf("Apps' argc is %d\n", argc); - for (; i < argc; i++) { - printf(" argv[%2d] = '%s'\n", i, argv[i]); - } - } - - ret = 1; - - /* - * Get the application's main class. - * - * See bugid 5030265. The Main-Class name has already been parsed - * from the manifest, but not parsed properly for UTF-8 support. - * Hence the code here ignores the value previously extracted and - * uses the pre-existing code to reextract the value. This is - * possibly an end of release cycle expedient. However, it has - * also been discovered that passing some character sets through - * the environment has "strange" behavior on some variants of - * Windows. Hence, maybe the manifest parsing code local to the - * launcher should never be enhanced. - * - * Hence, future work should either: - * 1) Correct the local parsing code and verify that the - * Main-Class attribute gets properly passed through - * all environments, - * 2) Remove the vestiges of maintaining main_class through - * the environment (and remove these comments).
- */ - if (jarfile != 0) { - mainClassName = GetMainClassName(env, jarfile); - if ((*env)->ExceptionOccurred(env)) { - ReportExceptionDescription(env); - goto leave; - } - if (mainClassName == NULL) { - const char * format = "Failed to load Main-Class manifest " - "attribute from\n%s"; - message = (char*)JLI_MemAlloc((strlen(format) + strlen(jarfile)) * - sizeof(char)); - sprintf(message, format, jarfile); - messageDest = JNI_TRUE; - goto leave; - } - classname = (char *)(*env)->GetStringUTFChars(env, mainClassName, 0); - if (classname == NULL) { - ReportExceptionDescription(env); - goto leave; - } - mainClass = LoadClass(env, classname); - if(mainClass == NULL) { /* exception occurred */ - const char * format = "Could not find the main class: %s. Program will exit."; - ReportExceptionDescription(env); - message = (char *)JLI_MemAlloc((strlen(format) + - strlen(classname)) * sizeof(char) ); - messageDest = JNI_TRUE; - sprintf(message, format, classname); - goto leave; - } - (*env)->ReleaseStringUTFChars(env, mainClassName, classname); - } else { - mainClassName = NewPlatformString(env, classname); - if (mainClassName == NULL) { - const char * format = "Failed to load Main Class: %s"; - message = (char *)JLI_MemAlloc((strlen(format) + strlen(classname)) * - sizeof(char) ); - sprintf(message, format, classname); - messageDest = JNI_TRUE; - goto leave; - } - classname = (char *)(*env)->GetStringUTFChars(env, mainClassName, 0); - if (classname == NULL) { - ReportExceptionDescription(env); - goto leave; - } - mainClass = LoadClass(env, classname); - if(mainClass == NULL) { /* exception occurred */ - const char * format = "Could not find the main class: %s. Program will exit."; - ReportExceptionDescription(env); - message = (char *)JLI_MemAlloc((strlen(format) + - strlen(classname)) * sizeof(char) ); - messageDest = JNI_TRUE; - sprintf(message, format, classname); - goto leave; - } - (*env)->ReleaseStringUTFChars(env, mainClassName, classname); - } - - /* Get the application's main method */ - mainID = (*env)->GetStaticMethodID(env, mainClass, "main", - "([Ljava/lang/String;)V"); - if (mainID == NULL) { - if ((*env)->ExceptionOccurred(env)) { - ReportExceptionDescription(env); - } else { - message = "No main method found in specified class."; - messageDest = JNI_TRUE; - } - goto leave; - } - - { /* Make sure the main method is public */ - jint mods; - jmethodID mid; - jobject obj = (*env)->ToReflectedMethod(env, mainClass, - mainID, JNI_TRUE); - - if( obj == NULL) { /* exception occurred */ - ReportExceptionDescription(env); - goto leave; - } - - mid = - (*env)->GetMethodID(env, - (*env)->GetObjectClass(env, obj), - "getModifiers", "()I"); - if ((*env)->ExceptionOccurred(env)) { - ReportExceptionDescription(env); - goto leave; - } - - mods = (*env)->CallIntMethod(env, obj, mid); - if ((mods & 1) == 0) { /* if (!Modifier.isPublic(mods)) ... */ - message = "Main method not public."; - messageDest = JNI_TRUE; - goto leave; - } - } - - /* Build argument array */ - mainArgs = NewPlatformStringArray(env, argv, argc); - if (mainArgs == NULL) { - ReportExceptionDescription(env); - goto leave; - } - - /* Invoke main method. */ - (*env)->CallStaticVoidMethod(env, mainClass, mainID, mainArgs); - - /* - * The launcher's exit code (in the absence of calls to - * System.exit) will be non-zero if main threw an exception. - */ - ret = (*env)->ExceptionOccurred(env) == NULL ? 0 : 1; - - /* - * Detach the main thread so that it appears to have ended when - * the application's main method exits.
This will invoke the - * uncaught exception handler machinery if main threw an - * exception. An uncaught exception handler cannot change the - * launcher's return code except by calling System.exit. - */ - if ((*vm)->DetachCurrentThread(vm) != 0) { - message = "Could not detach main thread."; - messageDest = JNI_TRUE; - ret = 1; - goto leave; - } - - message = NULL; - - leave: - /* - * Wait for all non-daemon threads to end, then destroy the VM. - * This will actually create a trivial new Java waiter thread - * named "DestroyJavaVM", but this will be seen as a different - * thread from the one that executed main, even though they are - * the same C thread. This allows mainThread.join() and - * mainThread.isAlive() to work as expected. - */ - (*vm)->DestroyJavaVM(vm); - - if(message != NULL && !noExitErrorMessage) - ReportErrorMessage(message, messageDest); - return ret; -} - -#ifndef GAMMA -/* - * Checks the command line options to find which JVM type was - * specified. If no command line option was given for the JVM type, - * the default type is used. The environment variable - * JDK_ALTERNATE_VM and the command line option -XXaltjvm= are also - * checked as ways of specifying which JVM type to invoke. - */ -char * -CheckJvmType(int *pargc, char ***argv, jboolean speculative) { - int i, argi; - int argc; - char **newArgv; - int newArgvIdx = 0; - int isVMType; - int jvmidx = -1; - char *jvmtype = getenv("JDK_ALTERNATE_VM"); - - argc = *pargc; - - /* To make things simpler we always copy the argv array */ - newArgv = JLI_MemAlloc((argc + 1) * sizeof(char *)); - - /* The program name is always present */ - newArgv[newArgvIdx++] = (*argv)[0]; - - for (argi = 1; argi < argc; argi++) { - char *arg = (*argv)[argi]; - isVMType = 0; - -#ifdef JAVA_ARGS - if (arg[0] != '-') { - newArgv[newArgvIdx++] = arg; - continue; - } -#else - if (strcmp(arg, "-classpath") == 0 || - strcmp(arg, "-cp") == 0) { - newArgv[newArgvIdx++] = arg; - argi++; - if (argi < argc) { - newArgv[newArgvIdx++] = (*argv)[argi]; - } - continue; - } - if (arg[0] != '-') break; -#endif - - /* Did the user pass an explicit VM type? */ - i = KnownVMIndex(arg); - if (i >= 0) { - jvmtype = knownVMs[jvmidx = i].name + 1; /* skip the - */ - isVMType = 1; - *pargc = *pargc - 1; - } - - /* Did the user specify an "alternate" VM? */ - else if (strncmp(arg, "-XXaltjvm=", 10) == 0 || strncmp(arg, "-J-XXaltjvm=", 12) == 0) { - isVMType = 1; - jvmtype = arg+((arg[1]=='X')? 10 : 12); - jvmidx = -1; - } - - if (!isVMType) { - newArgv[newArgvIdx++] = arg; - } - } - - /* - * Finish copying the arguments if we aborted the above loop. - * NOTE that if we aborted via "break" then we did NOT copy the - * last argument above, and in addition argi will be less than - * argc. - */ - while (argi < argc) { - newArgv[newArgvIdx++] = (*argv)[argi]; - argi++; - } - - /* argv is null-terminated */ - newArgv[newArgvIdx] = 0; - - /* Copy back argv */ - *argv = newArgv; - *pargc = newArgvIdx; - - /* use the default VM type if not specified (no alias processing) */ - if (jvmtype == NULL) { - char* result = knownVMs[0].name+1; - /* Use a different VM type if we are on a server class machine? 
*/ - if ((knownVMs[0].flag == VM_IF_SERVER_CLASS) && - (ServerClassMachine() == JNI_TRUE)) { - result = knownVMs[0].server_class+1; - } - if (_launcher_debug) { - printf("Default VM: %s\n", result); - } - return result; - } - - /* if using an alternate VM, no alias processing */ - if (jvmidx < 0) - return jvmtype; - - /* Resolve aliases first */ - { - int loopCount = 0; - while (knownVMs[jvmidx].flag == VM_ALIASED_TO) { - int nextIdx = KnownVMIndex(knownVMs[jvmidx].alias); - - if (loopCount > knownVMsCount) { - if (!speculative) { - ReportErrorMessage("Error: Corrupt jvm.cfg file; cycle in alias list.", - JNI_TRUE); - exit(1); - } else { - return "ERROR"; - /* break; */ - } - } - - if (nextIdx < 0) { - if (!speculative) { - ReportErrorMessage2("Error: Unable to resolve VM alias %s", - knownVMs[jvmidx].alias, JNI_TRUE); - exit(1); - } else { - return "ERROR"; - } - } - jvmidx = nextIdx; - jvmtype = knownVMs[jvmidx].name+1; - loopCount++; - } - } - - switch (knownVMs[jvmidx].flag) { - case VM_WARN: - if (!speculative) { - fprintf(stderr, "Warning: %s VM not supported; %s VM will be used\n", - jvmtype, knownVMs[0].name + 1); - } - /* fall through */ - case VM_IGNORE: - jvmtype = knownVMs[jvmidx=0].name + 1; - /* fall through */ - case VM_KNOWN: - break; - case VM_ERROR: - if (!speculative) { - ReportErrorMessage2("Error: %s VM not supported", jvmtype, JNI_TRUE); - exit(1); - } else { - return "ERROR"; - } - } - - return jvmtype; -} -#endif /* ifndef GAMMA */ - -# define KB (1024UL) -# define MB (1024UL * KB) -# define GB (1024UL * MB) - -/* copied from HotSpot function "atomll()" */ -static int -parse_stack_size(const char *s, jlong *result) { - jlong n = 0; - int args_read = sscanf(s, JLONG_FORMAT, &n); - if (args_read != 1) { - return 0; - } - while (*s != '\0' && *s >= '0' && *s <= '9') { - s++; - } - // 4705540: illegal if more characters are found after the first non-digit - if (strlen(s) > 1) { - return 0; - } - switch (*s) { - case 'T': case 't': - *result = n * GB * KB; - return 1; - case 'G': case 'g': - *result = n * GB; - return 1; - case 'M': case 'm': - *result = n * MB; - return 1; - case 'K': case 'k': - *result = n * KB; - return 1; - case '\0': - *result = n; - return 1; - default: - /* Create JVM with default stack and let VM handle malformed -Xss string*/ - return 0; - } -} - -/* - * Adds a new VM option with the given name and value. - */ -void -AddOption(char *str, void *info) -{ - /* - * Expand options array if needed to accommodate at least one more - * VM option. - */ - if (numOptions >= maxOptions) { - if (options == 0) { - maxOptions = 4; - options = JLI_MemAlloc(maxOptions * sizeof(JavaVMOption)); - } else { - JavaVMOption *tmp; - maxOptions *= 2; - tmp = JLI_MemAlloc(maxOptions * sizeof(JavaVMOption)); - memcpy(tmp, options, numOptions * sizeof(JavaVMOption)); - JLI_MemFree(options); - options = tmp; - } - } - options[numOptions].optionString = str; - options[numOptions++].extraInfo = info; - - if (strncmp(str, "-Xss", 4) == 0) { - jlong tmp; - if (parse_stack_size(str + 4, &tmp)) { - threadStackSize = tmp; - } - } -} - -static void -SetClassPath(const char *s) -{ - char *def; - s = JLI_WildcardExpandClasspath(s); - def = JLI_MemAlloc(strlen(s) + 40); - sprintf(def, "-Djava.class.path=%s", s); - AddOption(def, NULL); -} - -#ifndef GAMMA -/* - * The SelectVersion() routine ensures that an appropriate version of - * the JRE is running.
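As an aside, the suffix handling implemented by parse_stack_size() above can be exercised in isolation. The following harness is a sketch for illustration only: parse_size(), the use of long long in place of jlong, and the sample strings are assumptions, not part of java.c.

#include <stdio.h>
#include <string.h>

/* Same accept/reject behavior as parse_stack_size() above: a number,
   then at most one of K/M/G/T (either case), and nothing after it. */
static int parse_size(const char *s, long long *result) {
    long long n = 0;
    if (sscanf(s, "%lld", &n) != 1) return 0;   /* no leading number */
    while (*s >= '0' && *s <= '9') s++;         /* skip the digits */
    if (strlen(s) > 1) return 0;                /* 4705540: trailing junk */
    switch (*s) {
    case 'T': case 't': *result = n << 40; return 1;
    case 'G': case 'g': *result = n << 30; return 1;
    case 'M': case 'm': *result = n << 20; return 1;
    case 'K': case 'k': *result = n << 10; return 1;
    case '\0':          *result = n;       return 1;
    default:            return 0;  /* leave a malformed -Xss to the VM */
    }
}

int main(void) {
    long long v;
    if (parse_size("512k", &v))   printf("-Xss512k  -> %lld bytes\n", v); /* 524288 */
    if (!parse_size("512kb", &v)) printf("-Xss512kb -> rejected\n");
    return 0;
}

So for -Xss512k, AddOption() above ends up recording threadStackSize = 524288, while a string with a malformed suffix is passed through unchanged for the VM itself to reject.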
The specification for the appropriate version - * is obtained from either the manifest of a jar file (preferred) or - * from command line options. - * The routine also parses splash screen command line options and - * passes on their values in private environment variables. - */ -static void -SelectVersion(int argc, char **argv, char **main_class) -{ - char *arg; - char **new_argv; - char **new_argp; - char *operand; - char *version = NULL; - char *jre = NULL; - int jarflag = 0; - int headlessflag = 0; - int restrict_search = -1; /* -1 implies not known */ - manifest_info info; - char env_entry[MAXNAMELEN + 24] = ENV_ENTRY "="; - char *splash_file_name = NULL; - char *splash_jar_name = NULL; - char *env_in; - int res; - - /* - * If the version has already been selected, set *main_class - * with the value passed through the environment (if any) and - * simply return. - */ - if ((env_in = getenv(ENV_ENTRY)) != NULL) { - if (*env_in != '\0') - *main_class = JLI_StringDup(env_in); - return; - } - - /* - * Scan through the arguments for options relevant to multiple JRE - * support. For reference, the command line syntax is defined as: - * - * SYNOPSIS - * java [options] class [argument...] - * - * java [options] -jar file.jar [argument...] - * - * As the scan is performed, make a copy of the argument list with - * the version specification options (new to 1.5) removed, so that - * a version less than 1.5 can be exec'd. - * - * Note that due to the syntax of the native Windows interface - * CreateProcess(), processing similar to the following exists in - * the Windows platform specific routine ExecJRE (in java_md.c). - * Changes here should be reproduced there. - */ - new_argv = JLI_MemAlloc((argc + 1) * sizeof(char*)); - new_argv[0] = argv[0]; - new_argp = &new_argv[1]; - argc--; - argv++; - while ((arg = *argv) != 0 && *arg == '-') { - if (strncmp(arg, "-version:", 9) == 0) { - version = arg + 9; - } else if (strcmp(arg, "-jre-restrict-search") == 0) { - restrict_search = 1; - } else if (strcmp(arg, "-no-jre-restrict-search") == 0) { - restrict_search = 0; - } else { - if (strcmp(arg, "-jar") == 0) - jarflag = 1; - /* deal with "unfortunate" classpath syntax */ - if ((strcmp(arg, "-classpath") == 0 || strcmp(arg, "-cp") == 0) && - (argc >= 2)) { - *new_argp++ = arg; - argc--; - argv++; - arg = *argv; - } - - /* - * Checking for headless toolkit option in the same way as AWT does: - * "true" means true and any other value means false - */ - if (strcmp(arg, "-Djava.awt.headless=true") == 0) { - headlessflag = 1; - } else if (strncmp(arg, "-Djava.awt.headless=", 20) == 0) { - headlessflag = 0; - } else if (strncmp(arg, "-splash:", 8) == 0) { - splash_file_name = arg+8; - } - *new_argp++ = arg; - } - argc--; - argv++; - } - if (argc <= 0) { /* No operand? Possibly legit with -[full]version */ - operand = NULL; - } else { - argc--; - *new_argp++ = operand = *argv++; - } - while (argc-- > 0) /* Copy over [argument...] */ - *new_argp++ = *argv++; - *new_argp = NULL; - - /* - * If there is a jar file, read the manifest. If the jarfile can't be - * read, the manifest can't be read from the jar file, or the manifest - * is corrupt, issue the appropriate error messages and exit. - * - * Even if there isn't a jar file, construct a manifest_info structure - * containing the command line information. It's a convenient way to carry - * this data around.
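For reference, a jar manifest that populates every field SelectVersion() reads into manifest_info might look like the following sketch. The values are purely illustrative; Main-Class, JRE-Version and JRE-Restrict-Search are the attribute names used by this code, and SplashScreen-Image is the standard splash screen attribute (an assumption here, since this file only names the parsed field).

Manifest-Version: 1.0
Main-Class: com.example.Main
JRE-Version: 1.5+
JRE-Restrict-Search: true
SplashScreen-Image: images/splash.gif

As the code below enforces, a -version:<value> on the command line overrides JRE-Version, the -jre-restrict-search/-no-jre-restrict-search flags override JRE-Restrict-Search, and an explicit -splash: option takes precedence over the manifest's splash image.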
- */ - if (jarflag && operand) { - if ((res = JLI_ParseManifest(operand, &info)) != 0) { - if (res == -1) - ReportErrorMessage2("Unable to access jarfile %s", - operand, JNI_TRUE); - else - ReportErrorMessage2("Invalid or corrupt jarfile %s", - operand, JNI_TRUE); - exit(1); - } - - /* - * Command line splash screen option should have precedence - * over the manifest, so the manifest data is used only if - * splash_file_name has not been initialized above during command - * line parsing - */ - if (!headlessflag && !splash_file_name && info.splashscreen_image_file_name) { - splash_file_name = info.splashscreen_image_file_name; - splash_jar_name = operand; - } - } else { - info.manifest_version = NULL; - info.main_class = NULL; - info.jre_version = NULL; - info.jre_restrict_search = 0; - } - - /* - * Passing on splash screen info in environment variables - */ - if (splash_file_name && !headlessflag) { - char* splash_file_entry = JLI_MemAlloc(strlen(SPLASH_FILE_ENV_ENTRY "=")+strlen(splash_file_name)+1); - strcpy(splash_file_entry, SPLASH_FILE_ENV_ENTRY "="); - strcat(splash_file_entry, splash_file_name); - putenv(splash_file_entry); - } - if (splash_jar_name && !headlessflag) { - char* splash_jar_entry = JLI_MemAlloc(strlen(SPLASH_JAR_ENV_ENTRY "=")+strlen(splash_jar_name)+1); - strcpy(splash_jar_entry, SPLASH_JAR_ENV_ENTRY "="); - strcat(splash_jar_entry, splash_jar_name); - putenv(splash_jar_entry); - } - - /* - * The JRE-Version and JRE-Restrict-Search values (if any) from the - * manifest are overwritten by any specified on the command line. - */ - if (version != NULL) - info.jre_version = version; - if (restrict_search != -1) - info.jre_restrict_search = restrict_search; - - /* - * "Valid" returns (other than unrecoverable errors) follow. Set - * main_class as a side-effect of this routine. - */ - if (info.main_class != NULL) - *main_class = JLI_StringDup(info.main_class); - - /* - * If no version selection information is found either on the command - * line or in the manifest, simply return. - */ - if (info.jre_version == NULL) { - JLI_FreeManifest(); - JLI_MemFree(new_argv); - return; - } - - /* - * Check for correct syntax of the version specification (JSR 56). - */ - if (!JLI_ValidVersionString(info.jre_version)) { - ReportErrorMessage2("Syntax error in version specification \"%s\"", - info.jre_version, JNI_TRUE); - exit(1); - } - - /* - * Find the appropriate JVM on the system. Just to be as forgiving as - * possible, if the standard algorithms don't locate an appropriate - * jre, check to see if the one running will satisfy the requirements. - * This can happen on systems which haven't been set-up for multiple - * JRE support. - */ - jre = LocateJRE(&info); - if (_launcher_debug) - printf("JRE-Version = %s, JRE-Restrict-Search = %s Selected = %s\n", - (info.jre_version?info.jre_version:"null"), - (info.jre_restrict_search?"true":"false"), (jre?jre:"null")); - if (jre == NULL) { - if (JLI_AcceptableRelease(FULL_VERSION, info.jre_version)) { - JLI_FreeManifest(); - JLI_MemFree(new_argv); - return; - } else { - ReportErrorMessage2( - "Unable to locate JRE meeting specification \"%s\"", - info.jre_version, JNI_TRUE); - exit(1); - } - } - - /* - * If I'm not the chosen one, exec the chosen one. Returning from - * ExecJRE indicates that I am indeed the chosen one. 
- * - * The private environment variable _JAVA_VERSION_SET is used to - * prevent the chosen one from re-reading the manifest file and - * using the values found within to override the (potential) command - * line flags stripped from argv (because the target may not - * understand them). Passing the MainClass value is an optimization - * to avoid locating, expanding and parsing the manifest extra - * times. - */ - if (info.main_class != NULL) { - if (strlen(info.main_class) <= MAXNAMELEN) { - (void)strcat(env_entry, info.main_class); - } else { - ReportErrorMessage("Error: main-class: attribute exceeds system limits\n", JNI_TRUE); - exit(1); - } - } - (void)putenv(env_entry); - ExecJRE(jre, new_argv); - JLI_FreeManifest(); - JLI_MemFree(new_argv); - return; -} -#endif /* ifndef GAMMA */ - -/* - * Parses command line arguments. Returns JNI_FALSE if launcher - * should exit without starting vm (e.g. certain version and usage - * options); returns JNI_TRUE if vm needs to be started to process - * given options. *pret (the launcher process return value) is set to - * 0 for a normal exit. - */ -static jboolean -ParseArguments(int *pargc, char ***pargv, char **pjarfile, - char **pclassname, int *pret, const char *jvmpath) -{ - int argc = *pargc; - char **argv = *pargv; - jboolean jarflag = JNI_FALSE; - char *arg; - - *pret = 1; - while ((arg = *argv) != 0 && *arg == '-') { - argv++; --argc; - if (strcmp(arg, "-classpath") == 0 || strcmp(arg, "-cp") == 0) { - if (argc < 1) { - ReportErrorMessage2("%s requires class path specification", - arg, JNI_TRUE); - PrintUsage(); - return JNI_FALSE; - } - SetClassPath(*argv); - argv++; --argc; - } else if (strcmp(arg, "-jar") == 0) { - jarflag = JNI_TRUE; - } else if (strcmp(arg, "-help") == 0 || - strcmp(arg, "-h") == 0 || - strcmp(arg, "-?") == 0) { - PrintUsage(); - *pret = 0; - return JNI_FALSE; - } else if (strcmp(arg, "-version") == 0) { - printVersion = JNI_TRUE; - return JNI_TRUE; - } else if (strcmp(arg, "-showversion") == 0) { - showVersion = JNI_TRUE; - } else if (strcmp(arg, "-X") == 0) { - *pret = PrintXUsage(jvmpath); - return JNI_FALSE; -/* - * The following cases provide backward compatibility with old-style - * command line options.
- */ - } else if (strcmp(arg, "-fullversion") == 0) { - fprintf(stderr, "%s full version \"%s\"\n", progname, - FULL_VERSION); - *pret = 0; - return JNI_FALSE; - } else if (strcmp(arg, "-verbosegc") == 0) { - AddOption("-verbose:gc", NULL); - } else if (strcmp(arg, "-t") == 0) { - AddOption("-Xt", NULL); - } else if (strcmp(arg, "-tm") == 0) { - AddOption("-Xtm", NULL); - } else if (strcmp(arg, "-debug") == 0) { - AddOption("-Xdebug", NULL); - } else if (strcmp(arg, "-noclassgc") == 0) { - AddOption("-Xnoclassgc", NULL); - } else if (strcmp(arg, "-Xfuture") == 0) { - AddOption("-Xverify:all", NULL); - } else if (strcmp(arg, "-verify") == 0) { - AddOption("-Xverify:all", NULL); - } else if (strcmp(arg, "-verifyremote") == 0) { - AddOption("-Xverify:remote", NULL); - } else if (strcmp(arg, "-noverify") == 0) { - AddOption("-Xverify:none", NULL); - } else if (strcmp(arg, "-XXsuppressExitMessage") == 0) { - noExitErrorMessage = 1; - } else if (strncmp(arg, "-prof", 5) == 0) { - char *p = arg + 5; - char *tmp = JLI_MemAlloc(strlen(arg) + 50); - if (*p) { - sprintf(tmp, "-Xrunhprof:cpu=old,file=%s", p + 1); - } else { - sprintf(tmp, "-Xrunhprof:cpu=old,file=java.prof"); - } - AddOption(tmp, NULL); - } else if (strncmp(arg, "-ss", 3) == 0 || - strncmp(arg, "-oss", 4) == 0 || - strncmp(arg, "-ms", 3) == 0 || - strncmp(arg, "-mx", 3) == 0) { - char *tmp = JLI_MemAlloc(strlen(arg) + 6); - sprintf(tmp, "-X%s", arg + 1); /* skip '-' */ - AddOption(tmp, NULL); - } else if (strcmp(arg, "-checksource") == 0 || - strcmp(arg, "-cs") == 0 || - strcmp(arg, "-noasyncgc") == 0) { - /* No longer supported */ - fprintf(stderr, - "Warning: %s option is no longer supported.\n", - arg); - } else if (strncmp(arg, "-version:", 9) == 0 || - strcmp(arg, "-no-jre-restrict-search") == 0 || - strcmp(arg, "-jre-restrict-search") == 0 || - strncmp(arg, "-splash:", 8) == 0) { - ; /* Ignore machine independent options already handled */ - } else if (RemovableMachineDependentOption(arg) ) { - ; /* Do not pass option to vm. */ - } - else { - AddOption(arg, NULL); - } - } - - if (--argc >= 0) { - if (jarflag) { - *pjarfile = *argv++; - *pclassname = 0; - } else { - *pjarfile = 0; - *pclassname = *argv++; - } - *pargc = argc; - *pargv = argv; - } - - return JNI_TRUE; -} - -/* - * Initializes the Java Virtual Machine. Also frees options array when - * finished. - */ -static jboolean -InitializeJVM(JavaVM **pvm, JNIEnv **penv, InvocationFunctions *ifn) -{ - JavaVMInitArgs args; - jint r; - - memset(&args, 0, sizeof(args)); - args.version = JNI_VERSION_1_2; - args.nOptions = numOptions; - args.options = options; - args.ignoreUnrecognized = JNI_FALSE; - - if (_launcher_debug) { - int i = 0; - printf("JavaVM args:\n "); - printf("version 0x%08lx, ", (long)args.version); - printf("ignoreUnrecognized is %s, ", - args.ignoreUnrecognized ? 
"JNI_TRUE" : "JNI_FALSE"); - printf("nOptions is %ld\n", (long)args.nOptions); - for (i = 0; i < numOptions; i++) - printf(" option[%2d] = '%s'\n", - i, args.options[i].optionString); - } - - r = ifn->CreateJavaVM(pvm, (void **)penv, &args); - JLI_MemFree(options); - return r == JNI_OK; -} - - -#define NULL_CHECK0(e) if ((e) == 0) return 0 -#define NULL_CHECK(e) if ((e) == 0) return - -static jstring platformEncoding = NULL; -static jstring getPlatformEncoding(JNIEnv *env) { - if (platformEncoding == NULL) { - jstring propname = (*env)->NewStringUTF(env, "sun.jnu.encoding"); - if (propname) { - jclass cls; - jmethodID mid; - NULL_CHECK0 (cls = (*env)->FindClass(env, "java/lang/System")); - NULL_CHECK0 (mid = (*env)->GetStaticMethodID( - env, cls, - "getProperty", - "(Ljava/lang/String;)Ljava/lang/String;")); - platformEncoding = (*env)->CallStaticObjectMethod ( - env, cls, mid, propname); - } - } - return platformEncoding; -} - -static jboolean isEncodingSupported(JNIEnv *env, jstring enc) { - jclass cls; - jmethodID mid; - NULL_CHECK0 (cls = (*env)->FindClass(env, "java/nio/charset/Charset")); - NULL_CHECK0 (mid = (*env)->GetStaticMethodID( - env, cls, - "isSupported", - "(Ljava/lang/String;)Z")); - return (*env)->CallStaticBooleanMethod(env, cls, mid, enc); -} - -/* - * Returns a new Java string object for the specified platform string. - */ -static jstring -NewPlatformString(JNIEnv *env, char *s) -{ - int len = (int)strlen(s); - jclass cls; - jmethodID mid; - jbyteArray ary; - jstring enc; - - if (s == NULL) - return 0; - enc = getPlatformEncoding(env); - - ary = (*env)->NewByteArray(env, len); - if (ary != 0) { - jstring str = 0; - (*env)->SetByteArrayRegion(env, ary, 0, len, (jbyte *)s); - if (!(*env)->ExceptionOccurred(env)) { - if (isEncodingSupported(env, enc) == JNI_TRUE) { - NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String")); - NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", - "([BLjava/lang/String;)V")); - str = (*env)->NewObject(env, cls, mid, ary, enc); - } else { - /*If the encoding specified in sun.jnu.encoding is not - endorsed by "Charset.isSupported" we have to fall back - to use String(byte[]) explicitly here without specifying - the encoding name, in which the StringCoding class will - pickup the iso-8859-1 as the fallback converter for us. - */ - NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String")); - NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", - "([B)V")); - str = (*env)->NewObject(env, cls, mid, ary); - } - (*env)->DeleteLocalRef(env, ary); - return str; - } - } - return 0; -} - -/* - * Returns a new array of Java string objects for the specified - * array of platform strings. - */ -static jobjectArray -NewPlatformStringArray(JNIEnv *env, char **strv, int strc) -{ - jarray cls; - jarray ary; - int i; - - NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String")); - NULL_CHECK0(ary = (*env)->NewObjectArray(env, strc, cls, 0)); - for (i = 0; i < strc; i++) { - jstring str = NewPlatformString(env, *strv++); - NULL_CHECK0(str); - (*env)->SetObjectArrayElement(env, ary, i, str); - (*env)->DeleteLocalRef(env, str); - } - return ary; -} - -/* - * Loads a class, convert the '.' to '/'. - */ -static jclass -LoadClass(JNIEnv *env, char *name) -{ - char *buf = JLI_MemAlloc(strlen(name) + 1); - char *s = buf, *t = name, c; - jclass cls; - jlong start, end; - - if (_launcher_debug) - start = CounterGet(); - - do { - c = *t++; - *s++ = (c == '.') ? 
'/' : c; - } while (c != '\0'); - cls = (*env)->FindClass(env, buf); - JLI_MemFree(buf); - - if (_launcher_debug) { - end = CounterGet(); - printf("%ld micro seconds to load main class\n", - (long)(jint)Counter2Micros(end-start)); - printf("----_JAVA_LAUNCHER_DEBUG----\n"); - } - - return cls; -} - - -/* - * Returns the main class name for the specified jar file. - */ -static jstring -GetMainClassName(JNIEnv *env, char *jarname) -{ -#define MAIN_CLASS "Main-Class" - jclass cls; - jmethodID mid; - jobject jar, man, attr; - jstring str, result = 0; - - NULL_CHECK0(cls = (*env)->FindClass(env, "java/util/jar/JarFile")); - NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>", - "(Ljava/lang/String;)V")); - NULL_CHECK0(str = NewPlatformString(env, jarname)); - NULL_CHECK0(jar = (*env)->NewObject(env, cls, mid, str)); - NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "getManifest", - "()Ljava/util/jar/Manifest;")); - man = (*env)->CallObjectMethod(env, jar, mid); - if (man != 0) { - NULL_CHECK0(mid = (*env)->GetMethodID(env, - (*env)->GetObjectClass(env, man), - "getMainAttributes", - "()Ljava/util/jar/Attributes;")); - attr = (*env)->CallObjectMethod(env, man, mid); - if (attr != 0) { - NULL_CHECK0(mid = (*env)->GetMethodID(env, - (*env)->GetObjectClass(env, attr), - "getValue", - "(Ljava/lang/String;)Ljava/lang/String;")); - NULL_CHECK0(str = NewPlatformString(env, MAIN_CLASS)); - result = (*env)->CallObjectMethod(env, attr, mid, str); - } - } - return result; -} - -#ifdef JAVA_ARGS -static char *java_args[] = JAVA_ARGS; -static char *app_classpath[] = APP_CLASSPATH; - -/* - * For tools, convert command line args thus: - * javac -cp foo:foo/"*" -J-ms32m ... - * java -ms32m -cp JLI_WildcardExpandClasspath(foo:foo/"*") ... - */ -static void -TranslateApplicationArgs(int *pargc, char ***pargv) -{ - const int NUM_ARGS = (sizeof(java_args) / sizeof(char *)); - int argc = *pargc; - char **argv = *pargv; - int nargc = argc + NUM_ARGS; - char **nargv = JLI_MemAlloc((nargc + 1) * sizeof(char *)); - int i; - - *pargc = nargc; - *pargv = nargv; - - /* Copy the VM arguments (i.e. prefixed with -J) */ - for (i = 0; i < NUM_ARGS; i++) { - char *arg = java_args[i]; - if (arg[0] == '-' && arg[1] == 'J') { - *nargv++ = arg + 2; - } - } - - for (i = 0; i < argc; i++) { - char *arg = argv[i]; - if (arg[0] == '-' && arg[1] == 'J') { - if (arg[2] == '\0') { - ReportErrorMessage("Error: the -J option should not be " - "followed by a space.", JNI_TRUE); - exit(1); - } - *nargv++ = arg + 2; - } - } - - /* Copy the rest of the arguments */ - for (i = 0; i < NUM_ARGS; i++) { - char *arg = java_args[i]; - if (arg[0] != '-' || arg[1] != 'J') { - *nargv++ = arg; - } - } - for (i = 0; i < argc; i++) { - char *arg = argv[i]; - if (arg[0] == '-') { - if (arg[1] == 'J') - continue; -#ifdef EXPAND_CLASSPATH_WILDCARDS - if (arg[1] == 'c' - && (strcmp(arg, "-cp") == 0 || - strcmp(arg, "-classpath") == 0) - && i < argc - 1) { - *nargv++ = arg; - *nargv++ = (char *) JLI_WildcardExpandClasspath(argv[i+1]); - i++; - continue; - } -#endif - } - *nargv++ = arg; - } - *nargv = 0; -} - -/* - * For our tools, we try to add 3 VM options: - * -Denv.class.path=<envcp> - * -Dapplication.home=<apphome> - * -Djava.class.path=<appcp> - * <envcp> is the user's setting of CLASSPATH -- for instance the user - * tells javac where to find binary classes through this environment - * variable. Notice that users will be able to compile against our - * tools classes (sun.tools.javac.Main) only if they explicitly add - * tools.jar to CLASSPATH.
- * <apphome> is the directory where the application is installed. - * <appcp> is the classpath to where our apps' classfiles are. - */ -static jboolean -AddApplicationOptions() -{ - const int NUM_APP_CLASSPATH = (sizeof(app_classpath) / sizeof(char *)); - char *envcp, *appcp, *apphome; - char home[MAXPATHLEN]; /* application home */ - char separator[] = { PATH_SEPARATOR, '\0' }; - int size, i; - int strlenHome; - - { - const char *s = getenv("CLASSPATH"); - if (s) { - s = (char *) JLI_WildcardExpandClasspath(s); - /* 40 for -Denv.class.path= */ - envcp = (char *)JLI_MemAlloc(strlen(s) + 40); - sprintf(envcp, "-Denv.class.path=%s", s); - AddOption(envcp, NULL); - } - } - - if (!GetApplicationHome(home, sizeof(home))) { - ReportErrorMessage("Can't determine application home", JNI_TRUE); - return JNI_FALSE; - } - - /* 40 for '-Dapplication.home=' */ - apphome = (char *)JLI_MemAlloc(strlen(home) + 40); - sprintf(apphome, "-Dapplication.home=%s", home); - AddOption(apphome, NULL); - - /* How big is the application's classpath? */ - size = 40; /* 40: "-Djava.class.path=" */ - strlenHome = (int)strlen(home); - for (i = 0; i < NUM_APP_CLASSPATH; i++) { - size += strlenHome + (int)strlen(app_classpath[i]) + 1; /* 1: separator */ - } - appcp = (char *)JLI_MemAlloc(size + 1); - strcpy(appcp, "-Djava.class.path="); - for (i = 0; i < NUM_APP_CLASSPATH; i++) { - strcat(appcp, home); /* c:\program files\myapp */ - strcat(appcp, app_classpath[i]); /* \lib\myapp.jar */ - strcat(appcp, separator); /* ; */ - } - appcp[strlen(appcp)-1] = '\0'; /* remove trailing path separator */ - AddOption(appcp, NULL); - return JNI_TRUE; -} -#endif /* JAVA_ARGS */ - -/* - * inject the -Dsun.java.command pseudo property into the args structure; - * this pseudo property is used in the HotSpot VM to expose the - * Java class name and arguments to the main method to the VM. The - * HotSpot VM uses this pseudo property to store the Java class name - * (or jar file name) and the arguments to the class's main method - * to the instrumentation memory region. The sun.java.command pseudo - * property is not exported by HotSpot to the Java layer. - */ -void -SetJavaCommandLineProp(char *classname, char *jarfile, - int argc, char **argv) -{ - - int i = 0; - size_t len = 0; - char* javaCommand = NULL; - char* dashDstr = "-Dsun.java.command="; - - if (classname == NULL && jarfile == NULL) { - /* unexpected, one of these should be set. just return without - * setting the property - */ - return; - } - - /* if the class name is not set, then use the jarfile name */ - if (classname == NULL) { - classname = jarfile; - } - - /* determine the amount of memory to allocate assuming - * the individual components will be space separated - */ - len = strlen(classname); - for (i = 0; i < argc; i++) { - len += strlen(argv[i]) + 1; - } - - /* allocate the memory */ - javaCommand = (char*) JLI_MemAlloc(len + strlen(dashDstr) + 1); - - /* build the -D string */ - *javaCommand = '\0'; - strcat(javaCommand, dashDstr); - strcat(javaCommand, classname); - - for (i = 0; i < argc; i++) { - /* the components of the string are space separated. In - * the case of embedded white space, the relationship of - * the white space separated components to their true - * positional arguments will be ambiguous. This issue may - * be addressed in a future release.
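As a concrete illustration (hypothetical invocation): for java -cp app.jar com.example.Main one two, the code above produces the single option string -Dsun.java.command=com.example.Main one two, that is, the class (or jar file) name first and then each argument appended after a space. This is exactly why an argument containing embedded white space cannot be distinguished from two separate arguments.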
- */ - strcat(javaCommand, " "); - strcat(javaCommand, argv[i]); - } - - AddOption(javaCommand, NULL); -} - -/* - * JVM would like to know if it's created by a standard Sun launcher, or by - * user native application, the following property indicates the former. - */ -void SetJavaLauncherProp() { - AddOption("-Dsun.java.launcher=" LAUNCHER_TYPE, NULL); -} - -/* - * Prints the version information from the java.version and other properties. - */ -static void -PrintJavaVersion(JNIEnv *env) -{ - jclass ver; - jmethodID print; - - NULL_CHECK(ver = (*env)->FindClass(env, "sun/misc/Version")); - NULL_CHECK(print = (*env)->GetStaticMethodID(env, ver, "print", "()V")); - - (*env)->CallStaticVoidMethod(env, ver, print); -} - -/* - * Prints default usage message. - */ -static void -PrintUsage(void) -{ -#ifndef GAMMA - int i; -#endif - - fprintf(stdout, - "Usage: %s [-options] class [args...]\n" - " (to execute a class)\n" - " or %s [-options] -jar jarfile [args...]\n" - " (to execute a jar file)\n" - "\n" - "where options include:\n", - progname, - progname); - -#ifndef GAMMA - PrintMachineDependentOptions(); - - if ((knownVMs[0].flag == VM_KNOWN) || - (knownVMs[0].flag == VM_IF_SERVER_CLASS)) { - fprintf(stdout, " %s\t to select the \"%s\" VM\n", - knownVMs[0].name, knownVMs[0].name+1); - } - for (i=1; i<knownVMsCount; i++) { - if (knownVMs[i].flag == VM_KNOWN) - fprintf(stdout, " %s\t to select the \"%s\" VM\n", - knownVMs[i].name, knownVMs[i].name+1); - } - for (i=1; i<knownVMsCount; i++) { - if (knownVMs[i].flag == VM_ALIASED_TO) - fprintf(stdout, " %s\t is a synonym for " - "the \"%s\" VM [deprecated]\n", - knownVMs[i].name, knownVMs[i].alias+1); - } -#endif - - fprintf(stdout, -" -cp <class search path of directories and zip/jar files>\n" -" -classpath <class search path of directories and zip/jar files>\n" -" A %c separated list of directories, JAR archives,\n" -" and ZIP archives to search for class files.\n" -" -D<name>=<value>\n" -" set a system property\n" -" -verbose[:class|gc|jni]\n" -" enable verbose output\n" -" -version print product version and exit\n" -" -version:<value>\n" -" require the specified version to run\n" -" -showversion print product version and continue\n" -" -jre-restrict-search | -jre-no-restrict-search\n" -" include/exclude user private JREs in the version search\n" -" -? -help print this help message\n" -" -X print help on non-standard options\n" -" -ea[:<packagename>...|:<classname>]\n" -" -enableassertions[:<packagename>...|:<classname>]\n" -" enable assertions\n" -" -da[:<packagename>...|:<classname>]\n" -" -disableassertions[:<packagename>...|:<classname>]\n" -" disable assertions\n" -" -esa | -enablesystemassertions\n" -" enable system assertions\n" -" -dsa | -disablesystemassertions\n" -" disable system assertions\n" -" -agentlib:<libname>[=<options>]\n" -" load native agent library <libname>, e.g. -agentlib:hprof\n" -" see also, -agentlib:jdwp=help and -agentlib:hprof=help\n" -" -agentpath:<pathname>[=<options>]\n" -" load native agent library by full pathname\n" -" -javaagent:<jarpath>[=<options>]\n" -" load Java programming language agent, see java.lang.instrument\n" -" -splash:<imagepath>\n" -" show splash screen with specified image\n" - - ,PATH_SEPARATOR); -} - -/* - * Print usage message for -X options. - */ -static jint -PrintXUsage(const char *jvmpath) -{ - /* - A 32 bit cushion to prevent buffer overrun, noting that - fopen(3C) may fail if the buffer exceeds MAXPATHLEN. - */ - char path[MAXPATHLEN+32]; - char buf[128]; - size_t n; - FILE *fp; - static const char Xusage_txt[] = "/Xusage.txt"; - - strcpy(path, jvmpath); - /* Note the FILE_SEPARATOR is platform dependent */ - strcpy(strrchr(path, FILE_SEPARATOR), Xusage_txt); - fp = fopen(path, "r"); - if (fp == 0) { - fprintf(stderr, "Can't open %s\n", path); - return 1; - } - while ((n = fread(buf, 1, sizeof(buf), fp)) != 0) { - fwrite(buf, 1, n, stdout); - } - fclose(fp); - return 0; -} - -#ifndef GAMMA -/* - * Read the jvm.cfg file and fill the knownJVMs[] array. - * - * The functionality of the jvm.cfg file is subject to change without - * notice and the mechanism will be removed in the future.
- * - * The lexical structure of the jvm.cfg file is as follows: - * - * jvmcfg := { vmLine } - * vmLine := knownLine - * | aliasLine - * | warnLine - * | ignoreLine - * | errorLine - * | predicateLine - * | commentLine - * knownLine := flag "KNOWN" EOL - * warnLine := flag "WARN" EOL - * ignoreLine := flag "IGNORE" EOL - * errorLine := flag "ERROR" EOL - * aliasLine := flag "ALIASED_TO" flag EOL - * predicateLine := flag "IF_SERVER_CLASS" flag EOL - * commentLine := "#" text EOL - * flag := "-" identifier - * - * The semantics are that when someone specifies a flag on the command line: - * - if the flag appears on a knownLine, then the identifier is used as - * the name of the directory holding the JVM library (the name of the JVM). - * - if the flag appears as the first flag on an aliasLine, the identifier - * of the second flag is used as the name of the JVM. - * - if the flag appears on a warnLine, the identifier is used as the - * name of the JVM, but a warning is generated. - * - if the flag appears on an ignoreLine, the identifier is recognized as the - * name of a JVM, but the identifier is ignored and the default vm used - * - if the flag appears on an errorLine, an error is generated. - * - if the flag appears as the first flag on a predicateLine, and - * the machine on which you are running passes the predicate indicated, - * then the identifier of the second flag is used as the name of the JVM, - * otherwise the identifier of the first flag is used as the name of the JVM. - * If no flag is given on the command line, the first vmLine of the jvm.cfg - * file determines the name of the JVM. - * PredicateLines are only interpreted on first vmLine of a jvm.cfg file, - * since they only make sense if someone hasn't specified the name of the - * JVM on the command line. - * - * The intent of the jvm.cfg file is to allow several JVM libraries to - * be installed in different subdirectories of a single JRE installation, - * for space-savings and convenience in testing. - * The intent is explicitly not to provide a full aliasing or predicate - * mechanism. 
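 *
 * (Editorial example, not from the original source: a jvm.cfg consistent
 * with the grammar above might read
 *
 *     -client IF_SERVER_CLASS -server
 *     -server KNOWN
 *     -hotspot ALIASED_TO -client
 *     -classic WARN
 *     -native ERROR
 *
 * Here plain "java" selects the server VM on a server-class machine and
 * the client VM otherwise, "java -hotspot" selects the client VM,
 * "java -classic" selects a VM named "classic" with a warning, and
 * "java -native" is rejected with an error.)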
- */ -jint -ReadKnownVMs(const char *jrepath, char * arch, jboolean speculative) -{ - FILE *jvmCfg; - char jvmCfgName[MAXPATHLEN+20]; - char line[MAXPATHLEN+20]; - int cnt = 0; - int lineno = 0; - jlong start, end; - int vmType; - char *tmpPtr; - char *altVMName = NULL; - char *serverClassVMName = NULL; - static char *whiteSpace = " \t"; - if (_launcher_debug) { - start = CounterGet(); - } - - strcpy(jvmCfgName, jrepath); - strcat(jvmCfgName, FILESEP "lib" FILESEP); - strcat(jvmCfgName, arch); - strcat(jvmCfgName, FILESEP "jvm.cfg"); - - jvmCfg = fopen(jvmCfgName, "r"); - if (jvmCfg == NULL) { - if (!speculative) { - ReportErrorMessage2("Error: could not open `%s'", jvmCfgName, - JNI_TRUE); - exit(1); - } else { - return -1; - } - } - while (fgets(line, sizeof(line), jvmCfg) != NULL) { - vmType = VM_UNKNOWN; - lineno++; - if (line[0] == '#') - continue; - if (line[0] != '-') { - fprintf(stderr, "Warning: no leading - on line %d of `%s'\n", - lineno, jvmCfgName); - } - if (cnt >= knownVMsLimit) { - GrowKnownVMs(cnt); - } - line[strlen(line)-1] = '\0'; /* remove trailing newline */ - tmpPtr = line + strcspn(line, whiteSpace); - if (*tmpPtr == 0) { - fprintf(stderr, "Warning: missing VM type on line %d of `%s'\n", - lineno, jvmCfgName); - } else { - /* Null-terminate this string for JLI_StringDup below */ - *tmpPtr++ = 0; - tmpPtr += strspn(tmpPtr, whiteSpace); - if (*tmpPtr == 0) { - fprintf(stderr, "Warning: missing VM type on line %d of `%s'\n", - lineno, jvmCfgName); - } else { - if (!strncmp(tmpPtr, "KNOWN", strlen("KNOWN"))) { - vmType = VM_KNOWN; - } else if (!strncmp(tmpPtr, "ALIASED_TO", strlen("ALIASED_TO"))) { - tmpPtr += strcspn(tmpPtr, whiteSpace); - if (*tmpPtr != 0) { - tmpPtr += strspn(tmpPtr, whiteSpace); - } - if (*tmpPtr == 0) { - fprintf(stderr, "Warning: missing VM alias on line %d of `%s'\n", - lineno, jvmCfgName); - } else { - /* Null terminate altVMName */ - altVMName = tmpPtr; - tmpPtr += strcspn(tmpPtr, whiteSpace); - *tmpPtr = 0; - vmType = VM_ALIASED_TO; - } - } else if (!strncmp(tmpPtr, "WARN", strlen("WARN"))) { - vmType = VM_WARN; - } else if (!strncmp(tmpPtr, "IGNORE", strlen("IGNORE"))) { - vmType = VM_IGNORE; - } else if (!strncmp(tmpPtr, "ERROR", strlen("ERROR"))) { - vmType = VM_ERROR; - } else if (!strncmp(tmpPtr, - "IF_SERVER_CLASS", - strlen("IF_SERVER_CLASS"))) { - tmpPtr += strcspn(tmpPtr, whiteSpace); - if (*tmpPtr != 0) { - tmpPtr += strspn(tmpPtr, whiteSpace); - } - if (*tmpPtr == 0) { - fprintf(stderr, "Warning: missing server class VM on line %d of `%s'\n", - lineno, jvmCfgName); - } else { - /* Null terminate server class VM name */ - serverClassVMName = tmpPtr; - tmpPtr += strcspn(tmpPtr, whiteSpace); - *tmpPtr = 0; - vmType = VM_IF_SERVER_CLASS; - } - } else { - fprintf(stderr, "Warning: unknown VM type on line %d of `%s'\n", - lineno, &jvmCfgName[0]); - vmType = VM_KNOWN; - } - } - } - - if (_launcher_debug) - printf("jvm.cfg[%d] = ->%s<-\n", cnt, line); - if (vmType != VM_UNKNOWN) { - knownVMs[cnt].name = JLI_StringDup(line); - knownVMs[cnt].flag = vmType; - switch (vmType) { - default: - break; - case VM_ALIASED_TO: - knownVMs[cnt].alias = JLI_StringDup(altVMName); - if (_launcher_debug) { - printf(" name: %s vmType: %s alias: %s\n", - knownVMs[cnt].name, "VM_ALIASED_TO", knownVMs[cnt].alias); - } - break; - case VM_IF_SERVER_CLASS: - knownVMs[cnt].server_class = JLI_StringDup(serverClassVMName); - if (_launcher_debug) { - printf(" name: %s vmType: %s server_class: %s\n", - knownVMs[cnt].name, "VM_IF_SERVER_CLASS", 
knownVMs[cnt].server_class); - } - break; - } - cnt++; - } - } - fclose(jvmCfg); - knownVMsCount = cnt; - - if (_launcher_debug) { - end = CounterGet(); - printf("%ld micro seconds to parse jvm.cfg\n", - (long)(jint)Counter2Micros(end-start)); - } - - return cnt; -} - - -static void -GrowKnownVMs(int minimum) -{ - struct vmdesc* newKnownVMs; - int newMax; - - newMax = (knownVMsLimit == 0 ? INIT_MAX_KNOWN_VMS : (2 * knownVMsLimit)); - if (newMax <= minimum) { - newMax = minimum; - } - newKnownVMs = (struct vmdesc*) JLI_MemAlloc(newMax * sizeof(struct vmdesc)); - if (knownVMs != NULL) { - memcpy(newKnownVMs, knownVMs, knownVMsLimit * sizeof(struct vmdesc)); - } - JLI_MemFree(knownVMs); - knownVMs = newKnownVMs; - knownVMsLimit = newMax; -} - - -/* Returns index of VM or -1 if not found */ -static int -KnownVMIndex(const char* name) -{ - int i; - if (strncmp(name, "-J", 2) == 0) name += 2; - for (i = 0; i < knownVMsCount; i++) { - if (!strcmp(name, knownVMs[i].name)) { - return i; - } - } - return -1; -} - -static void -FreeKnownVMs() -{ - int i; - for (i = 0; i < knownVMsCount; i++) { - JLI_MemFree(knownVMs[i].name); - knownVMs[i].name = NULL; - } - JLI_MemFree(knownVMs); -} - - -/* - * Displays the splash screen according to the jar file name - * and image file names stored in environment variables - */ -static void -ShowSplashScreen() -{ - const char *jar_name = getenv(SPLASH_JAR_ENV_ENTRY); - const char *file_name = getenv(SPLASH_FILE_ENV_ENTRY); - int data_size; - void *image_data; - if (jar_name) { - image_data = JLI_JarUnpackFile(jar_name, file_name, &data_size); - if (image_data) { - DoSplashInit(); - DoSplashLoadMemory(image_data, data_size); - JLI_MemFree(image_data); - } - } else if (file_name) { - DoSplashInit(); - DoSplashLoadFile(file_name); - } else { - return; - } - DoSplashSetFileJarName(file_name, jar_name); -} - -#endif /* ifndef GAMMA */ diff -r e0fb8a213650 -r 836a62f43af9 src/share/tools/launcher/java.h --- a/src/share/tools/launcher/java.h Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,110 +0,0 @@ -/* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - - -#ifndef _JAVA_H_ -#define _JAVA_H_ - -/* - * Get system specific defines. - */ -#include "jni.h" -#include "java_md.h" -#include "jli_util.h" - -/* - * Pointers to the needed JNI invocation API, initialized by LoadJavaVM. 
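 *
 * (Editorial sketch, using only names declared in this header; error
 * handling omitted and the jvmpath variable assumed:
 *
 *     InvocationFunctions ifn;
 *     JavaVM *vm;
 *     JNIEnv *env;
 *     JavaVMInitArgs args;
 *     if (LoadJavaVM(jvmpath, &ifn)) {
 *         ifn.GetDefaultJavaVMInitArgs(&args);
 *         ifn.CreateJavaVM(&vm, (void**)&env, &args);
 *     }
 * )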
- */ -typedef jint (JNICALL *CreateJavaVM_t)(JavaVM **pvm, void **env, void *args); -typedef jint (JNICALL *GetDefaultJavaVMInitArgs_t)(void *args); - -typedef struct { - CreateJavaVM_t CreateJavaVM; - GetDefaultJavaVMInitArgs_t GetDefaultJavaVMInitArgs; -} InvocationFunctions; - -/* - * Prototypes for launcher functions in the system specific java_md.c. - */ - -jboolean -LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn); - -void -GetXUsagePath(char *buf, jint bufsize); - -jboolean -GetApplicationHome(char *buf, jint bufsize); - -const char * -GetArch(); - -void CreateExecutionEnvironment(int *_argc, - char ***_argv, - char jrepath[], - jint so_jrepath, - char jvmpath[], - jint so_jvmpath, - char **original_argv); - -/* - * Report an error message to stderr or a window as appropriate. The - * flag always is set to JNI_TRUE if message is to be reported to both - * strerr and windows and set to JNI_FALSE if the message should only - * be sent to a window. - */ -void ReportErrorMessage(char * message, jboolean always); -void ReportErrorMessage2(char * format, char * string, jboolean always); - -/* - * Report an exception which terminates the vm to stderr or a window - * as appropriate. - */ -void ReportExceptionDescription(JNIEnv * env); - -jboolean RemovableMachineDependentOption(char * option); -void PrintMachineDependentOptions(); - -/* - * Block current thread and continue execution in new thread - */ -int ContinueInNewThread(int (JNICALL *continuation)(void *), - jlong stack_size, void * args); - -/* sun.java.launcher.* platform properties. */ -void SetJavaLauncherPlatformProps(void); - -/* - * Functions defined in java.c and used in java_md.c. - */ -jint ReadKnownVMs(const char *jrepath, char * arch, jboolean speculative); -char *CheckJvmType(int *argc, char ***argv, jboolean speculative); -void AddOption(char *str, void *info); - -/* - * Make launcher spit debug output. - */ -extern jboolean _launcher_debug; - -#endif /* _JAVA_H_ */ diff -r e0fb8a213650 -r 836a62f43af9 src/share/tools/launcher/jli_util.c --- a/src/share/tools/launcher/jli_util.c Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,89 +0,0 @@ - -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include <stdio.h> -#include <string.h> -#include "jli_util.h" - -#ifdef GAMMA -#ifdef TARGET_OS_FAMILY_windows -#define strdup _strdup -#endif -#endif - -/* - * Returns a pointer to a block of at least 'size' bytes of memory. - * Prints error message and exits if the memory could not be allocated.
- */ -void * -JLI_MemAlloc(size_t size) -{ - void *p = malloc(size); - if (p == 0) { - perror("malloc"); - exit(1); - } - return p; -} - -/* - * Equivalent to realloc(size). - * Prints error message and exits if the memory could not be reallocated. - */ -void * -JLI_MemRealloc(void *ptr, size_t size) -{ - void *p = realloc(ptr, size); - if (p == 0) { - perror("realloc"); - exit(1); - } - return p; -} - -/* - * Wrapper over strdup(3C) which prints an error message and exits if memory - * could not be allocated. - */ -char * -JLI_StringDup(const char *s1) -{ - char *s = strdup(s1); - if (s == NULL) { - perror("strdup"); - exit(1); - } - return s; -} - -/* - * Very equivalent to free(ptr). - * Here to maintain pairing with the above routines. - */ -void -JLI_MemFree(void *ptr) -{ - free(ptr); -} diff -r e0fb8a213650 -r 836a62f43af9 src/share/tools/launcher/jli_util.h --- a/src/share/tools/launcher/jli_util.h Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,35 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef _JLI_UTIL_H -#define _JLI_UTIL_H - -#include <stddef.h> - -void *JLI_MemAlloc(size_t size); -void *JLI_MemRealloc(void *ptr, size_t size); -char *JLI_StringDup(const char *s1); -void JLI_MemFree(void *ptr); - -#endif /* _JLI_UTIL_H */ diff -r e0fb8a213650 -r 836a62f43af9 src/share/tools/launcher/wildcard.c --- a/src/share/tools/launcher/wildcard.c Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,496 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/* - * Class-Path Wildcards - * - * The syntax for wildcards is a single asterisk. The class path - * foo/"*", e.g., loads all jar files in the directory named foo. - * (This requires careful quotation when used in shell scripts.) - * - * Only files whose names end in .jar or .JAR are matched. - * Files whose names end in .zip, or which have a particular - * magic number, regardless of filename extension, are not - * matched. - * - * Files are considered regardless of whether or not they are - * "hidden" in the UNIX sense, i.e., have names beginning with '.'. - * - * A wildcard only matches jar files, not class files in the same - * directory. If you want to load both class files and jar files from - * a single directory foo then you can say foo:foo/"*", or foo/"*":foo - * if you want the jar files to take precedence. - * - * Subdirectories are not searched recursively, i.e., foo/"*" only - * looks for jar files in foo, not in foo/bar, foo/baz, etc. - * - * Expansion of wildcards is done early, prior to the invocation of a - * program's main method, rather than late, during the class-loading - * process itself. Each element of the input class path containing a - * wildcard is replaced by the (possibly empty) sequence of elements - * generated by enumerating the jar files in the named directory. If - * the directory foo contains a.jar, b.jar, and c.jar, - * e.g., then the class path foo/"*" is expanded into - * foo/a.jar:foo/b.jar:foo/c.jar, and that string would be the value - * of the system property java.class.path. - * - * The order in which the jar files in a directory are enumerated in - * the expanded class path is not specified and may vary from platform - * to platform and even from moment to moment on the same machine. A - * well-constructed application should not depend upon any particular - * order. If a specific order is required then the jar files can be - * enumerated explicitly in the class path. - * - * The CLASSPATH environment variable is not treated any differently - * from the -classpath (equiv. -cp) command-line option, - * i.e. wildcards are honored in all these cases. - * - * Class-path wildcards are not honored in the Class-Path jar-manifest - * header. - * - * Class-path wildcards are honored not only by the Java launcher but - * also by most other command-line tools that accept class paths, and - * in particular by javac and javadoc. - * - * Class-path wildcards are not honored in any other kind of path, and - * especially not in the bootstrap class path, which is a mere - * artifact of our implementation and not something that developers - * should use. - * - * Classpath wildcards are only expanded in the Java launcher code, - * supporting the use of wildcards on the command line and in the - * CLASSPATH environment variable. We do not support the use of - * wildcards by applications that embed the JVM. 
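 *
 * (Editorial usage sketch, assuming a directory foo holding a.jar and
 * b.jar on a platform whose PATH_SEPARATOR is ':':
 *
 *     const char *cp = JLI_WildcardExpandClasspath("lib:foo/*");
 *     // cp is now "lib:foo/a.jar:foo/b.jar" (enumeration order is
 *     // unspecified); a classpath containing no '*' is returned as-is.
 * )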
- */ - -#include <stddef.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <sys/types.h> -#include "java.h" /* Strictly for PATH_SEPARATOR/FILE_SEPARATOR */ -#include "jli_util.h" - -#ifdef _WIN32 -#include <windows.h> -#else /* Unix */ -#include <unistd.h> -#include <dirent.h> -#endif /* Unix */ - -static int -exists(const char* filename) -{ -#ifdef _WIN32 - return _access(filename, 0) == 0; -#else - return access(filename, F_OK) == 0; -#endif -} - -#define NEW_(TYPE) ((TYPE) JLI_MemAlloc(sizeof(struct TYPE##_))) - -/* - * Wildcard directory iteration. - * WildcardIterator_for(wildcard) returns an iterator. - * Each call to that iterator's next() method returns the basename - * of an entry in the wildcard's directory. The basename's memory - * belongs to the iterator. The caller is responsible for prepending - * the directory name and file separator, if necessary. - * When done with the iterator, call the close method to clean up. - */ -typedef struct WildcardIterator_* WildcardIterator; - -#ifdef _WIN32 -struct WildcardIterator_ -{ - HANDLE handle; - char *firstFile; /* Stupid FindFirstFile...FindNextFile */ -}; - -static WildcardIterator -WildcardIterator_for(const char *wildcard) -{ - WIN32_FIND_DATA find_data; - WildcardIterator it = NEW_(WildcardIterator); - HANDLE handle = FindFirstFile(wildcard, &find_data); - if (handle == INVALID_HANDLE_VALUE) - return NULL; - it->handle = handle; - it->firstFile = find_data.cFileName; - return it; -} - -static char * -WildcardIterator_next(WildcardIterator it) -{ - WIN32_FIND_DATA find_data; - if (it->firstFile != NULL) { - char *firstFile = it->firstFile; - it->firstFile = NULL; - return firstFile; - } - return FindNextFile(it->handle, &find_data) - ? find_data.cFileName : NULL; -} - -static void -WildcardIterator_close(WildcardIterator it) -{ - if (it) { - FindClose(it->handle); - JLI_MemFree(it->firstFile); - JLI_MemFree(it); - } -} - -#else /* Unix */ -struct WildcardIterator_ -{ - DIR *dir; -}; - -static WildcardIterator -WildcardIterator_for(const char *wildcard) -{ - DIR *dir; - int wildlen = strlen(wildcard); - if (wildlen < 2) { - dir = opendir("."); - } else { - char *dirname = JLI_StringDup(wildcard); - dirname[wildlen - 1] = '\0'; - dir = opendir(dirname); - JLI_MemFree(dirname); - } - if (dir == NULL) - return NULL; - else { - WildcardIterator it = NEW_(WildcardIterator); - it->dir = dir; - return it; - } -} - -static char * -WildcardIterator_next(WildcardIterator it) -{ - struct dirent* dirp = readdir(it->dir); - return dirp ?
dirp->d_name : NULL; -} - -static void -WildcardIterator_close(WildcardIterator it) -{ - if (it) { - closedir(it->dir); - JLI_MemFree(it); - } -} -#endif /* Unix */ - -static int -equal(const char *s1, const char *s2) -{ - return strcmp(s1, s2) == 0; -} - -/* - * FileList ADT - a dynamic list of C filenames - */ -struct FileList_ -{ - char **files; - int size; - int capacity; -}; -typedef struct FileList_ *FileList; - -static FileList -FileList_new(int capacity) -{ - FileList fl = NEW_(FileList); - fl->capacity = capacity; - fl->files = (char **) JLI_MemAlloc(capacity * sizeof(fl->files[0])); - fl->size = 0; - return fl; -} - -#ifdef DEBUG_WILDCARD -static void -FileList_print(FileList fl) -{ - int i; - putchar('['); - for (i = 0; i < fl->size; i++) { - if (i > 0) printf(", "); - printf("\"%s\"",fl->files[i]); - } - putchar(']'); -} -#endif - -static void -FileList_free(FileList fl) -{ - if (fl) { - if (fl->files) { - int i; - for (i = 0; i < fl->size; i++) - JLI_MemFree(fl->files[i]); - JLI_MemFree(fl->files); - } - JLI_MemFree(fl); - } -} - -static void -FileList_ensureCapacity(FileList fl, int capacity) -{ - if (fl->capacity < capacity) { - while (fl->capacity < capacity) - fl->capacity *= 2; - fl->files = JLI_MemRealloc(fl->files, - fl->capacity * sizeof(fl->files[0])); - } -} - -static void -FileList_add(FileList fl, char *file) -{ - FileList_ensureCapacity(fl, fl->size+1); - fl->files[fl->size++] = file; -} - -static void -FileList_addSubstring(FileList fl, const char *beg, int len) -{ - char *filename = (char *) JLI_MemAlloc(len+1); - memcpy(filename, beg, len); - filename[len] = '\0'; - FileList_ensureCapacity(fl, fl->size+1); - fl->files[fl->size++] = filename; -} - -static char * -FileList_join(FileList fl, char sep) -{ - int i; - int size; - char *path; - char *p; - for (i = 0, size = 1; i < fl->size; i++) - size += strlen(fl->files[i]) + 1; - - path = JLI_MemAlloc(size); - - for (i = 0, p = path; i < fl->size; i++) { - int len = strlen(fl->files[i]); - if (i > 0) *p++ = sep; - memcpy(p, fl->files[i], len); - p += len; - } - *p = '\0'; - - return path; -} - -static FileList -FileList_split(const char *path, char sep) -{ - const char *p, *q; - int len = strlen(path); - int count; - FileList fl; - for (count = 1, p = path; p < path + len; p++) - count += (*p == sep); - fl = FileList_new(count); - for (p = path;;) { - for (q = p; q <= path + len; q++) { - if (*q == sep || *q == '\0') { - FileList_addSubstring(fl, p, q - p); - if (*q == '\0') - return fl; - p = q + 1; - } - } - } -} - -static int -isJarFileName(const char *filename) -{ - int len = strlen(filename); - return (len >= 4) && - (filename[len - 4] == '.') && - (equal(filename + len - 3, "jar") || - equal(filename + len - 3, "JAR")) && - /* Paranoia: Maybe filename is "DIR:foo.jar" */ - (strchr(filename, PATH_SEPARATOR) == NULL); -} - -static char * -wildcardConcat(const char *wildcard, const char *basename) -{ - int wildlen = strlen(wildcard); - int baselen = strlen(basename); - char *filename = (char *) JLI_MemAlloc(wildlen + baselen); - /* Replace the trailing '*' with basename */ - memcpy(filename, wildcard, wildlen-1); - memcpy(filename+wildlen-1, basename, baselen+1); - return filename; -} - -static FileList -wildcardFileList(const char *wildcard) -{ - const char *basename; - FileList fl = FileList_new(16); - WildcardIterator it = WildcardIterator_for(wildcard); - if (it == NULL) { - FileList_free(fl); - return NULL; - } - while ((basename = WildcardIterator_next(it)) != NULL) - if (isJarFileName(basename)) - 
FileList_add(fl, wildcardConcat(wildcard, basename)); - WildcardIterator_close(it); - return fl; -} - -static int -isWildcard(const char *filename) -{ - int len = strlen(filename); - return (len > 0) && - (filename[len - 1] == '*') && - (len == 1 || IS_FILE_SEPARATOR(filename[len - 2])) && - (! exists(filename)); -} - -static void -FileList_expandWildcards(FileList fl) -{ - int i, j; - for (i = 0; i < fl->size; i++) { - if (isWildcard(fl->files[i])) { - FileList expanded = wildcardFileList(fl->files[i]); - if (expanded != NULL && expanded->size > 0) { - JLI_MemFree(fl->files[i]); - FileList_ensureCapacity(fl, fl->size + expanded->size); - for (j = fl->size - 1; j >= i+1; j--) - fl->files[j+expanded->size-1] = fl->files[j]; - for (j = 0; j < expanded->size; j++) - fl->files[i+j] = expanded->files[j]; - i += expanded->size - 1; - fl->size += expanded->size - 1; - /* fl expropriates expanded's elements. */ - expanded->size = 0; - } - FileList_free(expanded); - } - } -} - -const char * -JLI_WildcardExpandClasspath(const char *classpath) -{ - char *expanded; - FileList fl; - - if (strchr(classpath, '*') == NULL) - return classpath; - fl = FileList_split(classpath, PATH_SEPARATOR); - FileList_expandWildcards(fl); - expanded = FileList_join(fl, PATH_SEPARATOR); - FileList_free(fl); - if (getenv("_JAVA_LAUNCHER_DEBUG") != 0) - printf("Expanded wildcards:\n" - " before: \"%s\"\n" - " after : \"%s\"\n", - classpath, expanded); - return expanded; -} - -#ifdef DEBUG_WILDCARD -static void -wildcardExpandArgv(const char ***argv) -{ - int i; - for (i = 0; (*argv)[i]; i++) { - if (equal((*argv)[i], "-cp") || - equal((*argv)[i], "-classpath")) { - i++; - (*argv)[i] = wildcardExpandClasspath((*argv)[i]); - } - } -} - -static void -debugPrintArgv(char *argv[]) -{ - int i; - putchar('['); - for (i = 0; argv[i]; i++) { - if (i > 0) printf(", "); - printf("\"%s\"", argv[i]); - } - printf("]\n"); -} - -int -main(int argc, char *argv[]) -{ - argv[0] = "java"; - wildcardExpandArgv((const char***)&argv); - debugPrintArgv(argv); - /* execvp("java", argv); */ - return 0; -} -#endif /* DEBUG_WILDCARD */ - -/* Cute little perl prototype implementation.... - -my $sep = ($^O =~ /^(Windows|cygwin)/) ? ";" : ":"; - -sub expand($) { - opendir DIR, $_[0] or return $_[0]; - join $sep, map {"$_[0]/$_"} grep {/\.(jar|JAR)$/} readdir DIR; -} - -sub munge($) { - join $sep, - map {(! -r $_ and s/[\/\\]+\*$//) ? expand $_ : $_} split $sep, $_[0]; -} - -for (my $i = 0; $i < @ARGV - 1; $i++) { - $ARGV[$i+1] = munge $ARGV[$i+1] if $ARGV[$i] =~ /^-c(p|lasspath)$/; -} - -$ENV{CLASSPATH} = munge $ENV{CLASSPATH} if exists $ENV{CLASSPATH}; -@ARGV = ("java", @ARGV); -print "@ARGV\n"; -exec @ARGV; - -*/ diff -r e0fb8a213650 -r 836a62f43af9 src/share/tools/launcher/wildcard.h --- a/src/share/tools/launcher/wildcard.h Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,34 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef WILDCARD_H_ -#define WILDCARD_H_ - -#ifdef EXPAND_CLASSPATH_WILDCARDS -const char *JLI_WildcardExpandClasspath(const char *classpath); -#else -#define JLI_WildcardExpandClasspath(s) (s) -#endif - -#endif /* include guard */ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/adlc/archDesc.cpp --- a/src/share/vm/adlc/archDesc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/adlc/archDesc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -29,8 +29,8 @@ static FILE *errfile = stderr; //--------------------------- utility functions ----------------------------- -inline char toUpper(char lower) { - return (('a' <= lower && lower <= 'z') ? (lower + ('A'-'a')) : lower); +inline char toUpper(char lower) { + return (('a' <= lower && lower <= 'z') ? ((char) (lower + ('A'-'a'))) : lower); } char *toUpper(const char *str) { char *upper = new char[strlen(str)+1]; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/adlc/dict2.cpp --- a/src/share/vm/adlc/dict2.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/adlc/dict2.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -64,18 +64,18 @@ int i; // Precompute table of null character hashes - if( !initflag ) { // Not initializated yet? - xsum[0] = (1<<shft[0])+1; // Initialize - for( i = 1; i < MAXID; i++) { // Precompute - xsum[i] = (1<<shft[i])+1+xsum[i-1]; // Save previous bit sums - } - initflag = 1; // Never again - } + if (!initflag) { // Not initializated yet? + xsum[0] = (1 << shft[0]) + 1; // Initialize + for (i = 1; i < MAXID; i++) { // Precompute + xsum[i] = (1 << shft[i]) + 1 + xsum[i-1]; // Save previous bit sums + } + initflag = 1; // Never again + } _size = 16; // Size is a power of 2 _cnt = 0; // Dictionary is empty - _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket)*_size); - memset(_bin,0,sizeof(bucket)*_size); + _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket) * _size); + memset(_bin, 0, sizeof(bucket) * _size); } //------------------------------~Dict------------------------------------------ @@ -287,11 +287,11 @@ register int sum = 0; register const char *s = (const char *)t; - while( ((c = s[k]) != '\0') && (k < MAXID-1) ) { // Get characters till nul - c = (c<<1)+1; // Characters are always odd! - sum += c + (c<<shft[k++]); // Shift around the bits - } - return (int)((sum+xsum[k]) >> 1); // Hash key, un-modulo'd table size + while (((c = s[k]) != '\0') && (k < MAXID-1)) { // Get characters till nul + c = (c << 1) + 1; // Characters are always odd! + sum += c + (c << shft[k++]); // Shift around the bits + } + return (int)((sum + xsum[k]) >> 1); // Hash key, un-modulo'd table size } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/adlc/formssel.cpp --- a/src/share/vm/adlc/formssel.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/adlc/formssel.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -796,11 +796,11 @@ return num_opnds; } -const char *InstructForm::opnd_ident(int idx) { +const char* InstructForm::opnd_ident(int idx) { return _components.at(idx)->_name; } -const char *InstructForm::unique_opnd_ident(int idx) { +const char* InstructForm::unique_opnd_ident(uint idx) { uint i; for (i = 1; i < num_opnds(); ++i) { if (unique_opnds_idx(i) == idx) { @@ -1315,36 +1315,36 @@ // Seach through operands to determine parameters unique positions. void InstructForm::set_unique_opnds() { uint* uniq_idx = NULL; - int nopnds = num_opnds(); + uint nopnds = num_opnds(); uint num_uniq = nopnds; - int i; + uint i; _uniq_idx_length = 0; - if ( nopnds > 0 ) { + if (nopnds > 0) { // Allocate index array. Worst case we're mapping from each // component back to an index and any DEF always goes at 0 so the // length of the array has to be the number of components + 1.
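  // (Editorial illustration with a hypothetical rule, not from the .ad
  // files: for "addI dst, src, src" the parameter src occurs twice, so
  // uniq_idx maps positions {0, 1, 2} to {0, 1, 1}, num_uniq drops from
  // 3 to 2, and the duplicated use shares a single MachNode operand slot.)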
_uniq_idx_length = _components.count() + 1; - uniq_idx = (uint*) malloc(sizeof(uint)*(_uniq_idx_length)); - for( i = 0; i < _uniq_idx_length; i++ ) { + uniq_idx = (uint*) malloc(sizeof(uint) * _uniq_idx_length); + for (i = 0; i < _uniq_idx_length; i++) { uniq_idx[i] = i; } } // Do it only if there is a match rule and no expand rule. With an // expand rule it is done by creating new mach node in Expand() // method. - if ( nopnds > 0 && _matrule != NULL && _exprule == NULL ) { + if (nopnds > 0 && _matrule != NULL && _exprule == NULL) { const char *name; uint count; bool has_dupl_use = false; _parameters.reset(); - while( (name = _parameters.iter()) != NULL ) { + while ((name = _parameters.iter()) != NULL) { count = 0; - int position = 0; - int uniq_position = 0; + uint position = 0; + uint uniq_position = 0; _components.reset(); Component *comp = NULL; - if( sets_result() ) { + if (sets_result()) { comp = _components.iter(); position++; } @@ -1352,11 +1352,11 @@ for (; (comp = _components.iter()) != NULL; ++position) { // When the first component is not a DEF, // leave space for the result operand! - if ( position==0 && (! comp->isa(Component::DEF)) ) { + if (position==0 && (!comp->isa(Component::DEF))) { ++position; } - if( strcmp(name, comp->_name)==0 ) { - if( ++count > 1 ) { + if (strcmp(name, comp->_name) == 0) { + if (++count > 1) { assert(position < _uniq_idx_length, "out of bounds"); uniq_idx[position] = uniq_position; has_dupl_use = true; @@ -1364,22 +1364,25 @@ uniq_position = position; } } - if( comp->isa(Component::DEF) - && comp->isa(Component::USE) ) { + if (comp->isa(Component::DEF) && comp->isa(Component::USE)) { ++position; - if( position != 1 ) + if (position != 1) --position; // only use two slots for the 1st USE_DEF } } } - if( has_dupl_use ) { - for( i = 1; i < nopnds; i++ ) - if( i != uniq_idx[i] ) + if (has_dupl_use) { + for (i = 1; i < nopnds; i++) { + if (i != uniq_idx[i]) { break; - int j = i; - for( ; i < nopnds; i++ ) - if( i == uniq_idx[i] ) + } + } + uint j = i; + for (; i < nopnds; i++) { + if (i == uniq_idx[i]) { uniq_idx[i] = j++; + } + } num_uniq = j; } } @@ -2216,21 +2219,27 @@ bool OperandForm::is_bound_register() const { - RegClass *reg_class = get_RegClass(); - if (reg_class == NULL) return false; - - const char * name = ideal_type(globalAD->globalNames()); - if (name == NULL) return false; - - int size = 0; - if (strcmp(name,"RegFlags")==0) size = 1; - if (strcmp(name,"RegI")==0) size = 1; - if (strcmp(name,"RegF")==0) size = 1; - if (strcmp(name,"RegD")==0) size = 2; - if (strcmp(name,"RegL")==0) size = 2; - if (strcmp(name,"RegN")==0) size = 1; - if (strcmp(name,"RegP")==0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1; - if (size == 0) return false; + RegClass* reg_class = get_RegClass(); + if (reg_class == NULL) { + return false; + } + + const char* name = ideal_type(globalAD->globalNames()); + if (name == NULL) { + return false; + } + + uint size = 0; + if (strcmp(name, "RegFlags") == 0) size = 1; + if (strcmp(name, "RegI") == 0) size = 1; + if (strcmp(name, "RegF") == 0) size = 1; + if (strcmp(name, "RegD") == 0) size = 2; + if (strcmp(name, "RegL") == 0) size = 2; + if (strcmp(name, "RegN") == 0) size = 1; + if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 
2 : 1; + if (size == 0) { + return false; + } return size == reg_class->size(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/adlc/formssel.hpp --- a/src/share/vm/adlc/formssel.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/adlc/formssel.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -106,7 +106,7 @@ const char *_ins_pipe; // Instruction Scheduling description class uint *_uniq_idx; // Indexes of unique operands - int _uniq_idx_length; // Length of _uniq_idx array + uint _uniq_idx_length; // Length of _uniq_idx array uint _num_uniq; // Number of unique operands ComponentList _components; // List of Components matches MachNode's // operand structure @@ -272,14 +272,14 @@ void set_unique_opnds(); uint num_unique_opnds() { return _num_uniq; } uint unique_opnds_idx(int idx) { - if( _uniq_idx != NULL && idx > 0 ) { - assert(idx < _uniq_idx_length, "out of bounds"); - return _uniq_idx[idx]; - } else { - return idx; - } + if (_uniq_idx != NULL && idx > 0) { + assert((uint)idx < _uniq_idx_length, "out of bounds"); + return _uniq_idx[idx]; + } else { + return idx; + } } - const char *unique_opnd_ident(int idx); // Name of operand at unique idx. + const char *unique_opnd_ident(uint idx); // Name of operand at unique idx. // Operands which are only KILLs aren't part of the input array and // require special handling in some cases. Their position in this diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/adlc/main.cpp --- a/src/share/vm/adlc/main.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/adlc/main.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -213,6 +213,7 @@ AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name)); AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp"); AD.addInclude(AD._CPP_file, "asm/macroAssembler.inline.hpp"); + AD.addInclude(AD._CPP_file, "code/compiledIC.hpp"); AD.addInclude(AD._CPP_file, "code/vmreg.hpp"); AD.addInclude(AD._CPP_file, "gc_interface/collectedHeap.inline.hpp"); AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/adlc/output_c.cpp --- a/src/share/vm/adlc/output_c.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/adlc/output_c.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -463,8 +463,9 @@ uint resources_used_exclusively = 0; for (pipeclass->_resUsage.reset(); - (piperesource = (const PipeClassResourceForm *)pipeclass->_resUsage.iter()) != NULL; ) + (piperesource = (const PipeClassResourceForm*)pipeclass->_resUsage.iter()) != NULL; ) { element_count++; + } // Pre-compute the string length int templen; @@ -482,8 +483,8 @@ for (i = rescount; i > 0; i /= 10) maskdigit++; - static const char * pipeline_use_cycle_mask = "Pipeline_Use_Cycle_Mask"; - static const char * pipeline_use_element = "Pipeline_Use_Element"; + static const char* pipeline_use_cycle_mask = "Pipeline_Use_Cycle_Mask"; + static const char* pipeline_use_element = "Pipeline_Use_Element"; templen = 1 + (int)(strlen(pipeline_use_cycle_mask) + (int)strlen(pipeline_use_element) + @@ -496,11 +497,12 @@ templen = 0; for (pipeclass->_resUsage.reset(); - (piperesource = (const PipeClassResourceForm *)pipeclass->_resUsage.iter()) != NULL; ) { + (piperesource = (const PipeClassResourceForm*)pipeclass->_resUsage.iter()) != NULL; ) { int used_mask = pipeline->_resdict[piperesource->_resource]->is_resource()->mask(); - if (!used_mask) + if (!used_mask) { fprintf(stderr, "*** used_mask is 0 ***\n"); + } resources_used |= used_mask; @@ -509,8 +511,9 @@ for (lb = 0; (used_mask & (1 << lb)) == 0; lb++); for (ub = 31; (used_mask & (1 << ub)) == 
0; ub--); - if (lb == ub) + if (lb == ub) { resources_used_exclusively |= used_mask; + } int formatlen = sprintf(&resource_mask[templen], " %s(0x%0*x, %*d, %*d, %s %s(", @@ -526,7 +529,7 @@ int cycles = piperesource->_cycles; uint stage = pipeline->_stages.index(piperesource->_stage); - if (NameList::Not_in_list == stage) { + if ((uint)NameList::Not_in_list == stage) { fprintf(stderr, "pipeline_res_mask_initializer: " "semantic error: " @@ -534,8 +537,8 @@ piperesource->_stage); exit(1); } - uint upper_limit = stage+cycles-1; - uint lower_limit = stage-1; + uint upper_limit = stage + cycles - 1; + uint lower_limit = stage - 1; uint upper_idx = upper_limit >> 5; uint lower_idx = lower_limit >> 5; uint upper_position = upper_limit & 0x1f; @@ -543,7 +546,7 @@ uint mask = (((uint)1) << upper_position) - 1; - while ( upper_idx > lower_idx ) { + while (upper_idx > lower_idx) { res_mask[upper_idx--] |= mask; mask = (uint)-1; } @@ -565,8 +568,9 @@ } resource_mask[templen] = 0; - if (last_comma) + if (last_comma) { last_comma[0] = ' '; + } // See if the same string is in the table int ndx = pipeline_res_mask.index(resource_mask); @@ -580,7 +584,7 @@ fprintf(fp_cpp, "static const Pipeline_Use_Element pipeline_res_mask_%03d[%d] = {\n%s};\n\n", ndx+1, element_count, resource_mask); - char * args = new char [9 + 2*masklen + maskdigit]; + char* args = new char [9 + 2*masklen + maskdigit]; sprintf(args, "0x%0*x, 0x%0*x, %*d", masklen, resources_used, @@ -589,8 +593,9 @@ pipeline_res_args.addName(args); } - else + else { delete [] resource_mask; + } delete [] res_mask; //delete [] res_masks; @@ -1787,7 +1792,7 @@ // Skip first unique operands. for( i = 1; i < cur_num_opnds; i++ ) { comp = node->_components.iter(); - if( (int)i != node->unique_opnds_idx(i) ) { + if (i != node->unique_opnds_idx(i)) { break; } new_num_opnds++; @@ -1795,7 +1800,7 @@ // Replace not unique operands with next unique operands. for( ; i < cur_num_opnds; i++ ) { comp = node->_components.iter(); - int j = node->unique_opnds_idx(i); + uint j = node->unique_opnds_idx(i); // unique_opnds_idx(i) is unique if unique_opnds_idx(j) is not unique. if( j != node->unique_opnds_idx(j) ) { fprintf(fp," set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n", diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/asm/assembler.cpp --- a/src/share/vm/asm/assembler.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/asm/assembler.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -44,7 +44,7 @@ CodeSection* cs = code->insts(); cs->clear_mark(); // new assembler kills old mark if (cs->start() == NULL) { - vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s", + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, err_msg("CodeCache: no room for %s", code->name())); } _code_section = cs; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/c1/c1_Compiler.cpp --- a/src/share/vm/c1/c1_Compiler.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/c1/c1_Compiler.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -77,30 +77,42 @@ } -BufferBlob* Compiler::build_buffer_blob() { +BufferBlob* Compiler::get_buffer_blob(ciEnv* env) { + // Allocate buffer blob once at startup since allocation for each + // compilation seems to be too expensive (at least on Intel win32). + BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob(); + if (buffer_blob != NULL) { + return buffer_blob; + } + // setup CodeBuffer. Preallocate a BufferBlob of size // NMethodSizeLimit plus some extra space for constants. 
int code_buffer_size = Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size(); - BufferBlob* blob = BufferBlob::create("Compiler1 temporary CodeBuffer", - code_buffer_size); - guarantee(blob != NULL, "must create initial code buffer"); - return blob; + + buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer", + code_buffer_size); + if (buffer_blob == NULL) { + CompileBroker::handle_full_code_cache(); + env->record_failure("CodeCache is full"); + } else { + CompilerThread::current()->set_buffer_blob(buffer_blob); + } + + return buffer_blob; } void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) { - // Allocate buffer blob once at startup since allocation for each - // compilation seems to be too expensive (at least on Intel win32). - BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob(); + BufferBlob* buffer_blob = Compiler::get_buffer_blob(env); if (buffer_blob == NULL) { - buffer_blob = build_buffer_blob(); - CompilerThread::current()->set_buffer_blob(buffer_blob); + return; } if (!is_initialized()) { initialize(); } + // invoke compilation { // We are nested here because we need for the destructor diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/c1/c1_Compiler.hpp --- a/src/share/vm/c1/c1_Compiler.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/c1/c1_Compiler.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -46,7 +46,7 @@ virtual bool is_c1() { return true; }; - BufferBlob* build_buffer_blob(); + BufferBlob* get_buffer_blob(ciEnv* env); // Missing feature tests virtual bool supports_native() { return true; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/c1/c1_LIRGenerator.cpp --- a/src/share/vm/c1/c1_LIRGenerator.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2232,6 +2232,7 @@ // We still need to continue with the checks. if (src.is_constant()) { ciObject* src_con = src.get_jobject_constant(); + guarantee(src_con != NULL, "no source constant"); if (src_con->is_null_object()) { // The constant src object is null - We can skip @@ -3044,21 +3045,20 @@ assert(level > CompLevel_simple, "Shouldn't be here"); int offset = -1; - LIR_Opr counter_holder = new_register(T_METADATA); - LIR_Opr meth; + LIR_Opr counter_holder; if (level == CompLevel_limited_profile) { - offset = in_bytes(backedge ? Method::backedge_counter_offset() : - Method::invocation_counter_offset()); - __ metadata2reg(method->constant_encoding(), counter_holder); - meth = counter_holder; + address counters_adr = method->ensure_method_counters(); + counter_holder = new_pointer_register(); + __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder); + offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() : + MethodCounters::invocation_counter_offset()); } else if (level == CompLevel_full_profile) { + counter_holder = new_register(T_METADATA); offset = in_bytes(backedge ? 
MethodData::backedge_counter_offset() : MethodData::invocation_counter_offset()); ciMethodData* md = method->method_data_or_null(); assert(md != NULL, "Sanity"); __ metadata2reg(md->constant_encoding(), counter_holder); - meth = new_register(T_METADATA); - __ metadata2reg(method->constant_encoding(), meth); } else { ShouldNotReachHere(); } @@ -3069,6 +3069,8 @@ __ store(result, counter); if (notify) { LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT); + LIR_Opr meth = new_register(T_METADATA); + __ metadata2reg(method->constant_encoding(), meth); __ logical_and(result, mask, result); __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0)); // The bci for info can point to cmp for if's we want the if bci diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/c1/c1_RangeCheckElimination.cpp --- a/src/share/vm/c1/c1_RangeCheckElimination.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/c1/c1_RangeCheckElimination.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -459,7 +459,7 @@ // Iterate over all different indices if (_optimistic) { - for (int i=0; i<indices.length(); i++) { + for (int i = 0; i < indices.length(); i++) { Instruction *index_instruction = indices.at(i); AccessIndexedInfo *info = _access_indexed_info[index_instruction->id()]; assert(info != NULL, "Info must not be null"); @@ -531,9 +531,7 @@ remove_range_check(ai); } } - _access_indexed_info[index_instruction->id()] = NULL; } - indices.clear(); if (list_constant.length() > 1) { AccessIndexed *first = list_constant.at(0); @@ -560,6 +558,13 @@ } } } + + // Clear data structures for next array + for (int i = 0; i < indices.length(); i++) { + Instruction *index_instruction = indices.at(i); + _access_indexed_info[index_instruction->id()] = NULL; + } + indices.clear(); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/c1/c1_Runtime1.cpp --- a/src/share/vm/c1/c1_Runtime1.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/c1/c1_Runtime1.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1261,7 +1261,7 @@ if (length == 0) return ac_ok; if (src->is_typeArray()) { - Klass* const klass_oop = src->klass(); + Klass* klass_oop = src->klass(); if (klass_oop != dst->klass()) return ac_failed; TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop); const int l2es = klass->log2_element_size(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/ci/ciEnv.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -483,7 +483,8 @@ { // We have to lock the cpool to keep the oop from being resolved // while we are accessing it.
- MonitorLockerEx ml(cpool->lock()); + oop cplock = cpool->lock(); + ObjectLocker ol(cplock, THREAD, cplock != NULL); constantTag tag = cpool->tag_at(index); if (tag.is_klass()) { // The klass has been inserted into the constant pool @@ -1149,23 +1150,9 @@ record_method_not_compilable("out of memory"); } -fileStream* ciEnv::_replay_data_stream = NULL; - -void ciEnv::dump_replay_data() { +void ciEnv::dump_replay_data(outputStream* out) { VM_ENTRY_MARK; MutexLocker ml(Compile_lock); - if (_replay_data_stream == NULL) { - _replay_data_stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(ReplayDataFile); - if (_replay_data_stream == NULL) { - fatal(err_msg("Can't open %s for replay data", ReplayDataFile)); - } - } - dump_replay_data(_replay_data_stream); -} - - -void ciEnv::dump_replay_data(outputStream* out) { - ASSERT_IN_VM; ResourceMark rm; #if INCLUDE_JVMTI out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables); @@ -1178,13 +1165,15 @@ for (int i = 0; i < objects->length(); i++) { objects->at(i)->dump_replay_data(out); } - Method* method = task()->method(); - int entry_bci = task()->osr_bci(); + CompileTask* task = this->task(); + Method* method = task->method(); + int entry_bci = task->osr_bci(); + int comp_level = task->comp_level(); // Klass holder = method->method_holder(); - out->print_cr("compile %s %s %s %d", + out->print_cr("compile %s %s %s %d %d", method->klass_name()->as_quoted_ascii(), method->name()->as_quoted_ascii(), method->signature()->as_quoted_ascii(), - entry_bci); + entry_bci, comp_level); out->flush(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/ci/ciEnv.hpp --- a/src/share/vm/ci/ciEnv.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/ci/ciEnv.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -46,8 +46,6 @@ friend class CompileBroker; friend class Dependencies; // for get_object, during logging - static fileStream* _replay_data_stream; - private: Arena* _arena; // Alias for _ciEnv_arena except in init_shared_objects() Arena _ciEnv_arena; @@ -451,10 +449,6 @@ // RedefineClasses support void metadata_do(void f(Metadata*)) { _factory->metadata_do(f); } - // Dump the compilation replay data for this ciEnv to - // ReplayDataFile, creating the file if needed. - void dump_replay_data(); - // Dump the compilation replay data for the ciEnv to the stream. void dump_replay_data(outputStream* out); }; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/ci/ciInstanceKlass.cpp --- a/src/share/vm/ci/ciInstanceKlass.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/ci/ciInstanceKlass.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -211,13 +211,42 @@ // ------------------------------------------------------------------ // ciInstanceKlass::uses_default_loader -bool ciInstanceKlass::uses_default_loader() { +bool ciInstanceKlass::uses_default_loader() const { // Note: We do not need to resolve the handle or enter the VM // in order to test null-ness. return _loader == NULL; } // ------------------------------------------------------------------ + +/** + * Return basic type of boxed value for box klass or T_OBJECT if not. + */ +BasicType ciInstanceKlass::box_klass_type() const { + if (uses_default_loader() && is_loaded()) { + return SystemDictionary::box_klass_type(get_Klass()); + } else { + return T_OBJECT; + } +} + +/** + * Is this boxing klass? + */ +bool ciInstanceKlass::is_box_klass() const { + return is_java_primitive(box_klass_type()); +} + +/** + * Is this boxed value offset? 
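 * (Editorial example: for java/lang/Integer this returns true exactly when
 * bt is T_INT and offset equals
 * java_lang_boxing_object::value_offset_in_bytes(T_INT), i.e. the offset
 * of the Integer.value field.)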
+ */ +bool ciInstanceKlass::is_boxed_value_offset(int offset) const { + BasicType bt = box_klass_type(); + return is_java_primitive(bt) && + (offset == java_lang_boxing_object::value_offset_in_bytes(bt)); +} + +// ------------------------------------------------------------------ // ciInstanceKlass::is_in_package // // Is this klass in the given package? diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/ci/ciInstanceKlass.hpp --- a/src/share/vm/ci/ciInstanceKlass.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/ci/ciInstanceKlass.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -217,10 +217,14 @@ ciInstanceKlass* implementor(); // Is the defining class loader of this class the default loader? - bool uses_default_loader(); + bool uses_default_loader() const; bool is_java_lang_Object() const; + BasicType box_klass_type() const; + bool is_box_klass() const; + bool is_boxed_value_offset(int offset) const; + // Is this klass in the given package? bool is_in_package(const char* packagename) { return is_in_package(packagename, (int) strlen(packagename)); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/ci/ciMethod.cpp --- a/src/share/vm/ci/ciMethod.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/ci/ciMethod.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -905,6 +905,20 @@ } // ------------------------------------------------------------------ +// ciMethod::ensure_method_counters +// +address ciMethod::ensure_method_counters() { + check_is_loaded(); + VM_ENTRY_MARK; + methodHandle mh(THREAD, get_Method()); + MethodCounters *counter = mh->method_counters(); + if (counter == NULL) { + counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL); + } + return (address)counter; +} + +// ------------------------------------------------------------------ // ciMethod::should_exclude // // Should this method be excluded from compilation? 
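// (Editorial note: the caller added by this changeset is in
// c1_LIRGenerator.cpp above,
//     address counters_adr = method->ensure_method_counters();
//     counter_holder = new_pointer_register();
//     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
// so the limited-profile counter update can address the MethodCounters
// cell directly instead of reloading it through the Method*.)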
@@ -1165,6 +1179,44 @@ bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor); } bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); } +bool ciMethod::is_boxing_method() const { + if (holder()->is_box_klass()) { + switch (intrinsic_id()) { + case vmIntrinsics::_Boolean_valueOf: + case vmIntrinsics::_Byte_valueOf: + case vmIntrinsics::_Character_valueOf: + case vmIntrinsics::_Short_valueOf: + case vmIntrinsics::_Integer_valueOf: + case vmIntrinsics::_Long_valueOf: + case vmIntrinsics::_Float_valueOf: + case vmIntrinsics::_Double_valueOf: + return true; + default: + return false; + } + } + return false; +} + +bool ciMethod::is_unboxing_method() const { + if (holder()->is_box_klass()) { + switch (intrinsic_id()) { + case vmIntrinsics::_booleanValue: + case vmIntrinsics::_byteValue: + case vmIntrinsics::_charValue: + case vmIntrinsics::_shortValue: + case vmIntrinsics::_intValue: + case vmIntrinsics::_longValue: + case vmIntrinsics::_floatValue: + case vmIntrinsics::_doubleValue: + return true; + default: + return false; + } + } + return false; +} + BCEscapeAnalyzer *ciMethod::get_bcea() { #ifdef COMPILER2 if (_bcea == NULL) { @@ -1191,13 +1243,14 @@ ASSERT_IN_VM; ResourceMark rm; Method* method = get_Method(); + MethodCounters* mcs = method->method_counters(); Klass* holder = method->method_holder(); st->print_cr("ciMethod %s %s %s %d %d %d %d %d", holder->name()->as_quoted_ascii(), method->name()->as_quoted_ascii(), method->signature()->as_quoted_ascii(), - method->invocation_counter()->raw_counter(), - method->backedge_counter()->raw_counter(), + mcs == NULL ? 0 : mcs->invocation_counter()->raw_counter(), + mcs == NULL ? 0 : mcs->backedge_counter()->raw_counter(), interpreter_invocation_count(), interpreter_throwout_count(), _instructions_size); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/ci/ciMethod.hpp --- a/src/share/vm/ci/ciMethod.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/ci/ciMethod.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -196,7 +196,6 @@ // Analysis and profiling. // // Usage note: liveness_at_bci and init_vars should be wrapped in ResourceMarks. - bool uses_monitors() const { return _uses_monitors; } // this one should go away, it has a misleading name bool has_monitor_bytecodes() const { return _uses_monitors; } bool has_balanced_monitors(); @@ -262,6 +261,7 @@ bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const; bool check_call(int refinfo_index, bool is_static) const; bool ensure_method_data(); // make sure it exists in the VM also + address ensure_method_counters(); int instructions_size(); int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC @@ -298,6 +298,8 @@ bool is_initializer () const; bool can_be_statically_bound() const { return _can_be_statically_bound; } void dump_replay_data(outputStream* st); + bool is_boxing_method() const; + bool is_unboxing_method() const; // Print the bytecodes of this method. 
void print_codes_on(outputStream* st); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/ci/ciReplay.cpp --- a/src/share/vm/ci/ciReplay.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/ci/ciReplay.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -89,7 +89,7 @@ loader = Handle(thread, SystemDictionary::java_system_loader()); stream = fopen(filename, "rt"); if (stream == NULL) { - fprintf(stderr, "Can't open replay file %s\n", filename); + fprintf(stderr, "ERROR: Can't open replay file %s\n", filename); } buffer_length = 32; buffer = NEW_RESOURCE_ARRAY(char, buffer_length); @@ -327,7 +327,6 @@ if (had_error()) { tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message); tty->print_cr("%s", buffer); - assert(false, "error"); return; } pos = 0; @@ -370,11 +369,47 @@ } } - // compile + // validation of comp_level + bool is_valid_comp_level(int comp_level) { + const int msg_len = 256; + char* msg = NULL; + if (!is_compile(comp_level)) { + msg = NEW_RESOURCE_ARRAY(char, msg_len); + jio_snprintf(msg, msg_len, "%d is not a valid compilation level", comp_level); + } else if (!TieredCompilation && (comp_level != CompLevel_highest_tier)) { + msg = NEW_RESOURCE_ARRAY(char, msg_len); + switch (comp_level) { + case CompLevel_simple: + jio_snprintf(msg, msg_len, "compilation level %d requires Client VM or TieredCompilation", comp_level); + break; + case CompLevel_full_optimization: + jio_snprintf(msg, msg_len, "compilation level %d requires Server VM", comp_level); + break; + default: + jio_snprintf(msg, msg_len, "compilation level %d requires TieredCompilation", comp_level); + } + } + if (msg != NULL) { + report_error(msg); + return false; + } + return true; + } + + // compile void process_compile(TRAPS) { // methodHandle method; Method* method = parse_method(CHECK); int entry_bci = parse_int("entry_bci"); + const char* comp_level_label = "comp_level"; + int comp_level = parse_int(comp_level_label); + // old version w/o comp_level + if (had_error() && (error_message() == comp_level_label)) { + comp_level = CompLevel_full_optimization; + } + if (!is_valid_comp_level(comp_level)) { + return; + } Klass* k = method->method_holder(); ((InstanceKlass*)k)->initialize(THREAD); if (HAS_PENDING_EXCEPTION) { @@ -389,12 +424,12 @@ } } // Make sure the existence of a prior compile doesn't stop this one - nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, CompLevel_full_optimization, true) : method->code(); + nmethod* nm = (entry_bci != InvocationEntryBci) ? 
method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code(); if (nm != NULL) { nm->make_not_entrant(); } replay_state = this; - CompileBroker::compile_method(method, entry_bci, CompLevel_full_optimization, + CompileBroker::compile_method(method, entry_bci, comp_level, methodHandle(), 0, "replay", THREAD); replay_state = NULL; reset(); @@ -457,7 +492,9 @@ } Klass* k = parse_klass(CHECK); rec->oops_offsets[i] = offset; - rec->oops_handles[i] = (jobject)(new KlassHandle(THREAD, k)); + KlassHandle *kh = NEW_C_HEAP_OBJ(KlassHandle, mtCompiler); + ::new ((void*)kh) KlassHandle(THREAD, k); + rec->oops_handles[i] = (jobject)kh; } } @@ -551,7 +588,7 @@ if (parsed_two_word == i) continue; default: - ShouldNotReachHere(); + fatal(err_msg_res("Unexpected tag: %d", cp->tag_at(i).value())); break; } @@ -819,6 +856,11 @@ ReplaySuppressInitializers = 1; } + if (FLAG_IS_DEFAULT(ReplayDataFile)) { + tty->print_cr("ERROR: no compiler replay data file specified (use -XX:ReplayDataFile=replay_pid12345.txt)."); + return 1; + } + // Load and parse the replay data CompileReplay rp(ReplayDataFile, THREAD); int exit_code = 0; @@ -920,12 +962,17 @@ method->print_name(tty); tty->cr(); } else { + EXCEPTION_CONTEXT; + MethodCounters* mcs = method->method_counters(); // m->_instructions_size = rec->instructions_size; m->_instructions_size = -1; m->_interpreter_invocation_count = rec->interpreter_invocation_count; m->_interpreter_throwout_count = rec->interpreter_throwout_count; - method->invocation_counter()->_counter = rec->invocation_counter; - method->backedge_counter()->_counter = rec->backedge_counter; + if (mcs == NULL) { + mcs = Method::build_method_counters(method, CHECK_AND_CLEAR); + } + mcs->invocation_counter()->_counter = rec->invocation_counter; + mcs->backedge_counter()->_counter = rec->backedge_counter; } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/altHashing.cpp --- a/src/share/vm/classfile/altHashing.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/altHashing.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -242,8 +242,8 @@ void AltHashing::testMurmur3_32_ByteArray() { // printf("testMurmur3_32_ByteArray\n"); - jbyte* vector = new jbyte[256]; - jbyte* hashes = new jbyte[4 * 256]; + jbyte vector[256]; + jbyte hashes[4 * 256]; for (int i = 0; i < 256; i++) { vector[i] = (jbyte) i; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/bytecodeAssembler.cpp --- a/src/share/vm/classfile/bytecodeAssembler.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/bytecodeAssembler.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -75,8 +75,8 @@ int idx = i + _orig->length(); switch (entry._tag) { case BytecodeCPEntry::UTF8: + entry._u.utf8->increment_refcount(); cp->symbol_at_put(idx, entry._u.utf8); - entry._u.utf8->increment_refcount(); break; case BytecodeCPEntry::KLASS: cp->unresolved_klass_at_put( diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/classFileParser.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -39,6 +39,7 @@ #include "memory/gcLocker.hpp" #include "memory/metadataFactory.hpp" #include "memory/oopFactory.hpp" +#include "memory/referenceType.hpp" #include "memory/universe.inline.hpp" #include "oops/constantPool.hpp" #include "oops/fieldStreams.hpp" @@ -436,14 +437,19 @@ ref_index, CHECK_(nullHandle)); break; case JVM_REF_invokeVirtual: - case JVM_REF_invokeStatic: - case JVM_REF_invokeSpecial: case JVM_REF_newInvokeSpecial: check_property( tag.is_method(), "Invalid constant pool index %u in class file %s (not a method)", ref_index, CHECK_(nullHandle)); break; + case JVM_REF_invokeStatic: + case JVM_REF_invokeSpecial: + check_property(tag.is_method() || + ((_major_version >= JAVA_8_VERSION) && tag.is_interface_method()), + "Invalid constant pool index %u in class file %s (not a method)", + ref_index, CHECK_(nullHandle)); + break; case JVM_REF_invokeInterface: check_property( tag.is_interface_method(), @@ -1714,15 +1720,28 @@ coll->set_annotation(id); if (id == AnnotationCollector::_sun_misc_Contended) { + // @Contended can optionally specify the contention group. + // + // A contended group defines an equivalence class over the fields: + // the fields within the same contended group are not treated as distinct. + // The only exception is the default group, which does not imply the + // equivalence. Naturally, a contention group for a class is meaningless. + // + // While the contention group is specified as a String, annotation + // values are already interned, and we might as well use the constant + // pool index as the group tag. 
+ // + u2 group_index = 0; // default contended group if (count == 1 && s_size == (index - index0) // match size && s_tag_val == *(abase + tag_off) && member == vmSymbols::value_name()) { - u2 group_index = Bytes::get_Java_u2(abase + s_con_off); - coll->set_contended_group(group_index); - } else { - coll->set_contended_group(0); // default contended group + group_index = Bytes::get_Java_u2(abase + s_con_off); + if (_cp->symbol_at(group_index)->utf8_length() == 0) { + group_index = 0; // default contended group + } } + coll->set_contended_group(group_index); } } } @@ -2022,7 +2041,6 @@ u2 method_parameters_length = 0; u1* method_parameters_data = NULL; bool method_parameters_seen = false; - bool method_parameters_four_byte_flags; bool parsed_code_attribute = false; bool parsed_checked_exceptions_attribute = false; bool parsed_stackmap_attribute = false; @@ -2236,26 +2254,14 @@ } method_parameters_seen = true; method_parameters_length = cfs->get_u1_fast(); - // Track the actual size (note: this is written for clarity; a - // decent compiler will CSE and constant-fold this into a single - // expression) - // Use the attribute length to figure out the size of flags - if (method_attribute_length == (method_parameters_length * 6u) + 1u) { - method_parameters_four_byte_flags = true; - } else if (method_attribute_length == (method_parameters_length * 4u) + 1u) { - method_parameters_four_byte_flags = false; - } else { + if (method_attribute_length != (method_parameters_length * 4u) + 1u) { classfile_parse_error( "Invalid MethodParameters method attribute length %u in class file", method_attribute_length, CHECK_(nullHandle)); } method_parameters_data = cfs->get_u1_buffer(); cfs->skip_u2_fast(method_parameters_length); - if (method_parameters_four_byte_flags) { - cfs->skip_u4_fast(method_parameters_length); - } else { - cfs->skip_u2_fast(method_parameters_length); - } + cfs->skip_u2_fast(method_parameters_length); // ignore this attribute if it cannot be reflected if (!SystemDictionary::Parameter_klass_loaded()) method_parameters_length = 0; @@ -2418,13 +2424,8 @@ for (int i = 0; i < method_parameters_length; i++) { elem[i].name_cp_index = Bytes::get_Java_u2(method_parameters_data); method_parameters_data += 2; - if (method_parameters_four_byte_flags) { - elem[i].flags = Bytes::get_Java_u4(method_parameters_data); - method_parameters_data += 4; - } else { - elem[i].flags = Bytes::get_Java_u2(method_parameters_data); - method_parameters_data += 2; - } + elem[i].flags = Bytes::get_Java_u2(method_parameters_data); + method_parameters_data += 2; } } @@ -3066,7 +3067,7 @@ } } } -#endif // ASSERT +#endif // def ASSERT instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index, @@ -3121,15 +3122,8 @@ FieldLayoutInfo* info, TRAPS) { - // get the padding width from the option - // TODO: Ask VM about specific CPU we are running on - int pad_size = ContendedPaddingWidth; - // Field size and offset computation int nonstatic_field_size = _super_klass() == NULL ? 0 : _super_klass()->nonstatic_field_size(); -#ifndef PRODUCT - int orig_nonstatic_field_size = 0; -#endif int next_static_oop_offset; int next_static_double_offset; int next_static_word_offset; @@ -3140,13 +3134,14 @@ int next_nonstatic_word_offset; int next_nonstatic_short_offset; int next_nonstatic_byte_offset; - int next_nonstatic_type_offset; int first_nonstatic_oop_offset; - int first_nonstatic_field_offset; int next_nonstatic_field_offset; int next_nonstatic_padded_offset; // Count the contended fields by type. 
+ // + // We ignore static fields, because @Contended is not supported for them. + // The layout code below will also ignore the static fields. int nonstatic_contended_count = 0; FieldAllocationCount fac_contended; for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) { @@ -3158,7 +3153,6 @@ } } } - int contended_count = nonstatic_contended_count; // Calculate the starting byte offsets @@ -3178,61 +3172,60 @@ next_static_byte_offset = next_static_short_offset + ((fac->count[STATIC_SHORT]) * BytesPerShort); - first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() + - nonstatic_field_size * heapOopSize; - - // class is contended, pad before all the fields - if (parsed_annotations->is_contended()) { - first_nonstatic_field_offset += pad_size; + int nonstatic_fields_start = instanceOopDesc::base_offset_in_bytes() + + nonstatic_field_size * heapOopSize; + + next_nonstatic_field_offset = nonstatic_fields_start; + + bool is_contended_class = parsed_annotations->is_contended(); + + // Class is contended, pad before all the fields + if (is_contended_class) { + next_nonstatic_field_offset += ContendedPaddingWidth; } - next_nonstatic_field_offset = first_nonstatic_field_offset; - + // Compute the non-contended fields count. + // The packing code below relies on these counts to determine if some field + // can be squeezed into the alignment gap. Contended fields are obviously + // exempt from that. unsigned int nonstatic_double_count = fac->count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE]; unsigned int nonstatic_word_count = fac->count[NONSTATIC_WORD] - fac_contended.count[NONSTATIC_WORD]; unsigned int nonstatic_short_count = fac->count[NONSTATIC_SHORT] - fac_contended.count[NONSTATIC_SHORT]; unsigned int nonstatic_byte_count = fac->count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE]; unsigned int nonstatic_oop_count = fac->count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP]; + // Total non-static fields count, including every contended field + unsigned int nonstatic_fields_count = fac->count[NONSTATIC_DOUBLE] + fac->count[NONSTATIC_WORD] + + fac->count[NONSTATIC_SHORT] + fac->count[NONSTATIC_BYTE] + + fac->count[NONSTATIC_OOP]; + bool super_has_nonstatic_fields = (_super_klass() != NULL && _super_klass->has_nonstatic_fields()); - bool has_nonstatic_fields = super_has_nonstatic_fields || - ((nonstatic_double_count + nonstatic_word_count + - nonstatic_short_count + nonstatic_byte_count + - nonstatic_oop_count) != 0); + bool has_nonstatic_fields = super_has_nonstatic_fields || (nonstatic_fields_count != 0); // Prepare list of oops for oop map generation. + // + // "offset" and "count" lists are describing the set of contiguous oop + // regions. offset[i] is the start of the i-th region, which then has + // count[i] oops following. Before we know how many regions are required, + // we pessimistically allocate the maps to fit all the oops into the + // distinct regions. + // + // TODO: We add +1 to always allocate non-zero resource arrays; we need + // to figure out if we still need to do this. 
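// ------------------------------------------------------------------
// A standalone sketch (plain C++; the constants below are illustrative,
// not read from the VM) of the offset arithmetic the layout code here
// performs: align_size_up() rounds an offset up to an alignment
// boundary, and a contended field receives ContendedPaddingWidth bytes
// of padding both before and after it.
#include <cstdio>

static int align_size_up(int size, int alignment) {
  // alignment must be a power of two, as in the VM utility
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const int BytesPerLong = 8;
  const int ContendedPaddingWidth = 128;   // assumed default pad size

  int offset = 21;                                // next free offset
  offset = align_size_up(offset, BytesPerLong);   // 24: a long/double slot
  std::printf("aligned offset: %d\n", offset);

  offset += ContendedPaddingWidth;                // pre-padding
  int field_offset = offset;                      // the @Contended field
  offset += BytesPerLong;
  offset += ContendedPaddingWidth;                // post-padding
  std::printf("field at %d, next free offset %d\n", field_offset, offset);
  return 0;
}
// ------------------------------------------------------------------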
int* nonstatic_oop_offsets; unsigned int* nonstatic_oop_counts; unsigned int nonstatic_oop_map_count = 0; + unsigned int max_nonstatic_oop_maps = fac->count[NONSTATIC_OOP] + 1; nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD( - THREAD, int, nonstatic_oop_count + 1); + THREAD, int, max_nonstatic_oop_maps); nonstatic_oop_counts = NEW_RESOURCE_ARRAY_IN_THREAD( - THREAD, unsigned int, nonstatic_oop_count + 1); + THREAD, unsigned int, max_nonstatic_oop_maps); first_nonstatic_oop_offset = 0; // will be set for first oop field -#ifndef PRODUCT - if( PrintCompactFieldsSavings ) { - next_nonstatic_double_offset = next_nonstatic_field_offset + - (nonstatic_oop_count * heapOopSize); - if ( nonstatic_double_count > 0 ) { - next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong); - } - next_nonstatic_word_offset = next_nonstatic_double_offset + - (nonstatic_double_count * BytesPerLong); - next_nonstatic_short_offset = next_nonstatic_word_offset + - (nonstatic_word_count * BytesPerInt); - next_nonstatic_byte_offset = next_nonstatic_short_offset + - (nonstatic_short_count * BytesPerShort); - next_nonstatic_type_offset = align_size_up((next_nonstatic_byte_offset + - nonstatic_byte_count ), heapOopSize ); - orig_nonstatic_field_size = nonstatic_field_size + - ((next_nonstatic_type_offset - first_nonstatic_field_offset)/heapOopSize); - } -#endif bool compact_fields = CompactFields; int allocation_style = FieldsAllocationStyle; if( allocation_style < 0 || allocation_style > 2 ) { // Out of range? @@ -3264,6 +3257,7 @@ compact_fields = false; // Don't compact fields } + // Rearrange fields for a given allocation style if( allocation_style == 0 ) { // Fields order: oops, longs/doubles, ints, shorts/chars, bytes, padded fields next_nonstatic_oop_offset = next_nonstatic_field_offset; @@ -3304,6 +3298,8 @@ int nonstatic_short_space_offset; int nonstatic_byte_space_offset; + // Try to squeeze some of the fields into the gaps due to + // long/double alignment. if( nonstatic_double_count > 0 ) { int offset = next_nonstatic_double_offset; next_nonstatic_double_offset = align_size_up(offset, BytesPerLong); @@ -3413,9 +3409,11 @@ int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) * heapOopSize ) { // Extend current oop map + assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check"); nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1; } else { // Create new oop map + assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check"); nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset; nonstatic_oop_counts [nonstatic_oop_map_count] = 1; nonstatic_oop_map_count += 1; @@ -3473,12 +3471,10 @@ // // Additionally, this should not break alignment for the fields, so we round the alignment up // for each field. - if (contended_count > 0) { + if (nonstatic_contended_count > 0) { // if there is at least one contended field, we need to have pre-padding for them - if (nonstatic_contended_count > 0) { - next_nonstatic_padded_offset += pad_size; - } + next_nonstatic_padded_offset += ContendedPaddingWidth; // collect all contended groups BitMap bm(_cp->size()); @@ -3539,6 +3535,7 @@ next_nonstatic_padded_offset += heapOopSize; // Create new oop map + assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check"); nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset; nonstatic_oop_counts [nonstatic_oop_map_count] = 1; nonstatic_oop_map_count += 1; @@ -3556,7 +3553,7 @@ // the fields within the same contended group are not inter-padded. 
// The only exception is default group, which does not incur the // equivalence, and so requires intra-padding. - next_nonstatic_padded_offset += pad_size; + next_nonstatic_padded_offset += ContendedPaddingWidth; } fs.set_offset(real_offset); @@ -3568,67 +3565,59 @@ // subclass fields and/or adjacent object. // If this was the default group, the padding is already in place. if (current_group != 0) { - next_nonstatic_padded_offset += pad_size; + next_nonstatic_padded_offset += ContendedPaddingWidth; } } // handle static fields } - // Size of instances - int notaligned_offset = next_nonstatic_padded_offset; - // Entire class is contended, pad in the back. // This helps to alleviate memory contention effects for subclass fields // and/or adjacent object. - if (parsed_annotations->is_contended()) { - notaligned_offset += pad_size; + if (is_contended_class) { + next_nonstatic_padded_offset += ContendedPaddingWidth; } - int next_static_type_offset = align_size_up(next_static_byte_offset, wordSize); - int static_field_size = (next_static_type_offset - - InstanceMirrorKlass::offset_of_static_fields()) / wordSize; - - next_nonstatic_type_offset = align_size_up(notaligned_offset, heapOopSize ); - nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset - - first_nonstatic_field_offset)/heapOopSize); - - next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize ); - int instance_size = align_object_size(next_nonstatic_type_offset / wordSize); + int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset; + + int nonstatic_fields_end = align_size_up(notaligned_nonstatic_fields_end, heapOopSize); + int instance_end = align_size_up(notaligned_nonstatic_fields_end, wordSize); + int static_fields_end = align_size_up(next_static_byte_offset, wordSize); + + int static_field_size = (static_fields_end - + InstanceMirrorKlass::offset_of_static_fields()) / wordSize; + nonstatic_field_size = nonstatic_field_size + + (nonstatic_fields_end - nonstatic_fields_start) / heapOopSize; + + int instance_size = align_object_size(instance_end / wordSize); assert(instance_size == align_object_size(align_size_up( - (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize + ((parsed_annotations->is_contended()) ? pad_size : 0)), + (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), wordSize) / wordSize), "consistent layout helper value"); + // Invariant: nonstatic_field end/start should only change if there are + // nonstatic fields in the class, or if the class is contended. We compare + // against the non-aligned value, so that end alignment will not fail the + // assert without actually having the fields. + assert((notaligned_nonstatic_fields_end == nonstatic_fields_start) || + is_contended_class || + (nonstatic_fields_count > 0), "double-check nonstatic start/end"); + // Number of non-static oop map blocks allocated at end of klass. 
const unsigned int total_oop_map_count = compute_oop_map_count(_super_klass, nonstatic_oop_map_count, first_nonstatic_oop_offset); #ifndef PRODUCT - if( PrintCompactFieldsSavings ) { - ResourceMark rm; - if( nonstatic_field_size < orig_nonstatic_field_size ) { - tty->print("[Saved %d of %d bytes in %s]\n", - (orig_nonstatic_field_size - nonstatic_field_size)*heapOopSize, - orig_nonstatic_field_size*heapOopSize, - _class_name); - } else if( nonstatic_field_size > orig_nonstatic_field_size ) { - tty->print("[Wasted %d over %d bytes in %s]\n", - (nonstatic_field_size - orig_nonstatic_field_size)*heapOopSize, - orig_nonstatic_field_size*heapOopSize, - _class_name); - } - } - if (PrintFieldLayout) { print_field_layout(_class_name, _fields, _cp, instance_size, - first_nonstatic_field_offset, - next_nonstatic_field_offset, - next_static_type_offset); + nonstatic_fields_start, + nonstatic_fields_end, + static_fields_end); } #endif @@ -3837,7 +3826,7 @@ } if (TraceClassLoadingPreorder) { - tty->print("[Loading %s", name->as_klass_external_name()); + tty->print("[Loading %s", (name != NULL) ? name->as_klass_external_name() : "NoName"); if (cfs->source() != NULL) tty->print(" from %s", cfs->source()); tty->print_cr("]"); } @@ -4076,6 +4065,9 @@ } } + // Allocate mirror and initialize static fields + java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle)); + #ifdef ASSERT if (ParseAllGenericSignatures) { @@ -4091,17 +4083,6 @@ this_klass(), &all_mirandas, CHECK_(nullHandle)); } - // Allocate mirror and initialize static fields - java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle)); - - // Allocate a simple java object for locking during class initialization. - // This needs to be a java object because it can be held across a java call. - typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL); - this_klass->set_init_lock(r); - - // TODO: Move these oops to the mirror - this_klass->set_protection_domain(protection_domain()); - // Update the loader_data graph. 
record_defined_class_dependencies(this_klass, CHECK_NULL); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/classFileParser.hpp --- a/src/share/vm/classfile/classFileParser.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/classFileParser.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -304,7 +304,19 @@ inline void assert_property(bool b, const char* msg, TRAPS) { #ifdef ASSERT - if (!b) { fatal(msg); } + if (!b) { + ResourceMark rm(THREAD); + fatal(err_msg(msg, _class_name->as_C_string())); + } +#endif + } + + inline void assert_property(bool b, const char* msg, int index, TRAPS) { +#ifdef ASSERT + if (!b) { + ResourceMark rm(THREAD); + fatal(err_msg(msg, index, _class_name->as_C_string())); + } #endif } @@ -312,7 +324,7 @@ if (_need_verify) { guarantee_property(property, msg, index, CHECK); } else { - assert_property(property, msg, CHECK); + assert_property(property, msg, index, CHECK); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/classLoader.cpp --- a/src/share/vm/classfile/classLoader.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/classLoader.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1345,9 +1345,10 @@ tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_class_counter, buffer); // Preload all classes to get around uncommon traps // Iterate over all methods in class + int comp_level = CompilationPolicy::policy()->initial_compile_level(); for (int n = 0; n < k->methods()->length(); n++) { methodHandle m (THREAD, k->methods()->at(n)); - if (CompilationPolicy::can_be_compiled(m)) { + if (CompilationPolicy::can_be_compiled(m, comp_level)) { if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) { // Give sweeper a chance to keep up with CTW @@ -1356,7 +1357,7 @@ _codecache_sweep_counter = 0; } // Force compilation - CompileBroker::compile_method(m, InvocationEntryBci, CompilationPolicy::policy()->initial_compile_level(), + CompileBroker::compile_method(m, InvocationEntryBci, comp_level, methodHandle(), 0, "CTW", THREAD); if (HAS_PENDING_EXCEPTION) { clear_pending_exception_if_not_oom(CHECK); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/classLoaderData.cpp --- a/src/share/vm/classfile/classLoaderData.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/classLoaderData.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -53,6 +53,7 @@ #include "classfile/metadataOnStackMark.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" +#include "memory/gcLocker.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" @@ -63,19 +64,26 @@ #include "utilities/growableArray.hpp" #include "utilities/ostream.hpp" +#if INCLUDE_TRACE + #include "trace/tracing.hpp" +#endif + + ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL; -ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) : +ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) : _class_loader(h_class_loader()), _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially _metaspace(NULL), _unloading(false), _klasses(NULL), _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL), - _next(NULL), _dependencies(), + _next(NULL), _dependencies(dependencies), _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) { // empty } void ClassLoaderData::init_dependencies(TRAPS) { + assert(!Universe::is_fully_initialized(), "should only be called when initializing"); + 
assert(is_the_null_class_loader_data(), "should only call this for the null class loader"); _dependencies.init(CHECK); } @@ -117,6 +125,12 @@ } } +void ClassLoaderData::classes_do(void f(Klass * const)) { + for (Klass* k = _klasses; k != NULL; k = k->next_link()) { + f(k); + } +} + void ClassLoaderData::classes_do(void f(InstanceKlass*)) { for (Klass* k = _klasses; k != NULL; k = k->next_link()) { if (k->oop_is_instance()) { @@ -277,6 +291,9 @@ void ClassLoaderData::unload() { _unloading = true; + // Tell serviceability tools these classes are unloading + classes_do(InstanceKlass::notify_unload_class); + if (TraceClassLoaderData) { ResourceMark rm; tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this); @@ -300,6 +317,9 @@ ClassLoaderData::~ClassLoaderData() { + // Release C heap structures for all the classes. + classes_do(InstanceKlass::release_C_heap_structures); + Metaspace *m = _metaspace; if (m != NULL) { _metaspace = NULL; @@ -423,7 +443,7 @@ // These anonymous class loaders are to contain classes used for JSR292 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) { // Add a new class loader data to the graph. - return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL); + return ClassLoaderDataGraph::add(loader, true, CHECK_NULL); } const char* ClassLoaderData::loader_name() { @@ -495,19 +515,22 @@ ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL; ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL; - // Add a new class loader data node to the list. Assign the newly created // ClassLoaderData into the java/lang/ClassLoader object as a hidden field -ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) { - // Not assigned a class loader data yet. - // Create one. - ClassLoaderData* *list_head = &_head; - ClassLoaderData* next = _head; +ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) { + // We need to allocate all the oops for the ClassLoaderData before allocating the + // actual ClassLoaderData object. + ClassLoaderData::Dependencies dependencies(CHECK_NULL); - bool is_anonymous = (cld_addr == NULL); - ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous); + No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the + // ClassLoaderData in the graph since the CLD + // contains unhandled oops - if (cld_addr != NULL) { + ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies); + + + if (!is_anonymous) { + ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader()); // First, Atomically set it ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL); if (old != NULL) { @@ -519,6 +542,9 @@ // We won the race, and therefore the task of adding the data to the list of // class loader data + ClassLoaderData** list_head = &_head; + ClassLoaderData* next = _head; + do { cld->set_next(next); ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next); @@ -531,10 +557,6 @@ cld->loader_name()); tty->print_cr("]"); } - // Create dependencies after the CLD is added to the list. Otherwise, - // the GC GC will not find the CLD and the _class_loader field will - // not be updated. 
- cld->init_dependencies(CHECK_NULL); return cld; } next = exchanged; @@ -572,6 +594,19 @@ } } +void ClassLoaderDataGraph::classes_do(void f(Klass* const)) { + for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { + cld->classes_do(f); + } +} + +void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); + for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) { + cld->classes_do(f); + } +} + GrowableArray* ClassLoaderDataGraph::new_clds() { assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?"); @@ -665,6 +700,8 @@ dead->unload(); data = data->next(); // Remove from loader list. + // This class loader data will no longer be found + // in the ClassLoaderDataGraph. if (prev != NULL) { prev->set_next(data); } else { @@ -674,6 +711,11 @@ dead->set_next(_unloading); _unloading = dead; } + + if (seen_dead_loader) { + post_class_unload_events(); + } + return seen_dead_loader; } @@ -686,6 +728,21 @@ next = purge_me->next(); delete purge_me; } + Metaspace::purge(); +} + +void ClassLoaderDataGraph::post_class_unload_events(void) { +#if INCLUDE_TRACE + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); + if (Tracing::enabled()) { + if (Tracing::is_event_enabled(TraceClassUnloadEvent)) { + assert(_unloading != NULL, "need class loader data unload list!"); + _class_unload_time = Tracing::time(); + classes_unloading_do(&class_unload_event); + } + Tracing::on_unloading_classes(); + } +#endif } // CDS support @@ -755,3 +812,21 @@ class_loader()->print_value_on(out); } } + +#if INCLUDE_TRACE + +TracingTime ClassLoaderDataGraph::_class_unload_time; + +void ClassLoaderDataGraph::class_unload_event(Klass* const k) { + + // post class unload event + EventClassUnload event(UNTIMED); + event.set_endtime(_class_unload_time); + event.set_unloadedClass(k); + oop defining_class_loader = k->class_loader(); + event.set_definingClassLoader(defining_class_loader != NULL ? + defining_class_loader->klass() : (Klass*)NULL); + event.commit(); +} + +#endif /* INCLUDE_TRACE */ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/classLoaderData.hpp --- a/src/share/vm/classfile/classLoaderData.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/classLoaderData.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -32,6 +32,10 @@ #include "runtime/mutex.hpp" #include "utilities/growableArray.hpp" +#if INCLUDE_TRACE +# include "trace/traceTime.hpp" +#endif + // // A class loader represents a linkset. Conceptually, a linkset identifies // the complete transitive closure of resolved links that a dynamic linker can @@ -49,6 +53,7 @@ class JNIMethodBlock; class JNIHandleBlock; class Metadebug; + // GC root for walking class loader data created class ClassLoaderDataGraph : public AllStatic { @@ -62,7 +67,8 @@ // CMS support. 
static ClassLoaderData* _saved_head; - static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS); + static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); + static void post_class_unload_events(void); public: static ClassLoaderData* find_or_create(Handle class_loader, TRAPS); static void purge(); @@ -71,6 +77,8 @@ static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void classes_do(KlassClosure* klass_closure); + static void classes_do(void f(Klass* const)); + static void classes_unloading_do(void f(Klass* const)); static bool do_unloading(BoolObjectClosure* is_alive); // CMS support. @@ -86,6 +94,12 @@ static bool contains(address x); static bool contains_loader_data(ClassLoaderData* loader_data); #endif + +#if INCLUDE_TRACE + private: + static TracingTime _class_unload_time; + static void class_unload_event(Klass* const k); +#endif }; // ClassLoaderData class @@ -100,6 +114,9 @@ Thread* THREAD); public: Dependencies() : _list_head(NULL) {} + Dependencies(TRAPS) : _list_head(NULL) { + init(CHECK); + } void add(Handle dependency, TRAPS); void init(TRAPS); void oops_do(OopClosure* f); @@ -150,7 +167,7 @@ void set_next(ClassLoaderData* next) { _next = next; } ClassLoaderData* next() const { return _next; } - ClassLoaderData(Handle h_class_loader, bool is_anonymous); + ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies); ~ClassLoaderData(); void set_metaspace(Metaspace* m) { _metaspace = m; } @@ -168,7 +185,7 @@ void unload(); bool keep_alive() const { return _keep_alive; } bool is_alive(BoolObjectClosure* is_alive_closure) const; - + void classes_do(void f(Klass*)); void classes_do(void f(InstanceKlass*)); // Deallocate free list during class unloading. 
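// ------------------------------------------------------------------
// A standalone sketch (C++11 atomics standing in for the VM's
// Atomic::cmpxchg_ptr; types are hypothetical) of the lock-free list
// insertion that ClassLoaderDataGraph::add performs above: point the
// new node at the observed head, then try to swing the head; if
// another thread won the race, retry against the freshly loaded head.
#include <atomic>
#include <cstdio>

struct Node {
  int value;
  Node* next = nullptr;
};

std::atomic<Node*> list_head{nullptr};

void push(Node* n) {
  Node* observed = list_head.load();
  do {
    n->next = observed;   // must be re-set before every CAS attempt
  } while (!list_head.compare_exchange_weak(observed, n));
}

int main() {
  Node a{1}, b{2};
  push(&a);
  push(&b);
  for (Node* p = list_head.load(); p != nullptr; p = p->next) {
    std::printf("%d\n", p->value);   // prints 2 then 1
  }
  return 0;
}
// ------------------------------------------------------------------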
@@ -190,7 +207,9 @@ static void init_null_class_loader_data() { assert(_the_null_class_loader_data == NULL, "cannot initialize twice"); assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice"); - _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false); + + // We explicitly initialize the Dependencies object at a later phase in the initialization + _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false, Dependencies()); ClassLoaderDataGraph::_head = _the_null_class_loader_data; assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be"); if (DumpSharedSpaces) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/classLoaderData.inline.hpp --- a/src/share/vm/classfile/classLoaderData.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/classLoaderData.inline.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -43,10 +43,9 @@ assert(loader() != NULL,"Must be a class loader"); // Gets the class loader data out of the java/lang/ClassLoader object, if non-null // it's already in the loader_data, so no need to add - ClassLoaderData** loader_data_addr = java_lang_ClassLoader::loader_data_addr(loader()); - ClassLoaderData* loader_data_id = *loader_data_addr; - if (loader_data_id) { - return loader_data_id; + ClassLoaderData* loader_data= java_lang_ClassLoader::loader_data(loader()); + if (loader_data) { + return loader_data; } - return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD); + return ClassLoaderDataGraph::add(loader, false, THREAD); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/defaultMethods.cpp --- a/src/share/vm/classfile/defaultMethods.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/defaultMethods.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1349,6 +1349,7 @@ // Replace klass methods with new merged lists klass->set_methods(merged_methods); + klass->set_initial_method_idnum(new_size); ClassLoaderData* cld = klass->class_loader_data(); MetadataFactory::free_array(cld, original_methods); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/dictionary.cpp --- a/src/share/vm/classfile/dictionary.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/dictionary.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -27,7 +27,6 @@ #include "classfile/systemDictionary.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" -#include "services/classLoadingService.hpp" #include "utilities/hashtable.inline.hpp" @@ -156,19 +155,7 @@ if (k_def_class_loader_data == loader_data) { // This is the defining entry, so the referred class is about // to be unloaded. - // Notify the debugger and clean up the class. class_was_unloaded = true; - // notify the debugger - if (JvmtiExport::should_post_class_unload()) { - JvmtiExport::post_class_unload(ik); - } - - // notify ClassLoadingService of class unload - ClassLoadingService::notify_class_unloaded(ik); - - // Clean up C heap - ik->release_C_heap_structures(); - ik->constants()->release_C_heap_structures(); } // Also remove this system dictionary entry. 
purge_entry = true; @@ -266,22 +253,6 @@ } } - -// All classes, and their class loaders -// (added for helpers that use HandleMarks and ResourceMarks) -// Don't iterate over placeholders -void Dictionary::classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS) { - for (int index = 0; index < table_size(); index++) { - for (DictionaryEntry* probe = bucket(index); - probe != NULL; - probe = probe->next()) { - Klass* k = probe->klass(); - f(k, probe->loader_data(), CHECK); - } - } -} - - // All classes, and their class loaders // Don't iterate over placeholders void Dictionary::classes_do(void f(Klass*, ClassLoaderData*)) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/dictionary.hpp --- a/src/share/vm/classfile/dictionary.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/dictionary.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -90,7 +90,6 @@ void classes_do(void f(Klass*)); void classes_do(void f(Klass*, TRAPS), TRAPS); void classes_do(void f(Klass*, ClassLoaderData*)); - void classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS); void methods_do(void f(Method*)); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/genericSignatures.cpp --- a/src/share/vm/classfile/genericSignatures.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/genericSignatures.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -268,8 +268,15 @@ Klass* outer = SystemDictionary::find( outer_name, class_loader, protection_domain, CHECK_NULL); if (outer == NULL && !THREAD->is_Compiler_thread()) { - outer = SystemDictionary::resolve_super_or_fail(original_name, - outer_name, class_loader, protection_domain, false, CHECK_NULL); + if (outer_name == ik->super()->name()) { + outer = SystemDictionary::resolve_super_or_fail(original_name, outer_name, + class_loader, protection_domain, + false, CHECK_NULL); + } + else { + outer = SystemDictionary::resolve_or_fail(outer_name, class_loader, + protection_domain, false, CHECK_NULL); + } } InstanceKlass* outer_ik; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/javaClasses.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -315,14 +315,18 @@ return string; } -jchar* java_lang_String::as_unicode_string(oop java_string, int& length) { +jchar* java_lang_String::as_unicode_string(oop java_string, int& length, TRAPS) { typeArrayOop value = java_lang_String::value(java_string); int offset = java_lang_String::offset(java_string); length = java_lang_String::length(java_string); - jchar* result = NEW_RESOURCE_ARRAY(jchar, length); - for (int index = 0; index < length; index++) { - result[index] = value->char_at(index + offset); + jchar* result = NEW_RESOURCE_ARRAY_RETURN_NULL(jchar, length); + if (result != NULL) { + for (int index = 0; index < length; index++) { + result[index] = value->char_at(index + offset); + } + } else { + THROW_MSG_0(vmSymbols::java_lang_OutOfMemoryError(), "could not allocate Unicode string"); } return result; } @@ -508,22 +512,22 @@ // If the offset was read from the shared archive, it was fixed up already if (!k->is_shared()) { - if (k->oop_is_instance()) { - // During bootstrap, java.lang.Class wasn't loaded so static field - // offsets were computed without the size added it. Go back and - // update all the static field offsets to included the size. 
- for (JavaFieldStream fs(InstanceKlass::cast(k())); !fs.done(); fs.next()) { - if (fs.access_flags().is_static()) { - int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields(); - fs.set_offset(real_offset); + if (k->oop_is_instance()) { + // During bootstrap, java.lang.Class wasn't loaded so static field + // offsets were computed without the size added to them. Go back and + // update all the static field offsets to include the size. + for (JavaFieldStream fs(InstanceKlass::cast(k())); !fs.done(); fs.next()) { + if (fs.access_flags().is_static()) { + int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields(); + fs.set_offset(real_offset); + } } } } - } - create_mirror(k, CHECK); + create_mirror(k, Handle(NULL), CHECK); } -oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) { +oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) { assert(k->java_mirror() == NULL, "should only assign mirror once"); // Use this moment of initialization to cache modifier_flags also, // to support Class.getModifiers(). Instance classes recalculate @@ -559,6 +563,16 @@ set_array_klass(comp_mirror(), k()); } else { assert(k->oop_is_instance(), "Must be"); + + // Allocate a simple java object for a lock. + // This needs to be a java object because during class initialization + // it can be held across a java call. + typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL); + set_init_lock(mirror(), r); + + // Set protection domain also + set_protection_domain(mirror(), protection_domain()); + // Initialize static fields InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL); } @@ -593,6 +607,34 @@ java_class->int_field_put(_static_oop_field_count_offset, size); } +oop java_lang_Class::protection_domain(oop java_class) { + assert(_protection_domain_offset != 0, "must be set"); + return java_class->obj_field(_protection_domain_offset); +} +void java_lang_Class::set_protection_domain(oop java_class, oop pd) { + assert(_protection_domain_offset != 0, "must be set"); + java_class->obj_field_put(_protection_domain_offset, pd); +} + +oop java_lang_Class::init_lock(oop java_class) { + assert(_init_lock_offset != 0, "must be set"); + return java_class->obj_field(_init_lock_offset); +} +void java_lang_Class::set_init_lock(oop java_class, oop init_lock) { + assert(_init_lock_offset != 0, "must be set"); + java_class->obj_field_put(_init_lock_offset, init_lock); +} + +objArrayOop java_lang_Class::signers(oop java_class) { + assert(_signers_offset != 0, "must be set"); + return (objArrayOop)java_class->obj_field(_signers_offset); +} +void java_lang_Class::set_signers(oop java_class, objArrayOop signers) { + assert(_signers_offset != 0, "must be set"); + java_class->obj_field_put(_signers_offset, (oop)signers); +} + + oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) { // This should be improved by adding a field at the Java level or by // introducing a new VM klass (see comment in ClassFileParser) @@ -925,7 +967,7 @@ // Read thread status value from threadStatus field in java.lang.Thread java class. 
java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) { - assert(Thread::current()->is_VM_thread() || + assert(Thread::current()->is_Watcher_thread() || Thread::current()->is_VM_thread() || JavaThread::current()->thread_state() == _thread_in_vm, "Java Thread is not running in vm"); // The threadStatus is only present starting in 1.5 @@ -2631,6 +2673,15 @@ return (Metadata*)mname->address_field(_vmtarget_offset); } +#if INCLUDE_JVMTI +// Can be executed on VM thread only +void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Metadata* ref) { + assert((is_instance(mname) && (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0), "wrong type"); + assert(Thread::current()->is_VM_thread(), "not VM thread"); + mname->address_field_put(_vmtarget_offset, (address)ref); +} +#endif // INCLUDE_JVMTI + void java_lang_invoke_MemberName::set_vmtarget(oop mname, Metadata* ref) { assert(is_instance(mname), "wrong type"); // check the type of the vmtarget @@ -2927,6 +2978,9 @@ int java_lang_Class::_array_klass_offset; int java_lang_Class::_oop_size_offset; int java_lang_Class::_static_oop_field_count_offset; +int java_lang_Class::_protection_domain_offset; +int java_lang_Class::_init_lock_offset; +int java_lang_Class::_signers_offset; #ifdef GRAAL int java_lang_Class::_graal_mirror_offset; #endif diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/javaClasses.hpp --- a/src/share/vm/classfile/javaClasses.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/javaClasses.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -153,7 +153,7 @@ static char* as_utf8_string(oop java_string, char* buf, int buflen); static char* as_utf8_string(oop java_string, int start, int len); static char* as_platform_dependent_str(Handle java_string, TRAPS); - static jchar* as_unicode_string(oop java_string, int& length); + static jchar* as_unicode_string(oop java_string, int& length, TRAPS); // produce an ascii string with all other values quoted using \u#### static char* as_quoted_ascii(oop java_string); @@ -209,7 +209,10 @@ GRAAL_ONLY(macro(java_lang_Class, graal_mirror, object_signature, false))\ macro(java_lang_Class, array_klass, intptr_signature, false) \ macro(java_lang_Class, oop_size, int_signature, false) \ - macro(java_lang_Class, static_oop_field_count, int_signature, false) + macro(java_lang_Class, static_oop_field_count, int_signature, false) \ + macro(java_lang_Class, protection_domain, object_signature, false) \ + macro(java_lang_Class, init_lock, object_signature, false) \ + macro(java_lang_Class, signers, object_signature, false) class java_lang_Class : AllStatic { friend class VMStructs; @@ -226,15 +229,20 @@ static int _graal_mirror_offset; #endif + static int _protection_domain_offset; + static int _init_lock_offset; + static int _signers_offset; + static bool offsets_computed; static int classRedefinedCount_offset; static GrowableArray* _fixup_mirror_list; + static void set_init_lock(oop java_class, oop init_lock); public: static void compute_offsets(); // Instance creation - static oop create_mirror(KlassHandle k, TRAPS); + static oop create_mirror(KlassHandle k, Handle protection_domain, TRAPS); static void fixup_mirror(KlassHandle k, TRAPS); static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS); // Conversion @@ -266,6 +274,13 @@ static int classRedefinedCount(oop the_class_mirror); static void set_classRedefinedCount(oop the_class_mirror, int value); + // Support for embedded per-class oops + static oop protection_domain(oop 
java_class); + static void set_protection_domain(oop java_class, oop protection_domain); + static oop init_lock(oop java_class); + static objArrayOop signers(oop java_class); + static void set_signers(oop java_class, objArrayOop signers); + static int oop_size(oop java_class); static void set_oop_size(oop java_class, int size); static int static_oop_field_count(oop java_class); @@ -1046,6 +1061,9 @@ static Metadata* vmtarget(oop mname); static void set_vmtarget(oop mname, Metadata* target); +#if INCLUDE_JVMTI + static void adjust_vmtarget(oop mname, Metadata* target); +#endif // INCLUDE_JVMTI static intptr_t vmindex(oop mname); static void set_vmindex(oop mname, intptr_t index); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/symbolTable.cpp --- a/src/share/vm/classfile/symbolTable.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/symbolTable.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -35,7 +35,6 @@ #include "oops/oop.inline2.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/hashtable.inline.hpp" -#include "utilities/numberSeq.hpp" // -------------------------------------------------------------------------- @@ -451,21 +450,7 @@ } void SymbolTable::dump(outputStream* st) { - NumberSeq summary; - for (int i = 0; i < the_table()->table_size(); ++i) { - int count = 0; - for (HashtableEntry* e = the_table()->bucket(i); - e != NULL; e = e->next()) { - count++; - } - summary.add((double)count); - } - st->print_cr("SymbolTable statistics:"); - st->print_cr("Number of buckets : %7d", summary.num()); - st->print_cr("Average bucket size : %7.0f", summary.avg()); - st->print_cr("Variance of bucket size : %7.0f", summary.variance()); - st->print_cr("Std. dev. of bucket size: %7.0f", summary.sd()); - st->print_cr("Maximum bucket size : %7.0f", summary.maximum()); + the_table()->dump_table(st, "SymbolTable"); } @@ -735,7 +720,7 @@ ResourceMark rm(THREAD); int length; Handle h_string (THREAD, string); - jchar* chars = java_lang_String::as_unicode_string(string, length); + jchar* chars = java_lang_String::as_unicode_string(string, length, CHECK_NULL); oop result = intern(h_string, chars, length, CHECK_NULL); return result; } @@ -752,7 +737,7 @@ return result; } -void StringTable::unlink(BoolObjectClosure* is_alive) { +void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { // Readers of the table are unlocked, so we should only be removing // entries at a safepoint. assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); @@ -760,41 +745,31 @@ HashtableEntry** p = the_table()->bucket_addr(i); HashtableEntry* entry = the_table()->bucket(i); while (entry != NULL) { - // Shared entries are normally at the end of the bucket and if we run into - // a shared entry, then there is nothing more to remove. However, if we - // have rehashed the table, then the shared entries are no longer at the - // end of the bucket. 
-    if (entry->is_shared() && !use_alternate_hashcode()) {
-      break;
-    }
-    assert(entry->literal() != NULL, "just checking");
-    if (entry->is_shared() || is_alive->do_object_b(entry->literal())) {
+      assert(!entry->is_shared(), "CDS not used for the StringTable");
+
+      if (is_alive->do_object_b(entry->literal())) {
+        if (f != NULL) {
+          f->do_oop((oop*)entry->literal_addr());
+        }
         p = entry->next_addr();
       } else {
         *p = entry->next();
         the_table()->free_entry(entry);
       }
-    entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
+      entry = *p;
     }
   }
 }

 void StringTable::oops_do(OopClosure* f) {
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
     HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
+      assert(!entry->is_shared(), "CDS not used for the StringTable");
+      f->do_oop((oop*)entry->literal_addr());
-      // Did the closure remove the literal from the table?
-      if (entry->literal() == NULL) {
-        assert(!entry->is_shared(), "immutable hashtable entry?");
-        *p = entry->next();
-        the_table()->free_entry(entry);
-      } else {
-        p = entry->next_addr();
-      }
-      entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
+      entry = entry->next();
     }
   }
 }

@@ -814,21 +789,7 @@
 }

 void StringTable::dump(outputStream* st) {
-  NumberSeq summary;
-  for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
-    int count = 0;
-    for ( ; p != NULL; p = p->next()) {
-      count++;
-    }
-    summary.add((double)count);
-  }
-  st->print_cr("StringTable statistics:");
-  st->print_cr("Number of buckets       : %7d", summary.num());
-  st->print_cr("Average bucket size     : %7.0f", summary.avg());
-  st->print_cr("Variance of bucket size : %7.0f", summary.variance());
-  st->print_cr("Std. dev. of bucket size: %7.0f", summary.sd());
-  st->print_cr("Maximum bucket size     : %7.0f", summary.maximum());
+  the_table()->dump_table(st, "StringTable");
 }

diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/symbolTable.hpp
--- a/src/share/vm/classfile/symbolTable.hpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/classfile/symbolTable.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -272,7 +272,10 @@

   // GC support
   //   Delete pointers to otherwise-unreachable objects.
-  static void unlink(BoolObjectClosure* cl);
+  static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f);
+  static void unlink(BoolObjectClosure* cl) {
+    unlink_or_oops_do(cl, NULL);
+  }

   // Invoke "f->do_oop" on the locations of all oops in the table.
   static void oops_do(OopClosure* f);

diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/systemDictionary.cpp
--- a/src/share/vm/classfile/systemDictionary.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -56,6 +56,11 @@
 #include "services/classLoadingService.hpp"
 #include "services/threadService.hpp"

+#if INCLUDE_TRACE
+  #include "trace/tracing.hpp"
+  #include "trace/traceMacros.hpp"
+#endif
+
 Dictionary* SystemDictionary::_dictionary = NULL;
 PlaceholderTable* SystemDictionary::_placeholders = NULL;
@@ -586,10 +591,15 @@
 }

-Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle class_loader, Handle protection_domain, TRAPS) {
+Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
+                                                        Handle class_loader,
+                                                        Handle protection_domain,
+                                                        TRAPS) {
   assert(name != NULL && !FieldType::is_array(name) &&
          !FieldType::is_obj(name), "invalid class name");

+  TracingTime class_load_start_time = Tracing::time();
+
   // UseNewReflection
   // Fix for 4474172; see evaluation for more details
   class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
@@ -804,8 +814,9 @@
       // during compilations.
       MutexLocker mu(Compile_lock, THREAD);
       update_dictionary(d_index, d_hash, p_index, p_hash,
-                        k, class_loader, THREAD);
+                        k, class_loader, THREAD);
     }
+
     if (JvmtiExport::should_post_class_load()) {
       Thread *thread = THREAD;
       assert(thread->is_Java_thread(), "thread->is_Java_thread()");
@@ -830,7 +841,7 @@
     Klass *kk;
     {
       MutexLocker mu(SystemDictionary_lock, THREAD);
-      kk = find_class(name, ik->class_loader_data());
+      kk = find_class(d_index, d_hash, name, ik->class_loader_data());
     }
     if (kk != NULL) {
       // No clean up is needed if the shared class has been entered
@@ -861,8 +872,8 @@
     // This brackets the SystemDictionary updates for both defining
     // and initiating loaders
     MutexLocker mu(SystemDictionary_lock, THREAD);
-    placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD);
-    SystemDictionary_lock->notify_all();
+    placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD);
+    SystemDictionary_lock->notify_all();
   }
 }

@@ -870,6 +881,8 @@
     return NULL;
   }

+  post_class_load_event(class_load_start_time, k, class_loader);
+
 #ifdef ASSERT
   {
     ClassLoaderData* loader_data = k->class_loader_data();
@@ -993,6 +1006,8 @@
                                              TRAPS) {
   TempNewSymbol parsed_name = NULL;

+  TracingTime class_load_start_time = Tracing::time();
+
   ClassLoaderData* loader_data;
   if (host_klass.not_null()) {
     // Create a new CLD for anonymous class, that uses the same class loader
@@ -1048,6 +1063,8 @@
       assert(THREAD->is_Java_thread(), "thread->is_Java_thread()");
       JvmtiExport::post_class_load((JavaThread *) THREAD, k());
     }
+
+    post_class_load_event(class_load_start_time, k, class_loader);
   }
   assert(host_klass.not_null() || cp_patches == NULL, "cp_patches only found with host_klass");
@@ -1435,6 +1452,7 @@
     JvmtiExport::post_class_load((JavaThread *) THREAD, k());
   }
+
 }

 // Support parallel classloading
@@ -1678,6 +1696,7 @@
   }
   return newsize;
 }
+
 // Assumes classes in the SystemDictionary are only unloaded at a safepoint
 // Note: anonymous classes are not in the SD.
 bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
@@ -1747,13 +1766,6 @@
   dictionary()->classes_do(f);
 }

-// All classes, and their class loaders
-// (added for helpers that use HandleMarks and ResourceMarks)
-// Don't iterate over placeholders
-void SystemDictionary::classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS) {
-  dictionary()->classes_do(f, CHECK);
-}
-
 void SystemDictionary::placeholders_do(void f(Symbol*)) {
   placeholders()->entries_do(f);
 }
@@ -2031,12 +2043,6 @@
     }
   }

-  // Assign a classid if one has not already been assigned.  The
-  // counter does not need to be atomically incremented since this
-  // is only done while holding the SystemDictionary_lock.
-  // All loaded classes get a unique ID.
-  TRACE_INIT_ID(k);
-
   // Make a new system dictionary entry.
   Klass* sd_check = find_class(d_index, d_hash, name, loader_data);
   if (sd_check == NULL) {
@@ -2619,6 +2625,27 @@
          "Loaded klasses should be in SystemDictionary");
 }

+// utility function for class load event
+void SystemDictionary::post_class_load_event(TracingTime start_time,
+                                             instanceKlassHandle k,
+                                             Handle initiating_loader) {
+#if INCLUDE_TRACE
+  EventClassLoad event(UNTIMED);
+  if (event.should_commit()) {
+    event.set_endtime(Tracing::time());
+    event.set_starttime(start_time);
+    event.set_loadedClass(k());
+    oop defining_class_loader = k->class_loader();
+    event.set_definingClassLoader(defining_class_loader != NULL ?
+                                  defining_class_loader->klass() : (Klass*)NULL);
+    oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader();
+    event.set_initiatingClassLoader(class_loader != NULL ?
+                                    class_loader->klass() : (Klass*)NULL);
+    event.commit();
+  }
+#endif /* INCLUDE_TRACE */
+}
+
 #ifndef PRODUCT

 // statistics code

diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/systemDictionary.hpp
--- a/src/share/vm/classfile/systemDictionary.hpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -31,9 +31,11 @@
 #include "oops/symbol.hpp"
 #include "runtime/java.hpp"
 #include "runtime/reflectionUtils.hpp"
+#include "trace/traceTime.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"

+
 // The system dictionary stores all loaded classes and maps:
 //
 //   [class name,class loader] -> class   i.e.  [Symbol*,oop] -> Klass*
@@ -363,10 +365,7 @@
   static void classes_do(void f(Klass*, TRAPS), TRAPS);
   // All classes, and their class loaders
   static void classes_do(void f(Klass*, ClassLoaderData*));
-  // All classes, and their class loaders
-  // (added for helpers that use HandleMarks and ResourceMarks)
-  static void classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS);
-  // All entries in the placeholder table and their class loaders
+
   static void placeholders_do(void f(Symbol*));

   // Iterate over all methods in all klasses in dictionary
@@ -689,6 +688,9 @@
   // Setup link to hierarchy
   static void add_to_hierarchy(instanceKlassHandle k, TRAPS);

+  // event based tracing
+  static void post_class_load_event(TracingTime start_time, instanceKlassHandle k,
+                                    Handle initiating_loader);

   // We pass in the hashtable index so we can calculate it outside of
   // the SystemDictionary_lock.
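The hunks above thread a start timestamp through both class-loading paths and emit an event-based tracing record once loading completes. A minimal sketch of that UNTIMED pattern follows, using only the tracing calls visible in this patch (Tracing::time(), EventClassLoad, should_commit(), commit()); do_load_class() is a hypothetical stand-in for the real resolution path:

    // Sketch, not part of the patch: capture the start time before the
    // (possibly slow) load, then commit a fully-timed event afterwards.
    // UNTIMED means the event does not time itself; both timestamps are
    // supplied explicitly, and should_commit() keeps the disabled-case
    // cost near zero.
    static Klass* load_class_with_event(Symbol* name, Handle loader, TRAPS) {
      TracingTime start = Tracing::time();            // before any loading work
      Klass* k = do_load_class(name, loader, THREAD); // hypothetical helper
    #if INCLUDE_TRACE
      EventClassLoad event(UNTIMED);
      if (event.should_commit()) {
        event.set_starttime(start);
        event.set_endtime(Tracing::time());
        event.set_loadedClass(k);
        event.commit();
      }
    #endif
      return k;
    }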
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/verifier.cpp --- a/src/share/vm/classfile/verifier.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/verifier.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -362,7 +362,7 @@ } #endif -void ErrorContext::details(outputStream* ss, Method* method) const { +void ErrorContext::details(outputStream* ss, const Method* method) const { if (is_valid()) { ss->print_cr(""); ss->print_cr("Exception Details:"); @@ -435,7 +435,7 @@ ss->print_cr(""); } -void ErrorContext::location_details(outputStream* ss, Method* method) const { +void ErrorContext::location_details(outputStream* ss, const Method* method) const { if (_bci != -1 && method != NULL) { streamIndentor si(ss); const char* bytecode_name = ""; @@ -470,7 +470,7 @@ } } -void ErrorContext::bytecode_details(outputStream* ss, Method* method) const { +void ErrorContext::bytecode_details(outputStream* ss, const Method* method) const { if (method != NULL) { streamIndentor si(ss); ss->indent().print_cr("Bytecode:"); @@ -479,7 +479,7 @@ } } -void ErrorContext::handler_details(outputStream* ss, Method* method) const { +void ErrorContext::handler_details(outputStream* ss, const Method* method) const { if (method != NULL) { streamIndentor si(ss); ExceptionTable table(method); @@ -494,7 +494,7 @@ } } -void ErrorContext::stackmap_details(outputStream* ss, Method* method) const { +void ErrorContext::stackmap_details(outputStream* ss, const Method* method) const { if (method != NULL && method->has_stackmap_table()) { streamIndentor si(ss); ss->indent().print_cr("Stackmap Table:"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/verifier.hpp --- a/src/share/vm/classfile/verifier.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/verifier.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,8 +36,10 @@ class Verifier : AllStatic { public: enum { + STRICTER_ACCESS_CTRL_CHECK_VERSION = 49, STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50, - INVOKEDYNAMIC_MAJOR_VERSION = 51 + INVOKEDYNAMIC_MAJOR_VERSION = 51, + NO_RELAX_ACCESS_CTRL_CHECK_VERSION = 52 }; typedef enum { ThrowException, NoException } Mode; @@ -224,7 +226,7 @@ _expected.reset_frame(); } - void details(outputStream* ss, Method* method) const; + void details(outputStream* ss, const Method* method) const; #ifdef ASSERT void print_on(outputStream* str) const { @@ -237,12 +239,12 @@ #endif private: - void location_details(outputStream* ss, Method* method) const; + void location_details(outputStream* ss, const Method* method) const; void reason_details(outputStream* ss) const; void frame_details(outputStream* ss) const; - void bytecode_details(outputStream* ss, Method* method) const; - void handler_details(outputStream* ss, Method* method) const; - void stackmap_details(outputStream* ss, Method* method) const; + void bytecode_details(outputStream* ss, const Method* method) const; + void handler_details(outputStream* ss, const Method* method) const; + void stackmap_details(outputStream* ss, const Method* method) const; }; // A new instance of this class is created for each class being verified diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/classfile/vmSymbols.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -68,7 +68,7 @@ template(java_lang_Float, "java/lang/Float") \ template(java_lang_Double, "java/lang/Double") \ template(java_lang_Byte, "java/lang/Byte") \ - template(java_lang_Byte_Cache, "java/lang/Byte$ByteCache") \ + template(java_lang_Byte_ByteCache, "java/lang/Byte$ByteCache") \ template(java_lang_Short, "java/lang/Short") \ template(java_lang_Short_ShortCache, "java/lang/Short$ShortCache") \ template(java_lang_Integer, "java/lang/Integer") \ @@ -498,6 +498,9 @@ template(array_klass_name, "array_klass") \ template(oop_size_name, "oop_size") \ template(static_oop_field_count_name, "static_oop_field_count") \ + template(protection_domain_name, "protection_domain") \ + template(init_lock_name, "init_lock") \ + template(signers_name, "signers_name") \ GRAAL_ONLY(template(graal_mirror_name, "graal_mirror")) \ template(loader_data_name, "loader_data") \ template(dependencies_name, "dependencies") \ @@ -625,13 +628,18 @@ template(sun_management_ManagementFactory, "sun/management/ManagementFactory") \ template(sun_management_Sensor, "sun/management/Sensor") \ template(sun_management_Agent, "sun/management/Agent") \ + template(sun_management_DiagnosticCommandImpl, "sun/management/DiagnosticCommandImpl") \ template(sun_management_GarbageCollectorImpl, "sun/management/GarbageCollectorImpl") \ + template(sun_management_ManagementFactoryHelper, "sun/management/ManagementFactoryHelper") \ + template(getDiagnosticCommandMBean_name, "getDiagnosticCommandMBean") \ + template(getDiagnosticCommandMBean_signature, "()Lcom/sun/management/DiagnosticCommandMBean;") \ template(getGcInfoBuilder_name, "getGcInfoBuilder") \ template(getGcInfoBuilder_signature, "()Lsun/management/GcInfoBuilder;") \ template(com_sun_management_GcInfo, "com/sun/management/GcInfo") \ template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \ 
template(createGCNotification_name, "createGCNotification") \ template(createGCNotification_signature, "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \ + template(createDiagnosticFrameworkNotification_name, "createDiagnosticFrameworkNotification") \ template(createMemoryPoolMBean_name, "createMemoryPoolMBean") \ template(createMemoryManagerMBean_name, "createMemoryManagerMBean") \ template(createGarbageCollectorMBean_name, "createGarbageCollectorMBean") \ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/code/codeBlob.cpp --- a/src/share/vm/code/codeBlob.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/code/codeBlob.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,581 +1,581 @@ -/* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "code/codeBlob.hpp" -#include "code/codeCache.hpp" -#include "code/relocInfo.hpp" -#include "compiler/disassembler.hpp" -#include "interpreter/bytecode.hpp" -#include "memory/allocation.inline.hpp" -#include "memory/heap.hpp" -#include "oops/oop.inline.hpp" -#include "prims/forte.hpp" -#include "runtime/handles.inline.hpp" -#include "runtime/interfaceSupport.hpp" -#include "runtime/mutexLocker.hpp" -#include "runtime/safepoint.hpp" -#include "runtime/sharedRuntime.hpp" -#include "runtime/vframe.hpp" -#include "services/memoryService.hpp" -#ifdef TARGET_ARCH_x86 -# include "nativeInst_x86.hpp" -#endif -#ifdef TARGET_ARCH_sparc -# include "nativeInst_sparc.hpp" -#endif -#ifdef TARGET_ARCH_zero -# include "nativeInst_zero.hpp" -#endif -#ifdef TARGET_ARCH_arm -# include "nativeInst_arm.hpp" -#endif -#ifdef TARGET_ARCH_ppc -# include "nativeInst_ppc.hpp" -#endif -#ifdef COMPILER1 -#include "c1/c1_Runtime1.hpp" -#endif - -unsigned int align_code_offset(int offset) { - // align the size to CodeEntryAlignment - return - ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1)) - - (int)CodeHeap::header_size(); -} - - -// This must be consistent with the CodeBlob constructor's layout actions. -unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) { - unsigned int size = header_size; - size += round_to(cb->total_relocation_size(), oopSize); - // align the size to CodeEntryAlignment - size = align_code_offset(size); - size += round_to(cb->total_content_size(), oopSize); - size += round_to(cb->total_oop_size(), oopSize); - size += round_to(cb->total_metadata_size(), oopSize); - return size; -} - - -// Creates a simple CodeBlob. 
Sets up the size of the different regions. -CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) { - assert(size == round_to(size, oopSize), "unaligned size"); - assert(locs_size == round_to(locs_size, oopSize), "unaligned size"); - assert(header_size == round_to(header_size, oopSize), "unaligned size"); - assert(!UseRelocIndex, "no space allocated for reloc index yet"); - - // Note: If UseRelocIndex is enabled, there needs to be (at least) one - // extra word for the relocation information, containing the reloc - // index table length. Unfortunately, the reloc index table imple- - // mentation is not easily understandable and thus it is not clear - // what exactly the format is supposed to be. For now, we just turn - // off the use of this table (gri 7/6/2000). - - _name = name; - _size = size; - _frame_complete_offset = frame_complete; - _header_size = header_size; - _relocation_size = locs_size; - _content_offset = align_code_offset(header_size + _relocation_size); - _code_offset = _content_offset; - _data_offset = size; - _frame_size = 0; - set_oop_maps(NULL); -} - - -// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions, -// and copy code and relocation info. -CodeBlob::CodeBlob( - const char* name, - CodeBuffer* cb, - int header_size, - int size, - int frame_complete, - int frame_size, - OopMapSet* oop_maps -) { - assert(size == round_to(size, oopSize), "unaligned size"); - assert(header_size == round_to(header_size, oopSize), "unaligned size"); - - _name = name; - _size = size; - _frame_complete_offset = frame_complete; - _header_size = header_size; - _relocation_size = round_to(cb->total_relocation_size(), oopSize); - _content_offset = align_code_offset(header_size + _relocation_size); - _code_offset = _content_offset + cb->total_offset_of(cb->insts()); - _data_offset = _content_offset + round_to(cb->total_content_size(), oopSize); - assert(_data_offset <= size, "codeBlob is too small"); - - cb->copy_code_and_locs_to(this); - set_oop_maps(oop_maps); - _frame_size = frame_size; -#ifdef COMPILER1 - // probably wrong for tiered - assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs"); -#endif // COMPILER1 -} - - -void CodeBlob::set_oop_maps(OopMapSet* p) { - // Danger Will Robinson! This method allocates a big - // chunk of memory, its your job to free it. - if (p != NULL) { - // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps - _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode); - p->copy_to((address)_oop_maps); - } else { - _oop_maps = NULL; - } -} - - -void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) { - // Do not hold the CodeCache lock during name formatting. 
- assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub"); - - if (stub != NULL) { - char stub_id[256]; - assert(strlen(name1) + strlen(name2) < sizeof(stub_id), ""); - jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2); - if (PrintStubCode) { - ttyLocker ttyl; - tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub); - Disassembler::decode(stub->code_begin(), stub->code_end()); - tty->cr(); - } - Forte::register_stub(stub_id, stub->code_begin(), stub->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - const char* stub_name = name2; - if (name2[0] == '\0') stub_name = name1; - JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); -} - - -void CodeBlob::flush() { - if (_oop_maps) { - FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode); - _oop_maps = NULL; - } +/* + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "code/codeBlob.hpp" +#include "code/codeCache.hpp" +#include "code/relocInfo.hpp" +#include "compiler/disassembler.hpp" +#include "interpreter/bytecode.hpp" +#include "memory/allocation.inline.hpp" +#include "memory/heap.hpp" +#include "oops/oop.inline.hpp" +#include "prims/forte.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/interfaceSupport.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/vframe.hpp" +#include "services/memoryService.hpp" +#ifdef TARGET_ARCH_x86 +# include "nativeInst_x86.hpp" +#endif +#ifdef TARGET_ARCH_sparc +# include "nativeInst_sparc.hpp" +#endif +#ifdef TARGET_ARCH_zero +# include "nativeInst_zero.hpp" +#endif +#ifdef TARGET_ARCH_arm +# include "nativeInst_arm.hpp" +#endif +#ifdef TARGET_ARCH_ppc +# include "nativeInst_ppc.hpp" +#endif +#ifdef COMPILER1 +#include "c1/c1_Runtime1.hpp" +#endif + +unsigned int align_code_offset(int offset) { + // align the size to CodeEntryAlignment + return + ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1)) + - (int)CodeHeap::header_size(); +} + + +// This must be consistent with the CodeBlob constructor's layout actions. 
+unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) { + unsigned int size = header_size; + size += round_to(cb->total_relocation_size(), oopSize); + // align the size to CodeEntryAlignment + size = align_code_offset(size); + size += round_to(cb->total_content_size(), oopSize); + size += round_to(cb->total_oop_size(), oopSize); + size += round_to(cb->total_metadata_size(), oopSize); + return size; +} + + +// Creates a simple CodeBlob. Sets up the size of the different regions. +CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) { + assert(size == round_to(size, oopSize), "unaligned size"); + assert(locs_size == round_to(locs_size, oopSize), "unaligned size"); + assert(header_size == round_to(header_size, oopSize), "unaligned size"); + assert(!UseRelocIndex, "no space allocated for reloc index yet"); + + // Note: If UseRelocIndex is enabled, there needs to be (at least) one + // extra word for the relocation information, containing the reloc + // index table length. Unfortunately, the reloc index table imple- + // mentation is not easily understandable and thus it is not clear + // what exactly the format is supposed to be. For now, we just turn + // off the use of this table (gri 7/6/2000). + + _name = name; + _size = size; + _frame_complete_offset = frame_complete; + _header_size = header_size; + _relocation_size = locs_size; + _content_offset = align_code_offset(header_size + _relocation_size); + _code_offset = _content_offset; + _data_offset = size; + _frame_size = 0; + set_oop_maps(NULL); +} + + +// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions, +// and copy code and relocation info. +CodeBlob::CodeBlob( + const char* name, + CodeBuffer* cb, + int header_size, + int size, + int frame_complete, + int frame_size, + OopMapSet* oop_maps +) { + assert(size == round_to(size, oopSize), "unaligned size"); + assert(header_size == round_to(header_size, oopSize), "unaligned size"); + + _name = name; + _size = size; + _frame_complete_offset = frame_complete; + _header_size = header_size; + _relocation_size = round_to(cb->total_relocation_size(), oopSize); + _content_offset = align_code_offset(header_size + _relocation_size); + _code_offset = _content_offset + cb->total_offset_of(cb->insts()); + _data_offset = _content_offset + round_to(cb->total_content_size(), oopSize); + assert(_data_offset <= size, "codeBlob is too small"); + + cb->copy_code_and_locs_to(this); + set_oop_maps(oop_maps); + _frame_size = frame_size; +#ifdef COMPILER1 + // probably wrong for tiered + assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs"); +#endif // COMPILER1 +} + + +void CodeBlob::set_oop_maps(OopMapSet* p) { + // Danger Will Robinson! This method allocates a big + // chunk of memory, its your job to free it. + if (p != NULL) { + // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps + _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode); + p->copy_to((address)_oop_maps); + } else { + _oop_maps = NULL; + } +} + + +void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) { + // Do not hold the CodeCache lock during name formatting. 
+ assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub"); + + if (stub != NULL) { + char stub_id[256]; + assert(strlen(name1) + strlen(name2) < sizeof(stub_id), ""); + jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2); + if (PrintStubCode) { + ttyLocker ttyl; + tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub); + Disassembler::decode(stub->code_begin(), stub->code_end()); + tty->cr(); + } + Forte::register_stub(stub_id, stub->code_begin(), stub->code_end()); + + if (JvmtiExport::should_post_dynamic_code_generated()) { + const char* stub_name = name2; + if (name2[0] == '\0') stub_name = name1; + JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end()); + } + } + + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); +} + + +void CodeBlob::flush() { + if (_oop_maps) { + FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode); + _oop_maps = NULL; + } _strings.free(); -} - - -OopMap* CodeBlob::oop_map_for_return_address(address return_address) { - assert(oop_maps() != NULL, "nope"); - return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin()); -} - - -//---------------------------------------------------------------------------------------------------- -// Implementation of BufferBlob - - -BufferBlob::BufferBlob(const char* name, int size) -: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0) -{} - -BufferBlob* BufferBlob::create(const char* name, int buffer_size) { - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - - BufferBlob* blob = NULL; - unsigned int size = sizeof(BufferBlob); - // align the size to CodeEntryAlignment - size = align_code_offset(size); - size += round_to(buffer_size, oopSize); - assert(name != NULL, "must provide a name"); - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) BufferBlob(name, size); - } - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); - - return blob; -} - - -BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb) - : CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL) -{} - -BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) { - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - - BufferBlob* blob = NULL; - unsigned int size = allocation_size(cb, sizeof(BufferBlob)); - assert(name != NULL, "must provide a name"); - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) BufferBlob(name, size, cb); - } - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); - - return blob; -} - - -void* BufferBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - return p; -} - - -void BufferBlob::free( BufferBlob *blob ) { - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CodeCache::free((CodeBlob*)blob); - } - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); -} - - -//---------------------------------------------------------------------------------------------------- -// Implementation of AdapterBlob - 
-AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) : - BufferBlob("I2C/C2I adapters", size, cb) { - CodeCache::commit(this); -} - -AdapterBlob* AdapterBlob::create(CodeBuffer* cb) { - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - - AdapterBlob* blob = NULL; - unsigned int size = allocation_size(cb, sizeof(AdapterBlob)); - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) AdapterBlob(size, cb); - } - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); - - return blob; -} - - -//---------------------------------------------------------------------------------------------------- -// Implementation of MethodHandlesAdapterBlob - -MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) { - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - - MethodHandlesAdapterBlob* blob = NULL; - unsigned int size = sizeof(MethodHandlesAdapterBlob); - // align the size to CodeEntryAlignment - size = align_code_offset(size); - size += round_to(buffer_size, oopSize); - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) MethodHandlesAdapterBlob(size); - } - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); - - return blob; -} - - -//---------------------------------------------------------------------------------------------------- -// Implementation of RuntimeStub - -RuntimeStub::RuntimeStub( - const char* name, - CodeBuffer* cb, - int size, - int frame_complete, - int frame_size, - OopMapSet* oop_maps, - bool caller_must_gc_arguments -) -: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps) -{ - _caller_must_gc_arguments = caller_must_gc_arguments; -} - - -RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name, - CodeBuffer* cb, - int frame_complete, - int frame_size, - OopMapSet* oop_maps, - bool caller_must_gc_arguments) -{ - RuntimeStub* stub = NULL; - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - unsigned int size = allocation_size(cb, sizeof(RuntimeStub)); - stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments); - } - - trace_new_stub(stub, "RuntimeStub - ", stub_name); - - return stub; -} - - -void* RuntimeStub::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} - -// operator new shared by all singletons: -void* SingletonBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} - - -//---------------------------------------------------------------------------------------------------- -// Implementation of DeoptimizationBlob - -DeoptimizationBlob::DeoptimizationBlob( - CodeBuffer* cb, - int size, - OopMapSet* oop_maps, - int unpack_offset, - int unpack_with_exception_offset, - int unpack_with_reexecution_offset, - int frame_size -) -: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps) -{ - _unpack_offset = unpack_offset; - _unpack_with_exception = unpack_with_exception_offset; - _unpack_with_reexecution = unpack_with_reexecution_offset; -#ifdef COMPILER1 - 
_unpack_with_exception_in_tls = -1; -#endif -} - - -DeoptimizationBlob* DeoptimizationBlob::create( - CodeBuffer* cb, - OopMapSet* oop_maps, - int unpack_offset, - int unpack_with_exception_offset, - int unpack_with_reexecution_offset, - int frame_size) -{ - DeoptimizationBlob* blob = NULL; - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob)); - blob = new (size) DeoptimizationBlob(cb, - size, - oop_maps, - unpack_offset, - unpack_with_exception_offset, - unpack_with_reexecution_offset, - frame_size); - } - - trace_new_stub(blob, "DeoptimizationBlob"); - - return blob; -} - - -//---------------------------------------------------------------------------------------------------- -// Implementation of UncommonTrapBlob - -#ifdef COMPILER2 -UncommonTrapBlob::UncommonTrapBlob( - CodeBuffer* cb, - int size, - OopMapSet* oop_maps, - int frame_size -) -: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps) -{} - - -UncommonTrapBlob* UncommonTrapBlob::create( - CodeBuffer* cb, - OopMapSet* oop_maps, - int frame_size) -{ - UncommonTrapBlob* blob = NULL; - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob)); - blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size); - } - - trace_new_stub(blob, "UncommonTrapBlob"); - - return blob; -} - - -#endif // COMPILER2 - - -//---------------------------------------------------------------------------------------------------- -// Implementation of ExceptionBlob - -#ifdef COMPILER2 -ExceptionBlob::ExceptionBlob( - CodeBuffer* cb, - int size, - OopMapSet* oop_maps, - int frame_size -) -: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps) -{} - - -ExceptionBlob* ExceptionBlob::create( - CodeBuffer* cb, - OopMapSet* oop_maps, - int frame_size) -{ - ExceptionBlob* blob = NULL; - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - unsigned int size = allocation_size(cb, sizeof(ExceptionBlob)); - blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size); - } - - trace_new_stub(blob, "ExceptionBlob"); - - return blob; -} - - -#endif // COMPILER2 - - -//---------------------------------------------------------------------------------------------------- -// Implementation of SafepointBlob - -SafepointBlob::SafepointBlob( - CodeBuffer* cb, - int size, - OopMapSet* oop_maps, - int frame_size -) -: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps) -{} - - -SafepointBlob* SafepointBlob::create( - CodeBuffer* cb, - OopMapSet* oop_maps, - int frame_size) -{ - SafepointBlob* blob = NULL; - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - unsigned int size = allocation_size(cb, sizeof(SafepointBlob)); - blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size); - } - - trace_new_stub(blob, "SafepointBlob"); - - return blob; -} - - -//---------------------------------------------------------------------------------------------------- -// Verification and printing - -void CodeBlob::verify() { - 
ShouldNotReachHere(); -} - -void CodeBlob::print_on(outputStream* st) const { - st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", this); - st->print_cr("Framesize: %d", _frame_size); -} - -void CodeBlob::print_value_on(outputStream* st) const { - st->print_cr("[CodeBlob]"); -} - -void BufferBlob::verify() { - // unimplemented -} - -void BufferBlob::print_on(outputStream* st) const { - CodeBlob::print_on(st); - print_value_on(st); -} - -void BufferBlob::print_value_on(outputStream* st) const { - st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", this, name()); -} - -void RuntimeStub::verify() { - // unimplemented -} - -void RuntimeStub::print_on(outputStream* st) const { - ttyLocker ttyl; - CodeBlob::print_on(st); - st->print("Runtime Stub (" INTPTR_FORMAT "): ", this); - st->print_cr(name()); - Disassembler::decode((CodeBlob*)this, st); -} - -void RuntimeStub::print_value_on(outputStream* st) const { - st->print("RuntimeStub (" INTPTR_FORMAT "): ", this); st->print(name()); -} - -void SingletonBlob::verify() { - // unimplemented -} - -void SingletonBlob::print_on(outputStream* st) const { - ttyLocker ttyl; - CodeBlob::print_on(st); - st->print_cr(name()); - Disassembler::decode((CodeBlob*)this, st); -} - -void SingletonBlob::print_value_on(outputStream* st) const { - st->print_cr(name()); -} - -void DeoptimizationBlob::print_value_on(outputStream* st) const { - st->print_cr("Deoptimization (frame not available)"); -} +} + + +OopMap* CodeBlob::oop_map_for_return_address(address return_address) { + assert(oop_maps() != NULL, "nope"); + return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin()); +} + + +//---------------------------------------------------------------------------------------------------- +// Implementation of BufferBlob + + +BufferBlob::BufferBlob(const char* name, int size) +: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0) +{} + +BufferBlob* BufferBlob::create(const char* name, int buffer_size) { + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + + BufferBlob* blob = NULL; + unsigned int size = sizeof(BufferBlob); + // align the size to CodeEntryAlignment + size = align_code_offset(size); + size += round_to(buffer_size, oopSize); + assert(name != NULL, "must provide a name"); + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + blob = new (size) BufferBlob(name, size); + } + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); + + return blob; +} + + +BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb) + : CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL) +{} + +BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) { + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + + BufferBlob* blob = NULL; + unsigned int size = allocation_size(cb, sizeof(BufferBlob)); + assert(name != NULL, "must provide a name"); + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + blob = new (size) BufferBlob(name, size, cb); + } + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); + + return blob; +} + + +void* BufferBlob::operator new(size_t s, unsigned size) { + void* p = CodeCache::allocate(size); + return p; +} + + +void BufferBlob::free( BufferBlob *blob ) { + ThreadInVMfromUnknown __tiv; // get to VM state in case we block 
on CodeCache_lock + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + CodeCache::free((CodeBlob*)blob); + } + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); +} + + +//---------------------------------------------------------------------------------------------------- +// Implementation of AdapterBlob + +AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) : + BufferBlob("I2C/C2I adapters", size, cb) { + CodeCache::commit(this); +} + +AdapterBlob* AdapterBlob::create(CodeBuffer* cb) { + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + + AdapterBlob* blob = NULL; + unsigned int size = allocation_size(cb, sizeof(AdapterBlob)); + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + blob = new (size) AdapterBlob(size, cb); + } + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); + + return blob; +} + + +//---------------------------------------------------------------------------------------------------- +// Implementation of MethodHandlesAdapterBlob + +MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) { + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + + MethodHandlesAdapterBlob* blob = NULL; + unsigned int size = sizeof(MethodHandlesAdapterBlob); + // align the size to CodeEntryAlignment + size = align_code_offset(size); + size += round_to(buffer_size, oopSize); + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + blob = new (size) MethodHandlesAdapterBlob(size); + } + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); + + return blob; +} + + +//---------------------------------------------------------------------------------------------------- +// Implementation of RuntimeStub + +RuntimeStub::RuntimeStub( + const char* name, + CodeBuffer* cb, + int size, + int frame_complete, + int frame_size, + OopMapSet* oop_maps, + bool caller_must_gc_arguments +) +: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps) +{ + _caller_must_gc_arguments = caller_must_gc_arguments; +} + + +RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name, + CodeBuffer* cb, + int frame_complete, + int frame_size, + OopMapSet* oop_maps, + bool caller_must_gc_arguments) +{ + RuntimeStub* stub = NULL; + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + unsigned int size = allocation_size(cb, sizeof(RuntimeStub)); + stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments); + } + + trace_new_stub(stub, "RuntimeStub - ", stub_name); + + return stub; +} + + +void* RuntimeStub::operator new(size_t s, unsigned size) { + void* p = CodeCache::allocate(size, true); + if (!p) fatal("Initial size of CodeCache is too small"); + return p; +} + +// operator new shared by all singletons: +void* SingletonBlob::operator new(size_t s, unsigned size) { + void* p = CodeCache::allocate(size, true); + if (!p) fatal("Initial size of CodeCache is too small"); + return p; +} + + +//---------------------------------------------------------------------------------------------------- +// Implementation of DeoptimizationBlob + +DeoptimizationBlob::DeoptimizationBlob( + CodeBuffer* cb, + int size, + 
OopMapSet* oop_maps, + int unpack_offset, + int unpack_with_exception_offset, + int unpack_with_reexecution_offset, + int frame_size +) +: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps) +{ + _unpack_offset = unpack_offset; + _unpack_with_exception = unpack_with_exception_offset; + _unpack_with_reexecution = unpack_with_reexecution_offset; +#ifdef COMPILER1 + _unpack_with_exception_in_tls = -1; +#endif +} + + +DeoptimizationBlob* DeoptimizationBlob::create( + CodeBuffer* cb, + OopMapSet* oop_maps, + int unpack_offset, + int unpack_with_exception_offset, + int unpack_with_reexecution_offset, + int frame_size) +{ + DeoptimizationBlob* blob = NULL; + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob)); + blob = new (size) DeoptimizationBlob(cb, + size, + oop_maps, + unpack_offset, + unpack_with_exception_offset, + unpack_with_reexecution_offset, + frame_size); + } + + trace_new_stub(blob, "DeoptimizationBlob"); + + return blob; +} + + +//---------------------------------------------------------------------------------------------------- +// Implementation of UncommonTrapBlob + +#ifdef COMPILER2 +UncommonTrapBlob::UncommonTrapBlob( + CodeBuffer* cb, + int size, + OopMapSet* oop_maps, + int frame_size +) +: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps) +{} + + +UncommonTrapBlob* UncommonTrapBlob::create( + CodeBuffer* cb, + OopMapSet* oop_maps, + int frame_size) +{ + UncommonTrapBlob* blob = NULL; + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob)); + blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size); + } + + trace_new_stub(blob, "UncommonTrapBlob"); + + return blob; +} + + +#endif // COMPILER2 + + +//---------------------------------------------------------------------------------------------------- +// Implementation of ExceptionBlob + +#ifdef COMPILER2 +ExceptionBlob::ExceptionBlob( + CodeBuffer* cb, + int size, + OopMapSet* oop_maps, + int frame_size +) +: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps) +{} + + +ExceptionBlob* ExceptionBlob::create( + CodeBuffer* cb, + OopMapSet* oop_maps, + int frame_size) +{ + ExceptionBlob* blob = NULL; + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + unsigned int size = allocation_size(cb, sizeof(ExceptionBlob)); + blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size); + } + + trace_new_stub(blob, "ExceptionBlob"); + + return blob; +} + + +#endif // COMPILER2 + + +//---------------------------------------------------------------------------------------------------- +// Implementation of SafepointBlob + +SafepointBlob::SafepointBlob( + CodeBuffer* cb, + int size, + OopMapSet* oop_maps, + int frame_size +) +: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps) +{} + + +SafepointBlob* SafepointBlob::create( + CodeBuffer* cb, + OopMapSet* oop_maps, + int frame_size) +{ + SafepointBlob* blob = NULL; + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + { + MutexLockerEx 
mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + unsigned int size = allocation_size(cb, sizeof(SafepointBlob)); + blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size); + } + + trace_new_stub(blob, "SafepointBlob"); + + return blob; +} + + +//---------------------------------------------------------------------------------------------------- +// Verification and printing + +void CodeBlob::verify() { + ShouldNotReachHere(); +} + +void CodeBlob::print_on(outputStream* st) const { + st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", this); + st->print_cr("Framesize: %d", _frame_size); +} + +void CodeBlob::print_value_on(outputStream* st) const { + st->print_cr("[CodeBlob]"); +} + +void BufferBlob::verify() { + // unimplemented +} + +void BufferBlob::print_on(outputStream* st) const { + CodeBlob::print_on(st); + print_value_on(st); +} + +void BufferBlob::print_value_on(outputStream* st) const { + st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", this, name()); +} + +void RuntimeStub::verify() { + // unimplemented +} + +void RuntimeStub::print_on(outputStream* st) const { + ttyLocker ttyl; + CodeBlob::print_on(st); + st->print("Runtime Stub (" INTPTR_FORMAT "): ", this); + st->print_cr(name()); + Disassembler::decode((CodeBlob*)this, st); +} + +void RuntimeStub::print_value_on(outputStream* st) const { + st->print("RuntimeStub (" INTPTR_FORMAT "): ", this); st->print(name()); +} + +void SingletonBlob::verify() { + // unimplemented +} + +void SingletonBlob::print_on(outputStream* st) const { + ttyLocker ttyl; + CodeBlob::print_on(st); + st->print_cr(name()); + Disassembler::decode((CodeBlob*)this, st); +} + +void SingletonBlob::print_value_on(outputStream* st) const { + st->print_cr(name()); +} + +void DeoptimizationBlob::print_value_on(outputStream* st) const { + st->print_cr("Deoptimization (frame not available)"); +} diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/code/codeCache.cpp --- a/src/share/vm/code/codeCache.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/code/codeCache.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,7 @@ #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "services/memoryService.hpp" +#include "trace/tracing.hpp" #include "utilities/xmlstream.hpp" // Helper class for printing in CodeCache @@ -114,7 +115,6 @@ } }; - // CodeCache implementation CodeHeap * CodeCache::_heap = new CodeHeap(); @@ -126,6 +126,7 @@ nmethod* CodeCache::_scavenge_root_nmethods = NULL; nmethod* CodeCache::_saved_nmethods = NULL; +int CodeCache::_codemem_full_count = 0; CodeBlob* CodeCache::first() { assert_locked_or_safepoint(CodeCache_lock); @@ -172,7 +173,7 @@ static size_t maxCodeCacheUsed = 0; -CodeBlob* CodeCache::allocate(int size) { +CodeBlob* CodeCache::allocate(int size, bool is_critical) { // Do not seize the CodeCache lock here--if the caller has not // already done so, we are going to lose bigtime, since the code // cache will contain a garbage CodeBlob until the caller can @@ -183,7 +184,7 @@ CodeBlob* cb = NULL; _number_of_blobs++; while (true) { - cb = (CodeBlob*)_heap->allocate(size); + cb = (CodeBlob*)_heap->allocate(size, is_critical); if (cb != NULL) break; if (!_heap->expand_by(CodeCacheExpansionSize)) { // Expansion failed @@ -192,8 +193,8 @@ if (PrintCodeCacheExtension) { ResourceMark rm; tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)", - (intptr_t)_heap->begin(), (intptr_t)_heap->end(), - (address)_heap->end() - (address)_heap->begin()); + (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(), + (address)_heap->high() - (address)_heap->low_boundary()); } } maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() - @@ -472,8 +473,10 @@ } #endif //PRODUCT - -nmethod* CodeCache::find_and_remove_saved_code(Method* m) { +/** + * Remove and return nmethod from the saved code list in order to reanimate it. + */ +nmethod* CodeCache::reanimate_saved_code(Method* m) { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); nmethod* saved = _saved_nmethods; nmethod* prev = NULL; @@ -488,7 +491,7 @@ saved->set_speculatively_disconnected(false); saved->set_saved_nmethod_link(NULL); if (PrintMethodFlushing) { - saved->print_on(tty, " ### nmethod is reconnected\n"); + saved->print_on(tty, " ### nmethod is reconnected"); } if (LogCompilation && (xtty != NULL)) { ttyLocker ttyl; @@ -505,6 +508,9 @@ return NULL; } +/** + * Remove nmethod from the saved code list in order to discard it permanently + */ void CodeCache::remove_saved_code(nmethod* nm) { // For conc swpr this will be called with CodeCache_lock taken by caller assert_locked_or_safepoint(CodeCache_lock); @@ -538,7 +544,7 @@ nm->set_saved_nmethod_link(_saved_nmethods); _saved_nmethods = nm; if (PrintMethodFlushing) { - nm->print_on(tty, " ### nmethod is speculatively disconnected\n"); + nm->print_on(tty, " ### nmethod is speculatively disconnected"); } if (LogCompilation && (xtty != NULL)) { ttyLocker ttyl; @@ -617,15 +623,24 @@ address CodeCache::first_address() { assert_locked_or_safepoint(CodeCache_lock); - return (address)_heap->begin(); + return (address)_heap->low_boundary(); } address CodeCache::last_address() { assert_locked_or_safepoint(CodeCache_lock); - return (address)_heap->end(); + return (address)_heap->high(); } +/** + * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache + * is free, reverse_free_ratio() returns 4. 
+ */ +double CodeCache::reverse_free_ratio() { + double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace); + double max_capacity = (double)CodeCache::max_capacity(); + return max_capacity / unallocated_capacity; +} void icache_init(); @@ -824,6 +839,22 @@ } } +void CodeCache::report_codemem_full() { + _codemem_full_count++; + EventCodeCacheFull event; + if (event.should_commit()) { + event.set_startAddress((u8)low_bound()); + event.set_commitedTopAddress((u8)high()); + event.set_reservedTopAddress((u8)high_bound()); + event.set_entryCount(nof_blobs()); + event.set_methodCount(nof_nmethods()); + event.set_adaptorCount(nof_adapters()); + event.set_unallocatedCapacity(unallocated_capacity()/K); + event.set_fullCount(_codemem_full_count); + event.commit(); + } +} + //------------------------------------------------------------------------------------------------ // Non-product version @@ -1005,10 +1036,9 @@ void CodeCache::print_summary(outputStream* st, bool detailed) { size_t total = (_heap->high_boundary() - _heap->low_boundary()); st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT - "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT - "Kb max_free_chunk=" SIZE_FORMAT "Kb", + "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", total/K, (total - unallocated_capacity())/K, - maxCodeCacheUsed/K, unallocated_capacity()/K, largest_free_block()/K); + maxCodeCacheUsed/K, unallocated_capacity()/K); if (detailed) { st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", @@ -1027,19 +1057,8 @@ void CodeCache::log_state(outputStream* st) { st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" - " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'" - " largest_free_block='" SIZE_FORMAT "'", + " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", nof_blobs(), nof_nmethods(), nof_adapters(), - unallocated_capacity(), largest_free_block()); + unallocated_capacity()); } -size_t CodeCache::largest_free_block() { - // This is called both with and without CodeCache_lock held so - // handle both cases. - if (CodeCache_lock->owned_by_self()) { - return _heap->largest_free_block(); - } else { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - return _heap->largest_free_block(); - } -} diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/code/codeCache.hpp --- a/src/share/vm/code/codeCache.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/code/codeCache.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,20 +57,24 @@ static int _number_of_nmethods_with_dependencies; static bool _needs_cache_clean; static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() - static nmethod* _saved_nmethods; // linked via nm->saved_nmethod_look() + static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods. 
static void verify_if_often() PRODUCT_RETURN; static void mark_scavenge_root_nmethods() PRODUCT_RETURN; static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN; + static int _codemem_full_count; + public: // Initialization static void initialize(); + static void report_codemem_full(); + // Allocation/administration - static CodeBlob* allocate(int size); // allocates a new CodeBlob + static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled static int alignment_unit(); // guaranteed alignment of all CodeBlobs static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header) @@ -160,11 +164,7 @@ // The full limits of the codeCache static address low_bound() { return (address) _heap->low_boundary(); } static address high_bound() { return (address) _heap->high_boundary(); } - - static bool has_space(int size) { - // Always leave some room in the CodeCache for I2C/C2I adapters - return largest_free_block() > (CodeCacheMinimumFreeSpace + size); - } + static address high() { return (address) _heap->high(); } // Profiling static address first_address(); // first address used for CodeBlobs @@ -172,14 +172,14 @@ static size_t capacity() { return _heap->capacity(); } static size_t max_capacity() { return _heap->max_capacity(); } static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } - static size_t largest_free_block(); - static bool needs_flushing() { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; } + static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; } + static double reverse_free_ratio(); static bool needs_cache_clean() { return _needs_cache_clean; } static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } static void clear_inline_caches(); // clear all inline caches - static nmethod* find_and_remove_saved_code(Method* m); + static nmethod* reanimate_saved_code(Method* m); static void remove_saved_code(nmethod* nm); static void speculatively_disconnect(nmethod* nm); @@ -196,6 +196,8 @@ // tells how many nmethods have dependencies static int number_of_nmethods_with_dependencies(); + + static int get_codemem_full_count() { return _codemem_full_count; } }; #endif // SHARE_VM_CODE_CODECACHE_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/code/compiledIC.cpp --- a/src/share/vm/code/compiledIC.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/code/compiledIC.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -45,25 +45,6 @@ // Every time a compiled IC is changed or its type is being accessed, // either the CompiledIC_lock must be set or we must be at a safe point. - -// Release the CompiledICHolder* associated with this call site is there is one. -void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { - // This call site might have become stale so inspect it carefully. - NativeCall* call = nativeCall_at(call_site->addr()); - if (is_icholder_entry(call->destination())) { - NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); - InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); - } -} - - -bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { - // This call site might have become stale so inspect it carefully. 
- NativeCall* call = nativeCall_at(call_site->addr()); - return is_icholder_entry(call->destination()); -} - - //----------------------------------------------------------------------------- // Low-level access to an inline cache. Private, since they might not be // MT-safe to use. @@ -497,33 +478,6 @@ return (cb != NULL && cb->is_adapter_blob()); } - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // search for the ic_call at the given address - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; -} -} - - // ---------------------------------------------------------------------------- void CompiledStaticCall::set_to_clean() { @@ -558,39 +512,6 @@ return nm->stub_contains(destination()); } - -void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { - address stub=find_stub(); -#ifdef GRAAL - if (stub == NULL) { - set_destination_mt_safe(entry); - return; - } -#endif - guarantee(stub != NULL, "stub not found"); - - if (TraceICs) { - ResourceMark rm; - tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", - instruction_address(), - callee->name_and_sig_as_C_string()); - } - - NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object - NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - - assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache"); - assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache"); - - // Update stub - method_holder->set_data((intptr_t)callee()); - jump->set_jump_destination(entry); - - // Update jump to call - set_destination_mt_safe(stub); -} - - void CompiledStaticCall::set(const StaticCallInfo& info) { assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); @@ -633,19 +554,6 @@ } } - -void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { - assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); - // Reset stub - address stub = static_stub->addr(); - assert(stub!=NULL, "stub not found"); - NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object - NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - method_holder->set_data(0); - jump->set_jump_destination((address)-1); -} - - address CompiledStaticCall::find_stub() { // Find reloc. 
information containing this call-site RelocIterator iter((nmethod*)NULL, instruction_address()); @@ -683,19 +591,16 @@ || is_optimized() || is_megamorphic(), "sanity check"); } - void CompiledIC::print() { print_compiled_ic(); tty->cr(); } - void CompiledIC::print_compiled_ic() { tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT, instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination(), is_optimized() ? NULL : cached_value()); } - void CompiledStaticCall::print() { tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address()); if (is_clean()) { @@ -708,23 +613,4 @@ tty->cr(); } -void CompiledStaticCall::verify() { - // Verify call - NativeCall::verify(); - if (os::is_MP()) { - verify_alignment(); - } - - // Verify stub - address stub = find_stub(); -#ifndef GRAAL - assert(stub != NULL, "no stub found for static call"); - NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object - NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); -#endif - - // Verify state - assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); -} - -#endif +#endif // !PRODUCT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/code/compiledIC.hpp --- a/src/share/vm/code/compiledIC.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/code/compiledIC.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -304,6 +304,11 @@ friend CompiledStaticCall* compiledStaticCall_at(address native_call); friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site); + // Code + static void emit_to_interp_stub(CodeBuffer &cbuf); + static int to_interp_stub_size(); + static int reloc_to_interp_stub(); + // State bool is_clean() const; bool is_call_to_compiled() const; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/code/nmethod.cpp --- a/src/share/vm/code/nmethod.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/code/nmethod.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -510,18 +510,17 @@ { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); - if (CodeCache::has_space(native_nmethod_size)) { - CodeOffsets offsets; - offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); - offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); - nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size, - compile_id, &offsets, - code_buffer, frame_size, - basic_lock_owner_sp_offset, - basic_lock_sp_offset, oop_maps); - if (nm != NULL) nmethod_stats.note_native_nmethod(nm); - if (PrintAssembly && nm != NULL) - Disassembler::decode(nm); + CodeOffsets offsets; + offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); + offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); + nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size, + compile_id, &offsets, + code_buffer, frame_size, + basic_lock_owner_sp_offset, + basic_lock_sp_offset, oop_maps); + NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm)); + if (PrintAssembly && nm != NULL) { + Disassembler::decode(nm); } } // verify nmethod @@ -547,18 +546,17 @@ { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); int nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); - if (CodeCache::has_space(nmethod_size)) { - CodeOffsets offsets; - offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); - offsets.set_value(CodeOffsets::Dtrace_trap, 
trap_offset); - offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); - - nm = new (nmethod_size) nmethod(method(), nmethod_size, - &offsets, code_buffer, frame_size); - - if (nm != NULL) nmethod_stats.note_nmethod(nm); - if (PrintAssembly && nm != NULL) - Disassembler::decode(nm); + CodeOffsets offsets; + offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); + offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset); + offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); + + nm = new (nmethod_size) nmethod(method(), nmethod_size, + &offsets, code_buffer, frame_size); + + NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); + if (PrintAssembly && nm != NULL) { + Disassembler::decode(nm); } } // verify nmethod @@ -607,22 +605,21 @@ + round_to(nul_chk_table->size_in_bytes(), oopSize) + round_to(debug_info->data_size() , oopSize) + leaf_graph_ids_size; - if (CodeCache::has_space(nmethod_size)) { - nm = new (nmethod_size) - nmethod(method(), nmethod_size, compile_id, entry_bci, offsets, - orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, - oop_maps, - handler_table, - nul_chk_table, - compiler, - comp_level, - leaf_graph_ids + nm = new (nmethod_size) + nmethod(method(), nmethod_size, compile_id, entry_bci, offsets, + orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, + oop_maps, + handler_table, + nul_chk_table, + compiler, + comp_level, + leaf_graph_ids #ifdef GRAAL - , installed_code, - triggered_deoptimizations + , installed_code, + triggered_deoptimizations #endif - ); - } + ); + if (nm != NULL) { // To make dependency checking during class loading fast, record // the nmethod dependencies in the classes it is dependent on. @@ -634,15 +631,18 @@ // classes the slow way is too slow. for (Dependencies::DepStream deps(nm); deps.next(); ) { Klass* klass = deps.context_type(); - if (klass == NULL) continue; // ignore things like evol_method + if (klass == NULL) { + continue; // ignore things like evol_method + } // record this nmethod as dependent on this klass InstanceKlass::cast(klass)->add_dependent_nmethod(nm); } } - if (nm != NULL) nmethod_stats.note_nmethod(nm); - if (PrintAssembly && nm != NULL) + NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); + if (PrintAssembly && nm != NULL) { Disassembler::decode(nm); + } } // verify nmethod @@ -821,13 +821,11 @@ } #endif // def HAVE_DTRACE_H -void* nmethod::operator new(size_t size, int nmethod_size) { - void* alloc = CodeCache::allocate(nmethod_size); - guarantee(alloc != NULL, "CodeCache should have enough space"); - return alloc; +void* nmethod::operator new(size_t size, int nmethod_size) throw () { + // Not critical, may return null if there is too little contiguous memory + return CodeCache::allocate(nmethod_size); } - nmethod::nmethod( Method* method, int nmethod_size, @@ -1893,6 +1891,19 @@ Metadata* md = r->metadata_value(); f(md); } + } else if (iter.type() == relocInfo::virtual_call_type) { + // Check compiledIC holders associated with this nmethod + CompiledIC *ic = CompiledIC_at(iter.reloc()); + if (ic->is_icholder_call()) { + CompiledICHolder* cichk = ic->cached_icholder(); + f(cichk->holder_method()); + f(cichk->holder_klass()); + } else { + Metadata* ic_oop = ic->cached_metadata(); + if (ic_oop != NULL) { + f(ic_oop); + } + } } } } @@ -1903,6 +1914,7 @@ Metadata* md = *p; f(md); } + // Finally, visit the Method* itself, which is not embedded in the other places scanned above. 
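The operator new change above makes nmethod allocation non-fatal: the allocation function is declared throw(), so when CodeCache::allocate() yields NULL the whole new-expression evaluates to NULL and no constructor runs. A small, self-contained C++ illustration of that contract (all names are illustrative, not HotSpot APIs):

    #include <cstddef>
    #include <new>

    struct BlobModel {
      // Non-throwing class-specific operator new: because it is declared
      // throw(), the compiler inserts a null check, so `new (...) BlobModel()`
      // evaluates to NULL on failure and the constructor is never invoked.
      static void* operator new(std::size_t size, bool is_critical) throw() {
        (void)is_critical; // in the patch this selects the reserved tail of the heap
        return ::operator new(size, std::nothrow); // stand-in for CodeCache::allocate()
      }
      // Matching placement delete, only used if a constructor throws.
      static void operator delete(void* p, bool) { ::operator delete(p); }
    };

    int main() {
      BlobModel* b = new (/* is_critical= */ true) BlobModel();
      if (b == NULL) {
        return 1; // HotSpot reports a full code cache here instead of aborting
      }
      delete b;
      return 0;
    }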
if (_method != NULL) f(_method); } @@ -2070,11 +2082,10 @@ if (!method()->is_native()) { SimpleScopeDesc ssd(this, fr.pc()); Bytecode_invoke call(ssd.method(), ssd.bci()); - // compiled invokedynamic call sites have an implicit receiver at - // resolution time, so make sure it gets GC'ed. - bool has_receiver = !call.is_invokestatic(); + bool has_receiver = call.has_receiver(); + bool has_appendix = call.has_appendix(); Symbol* signature = call.signature(); - fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f); + fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f); } #endif // !SHARK } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/code/stubs.cpp --- a/src/share/vm/code/stubs.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/code/stubs.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -67,7 +67,7 @@ intptr_t size = round_to(buffer_size, 2*BytesPerWord); BufferBlob* blob = BufferBlob::create(name, size); if( blob == NULL) { - vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name)); + vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, err_msg("CodeCache: no room for %s", name)); } _stub_interface = stub_interface; _buffer_size = blob->content_size(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/code/vtableStubs.cpp --- a/src/share/vm/code/vtableStubs.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/code/vtableStubs.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -60,7 +60,7 @@ const int bytes = chunk_factor * real_size + pd_code_alignment(); BufferBlob* blob = BufferBlob::create("vtable chunks", bytes); if (blob == NULL) { - vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks"); + vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks"); } _chunk = blob->content_begin(); _chunk_end = _chunk + bytes; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/compiler/compileBroker.cpp --- a/src/share/vm/compiler/compileBroker.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/compiler/compileBroker.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,7 @@ #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/sweeper.hpp" +#include "trace/tracing.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" #ifdef COMPILER1 @@ -68,7 +69,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end, char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool); -#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \ +#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -80,8 +81,7 @@ signature->bytes(), signature->utf8_length()); \ } -#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \ - comp_name, success) \ +#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -95,7 +95,7 @@ #else /* USDT2 */ -#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \ +#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -107,8 +107,7 @@ (char *) signature->bytes(), signature->utf8_length()); \ } -#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \ - comp_name, success) \ +#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -123,8 +122,8 @@ #else // ndef DTRACE_ENABLED -#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) -#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, comp_name, success) +#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) +#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) #endif // ndef DTRACE_ENABLED @@ -184,9 +183,11 @@ int CompileBroker::_sum_nmethod_size = 0; int CompileBroker::_sum_nmethod_code_size = 0; -CompileQueue* CompileBroker::_c2_method_queue = NULL; -CompileQueue* CompileBroker::_c1_method_queue = NULL; -CompileTask* CompileBroker::_task_free_list = NULL; +long CompileBroker::_peak_compilation_time = 0; + +CompileQueue* CompileBroker::_c2_method_queue = NULL; +CompileQueue* CompileBroker::_c1_method_queue = NULL; +CompileTask* CompileBroker::_task_free_list = NULL; GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL; @@ -1248,7 +1249,7 @@ if (method->is_not_compilable(comp_level)) return NULL; if (UseCodeCacheFlushing) { - nmethod* saved = CodeCache::find_and_remove_saved_code(method()); + nmethod* saved = CodeCache::reanimate_saved_code(method()); if (saved != NULL) { method->set_code(method, saved); return saved; @@ -1307,9 +1308,9 @@ method->jmethod_id(); } - // If the compiler is shut off due to code cache flushing or otherwise, + // If the compiler is shut off due to the code cache getting full, // fail out now so blocking compiles don't hang the Java thread - if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) { + if (!should_compile_new_jobs()) { CompilationPolicy::policy()->delay_compilation(method()); return NULL; } @@ -1600,7 +1601,7 @@ // We need this HandleMark to avoid leaking VM handles. 
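The _peak_compilation_time counter introduced above feeds the monitoring getters added to compileBroker.hpp later in this changeset; the update itself sits in collect_statistics(), where the total is now accumulated unconditionally rather than only under CITime. A self-contained sketch of that bookkeeping, with illustrative names:

    // Total time accumulates on every compile; the peak is a running max
    // over the durations of individual compilations.
    struct CompileTimeStats {
      long total_ms;
      long peak_ms;
      void record(long elapsed_ms) {
        total_ms += elapsed_ms;
        if (elapsed_ms > peak_ms) peak_ms = elapsed_ms;
      }
    };

    int main() {
      CompileTimeStats stats = { 0, 0 };
      stats.record(12);
      stats.record(48);
      stats.record(30);
      return (stats.total_ms == 90 && stats.peak_ms == 48) ? 0 : 1;
    }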
HandleMark hm(thread); - if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) { + if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) { // the code cache is really full handle_full_code_cache(); } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) { @@ -1663,42 +1664,37 @@ // Set up state required by +LogCompilation. void CompileBroker::init_compiler_thread_log() { CompilerThread* thread = CompilerThread::current(); - char fileBuf[4*K]; + char file_name[4*K]; FILE* fp = NULL; - char* file = NULL; intx thread_id = os::current_thread_id(); for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) { const char* dir = (try_temp_dir ? os::get_temp_directory() : NULL); if (dir == NULL) { - jio_snprintf(fileBuf, sizeof(fileBuf), "hs_c" UINTX_FORMAT "_pid%u.log", + jio_snprintf(file_name, sizeof(file_name), "hs_c" UINTX_FORMAT "_pid%u.log", thread_id, os::current_process_id()); } else { - jio_snprintf(fileBuf, sizeof(fileBuf), + jio_snprintf(file_name, sizeof(file_name), "%s%shs_c" UINTX_FORMAT "_pid%u.log", dir, os::file_separator(), thread_id, os::current_process_id()); } - fp = fopen(fileBuf, "at"); + + fp = fopen(file_name, "at"); if (fp != NULL) { - file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1, mtCompiler); - strcpy(file, fileBuf); - break; + if (LogCompilation && Verbose) { + tty->print_cr("Opening compilation log %s", file_name); + } + CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file_name, fp, thread_id); + thread->init_log(log); + + if (xtty != NULL) { + ttyLocker ttyl; + // Record any per thread log files + xtty->elem("thread_logfile thread='%d' filename='%s'", thread_id, file_name); + } + return; } } - if (fp == NULL) { - warning("Cannot open log file: %s", fileBuf); - } else { - if (LogCompilation && Verbose) - tty->print_cr("Opening compilation log %s", file); - CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file, fp, thread_id); - thread->init_log(log); - - if (xtty != NULL) { - ttyLocker ttyl; - - // Record any per thread log files - xtty->elem("thread_logfile thread='%d' filename='%s'", thread_id, file); - } - } + warning("Cannot open log file: %s", file_name); } // ------------------------------------------------------------------ @@ -1785,8 +1781,7 @@ // Save information about this method in case of failure. set_last_compile(thread, method, is_osr, task_level); - DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler(task_level), method, - compiler_name(task_level)); + DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level)); } // Allocate a new set of JNI handles. @@ -1822,6 +1817,7 @@ ciMethod* target = ci_env.get_method_from_handle(target_handle); TraceTime t1("compilation", &time); + EventCompilation event; AbstractCompiler *comp = compiler(task_level); if (comp == NULL) { @@ -1861,13 +1857,24 @@ } } } + // simulate crash during compilation + assert(task->compile_id() != CICrashAt, "just as planned"); + if (event.should_commit()) { + event.set_method(target->get_Method()); + event.set_compileID(compile_id); + event.set_compileLevel(task->comp_level()); + event.set_succeded(task->is_success()); + event.set_isOsr(is_osr); + event.set_codeSize((task->code() == NULL) ? 
0 : task->code()->total_size()); + event.set_inlinedBytes(task->num_inlined_bytecodes()); + event.commit(); + } } pop_jni_handle_block(); methodHandle method(thread, task->method()); - DTRACE_METHOD_COMPILE_END_PROBE(compiler(task_level), method, - compiler_name(task_level), task->is_success()); + DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success()); collect_statistics(thread, time, task); @@ -1875,8 +1882,10 @@ tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp tty->print("%4d ", compile_id); // print compilation number tty->print("%s ", (is_osr ? "%" : " ")); - int code_size = (task->code() == NULL) ? 0 : task->code()->total_size(); - tty->print_cr("size: %d time: %d inlined: %d bytes", code_size, (int)time.milliseconds(), task->num_inlined_bytecodes()); + if (task->code() != NULL) { + tty->print("size: %d(%d) ", task->code()->total_size(), task->code()->insts_size()); + } + tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes()); } if (PrintCodeCacheOnCompilation) @@ -1940,6 +1949,10 @@ } warning("CodeCache is full. Compiler has been disabled."); warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); + + CodeCache::report_codemem_full(); + + #ifndef PRODUCT if (CompileTheWorld || ExitOnFullCodeCache) { codecache_print(/* detailed= */ true); @@ -2097,8 +2110,10 @@ // java.lang.management.CompilationMBean _perf_total_compilation->inc(time.ticks()); + _t_total_compilation.add(time); + _peak_compilation_time = time.milliseconds() > _peak_compilation_time ? time.milliseconds() : _peak_compilation_time; + if (CITime) { - _t_total_compilation.add(time); if (is_osr) { _t_osr_compilation.add(time); _sum_osr_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); @@ -2221,7 +2236,6 @@ tty->print_cr(" nmethod total size : %6d bytes", CompileBroker::_sum_nmethod_size); } - // Debugging output for failure void CompileBroker::print_last_compile() { if ( _last_compile_level != CompLevel_none && diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/compiler/compileBroker.hpp --- a/src/share/vm/compiler/compileBroker.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/compiler/compileBroker.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -299,17 +299,17 @@ static elapsedTimer _t_osr_compilation; static elapsedTimer _t_standard_compilation; + static int _total_compile_count; static int _total_bailout_count; static int _total_invalidated_count; - static int _total_compile_count; static int _total_native_compile_count; static int _total_osr_compile_count; static int _total_standard_compile_count; - static int _sum_osr_bytes_compiled; static int _sum_standard_bytes_compiled; static int _sum_nmethod_size; static int _sum_nmethod_code_size; + static long _peak_compilation_time; static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS); static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count); @@ -421,6 +421,19 @@ // compiler name for debugging static const char* compiler_name(int comp_level); + + static int get_total_compile_count() { return _total_compile_count; } + static int get_total_bailout_count() { return _total_bailout_count; } + static int get_total_invalidated_count() { return _total_invalidated_count; } + static int get_total_native_compile_count() { return _total_native_compile_count; } + static int get_total_osr_compile_count() { return _total_osr_compile_count; } + static int get_total_standard_compile_count() { return _total_standard_compile_count; } + static int get_sum_osr_bytes_compiled() { return _sum_osr_bytes_compiled; } + static int get_sum_standard_bytes_compiled() { return _sum_standard_bytes_compiled; } + static int get_sum_nmethod_size() { return _sum_nmethod_size;} + static int get_sum_nmethod_code_size() { return _sum_nmethod_code_size; } + static long get_peak_compilation_time() { return _peak_compilation_time; } + static long get_total_compilation_time() { return _t_total_compilation.milliseconds(); } }; #endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/compiler/compileLog.cpp --- a/src/share/vm/compiler/compileLog.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/compiler/compileLog.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -34,17 +34,18 @@ // ------------------------------------------------------------------ // CompileLog::CompileLog -CompileLog::CompileLog(const char* file, FILE* fp, intx thread_id) +CompileLog::CompileLog(const char* file_name, FILE* fp, intx thread_id) : _context(_context_buffer, sizeof(_context_buffer)) { - initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp)); - _file = file; + initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp, true)); _file_end = 0; _thread_id = thread_id; _identities_limit = 0; _identities_capacity = 400; _identities = NEW_C_HEAP_ARRAY(char, _identities_capacity, mtCompiler); + _file = NEW_C_HEAP_ARRAY(char, strlen(file_name)+1, mtCompiler); + strcpy((char*)_file, file_name); // link into the global list { MutexLocker locker(CompileTaskAlloc_lock); @@ -57,6 +58,7 @@ delete _out; _out = NULL; FREE_C_HEAP_ARRAY(char, _identities, mtCompiler); + FREE_C_HEAP_ARRAY(char, _file, mtCompiler); } @@ -188,7 +190,8 @@ if (called_exit) return; called_exit = true; - for (CompileLog* log = _first; log != NULL; log = log->_next) { + CompileLog* log = _first; + while (log != NULL) { log->flush(); const char* partial_file = log->file(); int partial_fd = open(partial_file, O_RDONLY); @@ -267,7 +270,11 @@ close(partial_fd); unlink(partial_file); } + CompileLog* next_log = log->_next; + delete log; + log = next_log; } + _first = NULL; } // 
------------------------------------------------------------------ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/compiler/compileLog.hpp --- a/src/share/vm/compiler/compileLog.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/compiler/compileLog.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -57,7 +57,7 @@ void va_tag(bool push, const char* format, va_list ap); public: - CompileLog(const char* file, FILE* fp, intx thread_id); + CompileLog(const char* file_name, FILE* fp, intx thread_id); ~CompileLog(); intx thread_id() { return _thread_id; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -51,14 +51,6 @@ } template <class Chunk> -AdaptiveFreeList<Chunk>::AdaptiveFreeList(Chunk* fc) : FreeList<Chunk>(fc), _hint(0) { - init_statistics(); -#ifndef PRODUCT - _allocation_stats.set_returned_bytes(size() * HeapWordSize); -#endif -} - -template <class Chunk> void AdaptiveFreeList<Chunk>::initialize() { FreeList<Chunk>::initialize(); set_hint(0); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -55,7 +55,6 @@ public: AdaptiveFreeList(); - AdaptiveFreeList(Chunk* fc); using FreeList<Chunk>::assert_proper_lock_protection; #ifdef ASSERT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -969,8 +969,8 @@ } -void CMSAdaptiveSizePolicy::compute_young_generation_free_space(size_t cur_eden, - size_t max_eden_size) +void CMSAdaptiveSizePolicy::compute_eden_space_size(size_t cur_eden, + size_t max_eden_size) { size_t desired_eden_size = cur_eden; size_t eden_limit = max_eden_size; @@ -978,7 +978,7 @@ // Printout input if (PrintGC && PrintAdaptiveSizePolicy) { gclog_or_tty->print_cr( - "CMSAdaptiveSizePolicy::compute_young_generation_free_space: " + "CMSAdaptiveSizePolicy::compute_eden_space_size: " "cur_eden " SIZE_FORMAT, cur_eden); } @@ -1024,7 +1024,7 @@ if (PrintGC && PrintAdaptiveSizePolicy) { gclog_or_tty->print_cr( - "CMSAdaptiveSizePolicy::compute_young_generation_free_space limits:" + "CMSAdaptiveSizePolicy::compute_eden_space_size limits:" " desired_eden_size: " SIZE_FORMAT " old_eden_size: " SIZE_FORMAT, desired_eden_size, cur_eden); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -436,8 +436,8 @@ size_t generation_alignment() { return _generation_alignment; } - virtual void compute_young_generation_free_space(size_t cur_eden, - size_t max_eden_size); + virtual void compute_eden_space_size(size_t cur_eden, + size_t max_eden_size); // Calculates new survivor space size; returns a new tenuring threshold // value. Stores new survivor size in _survivor_size. virtual uint compute_survivor_space_size_and_threshold( diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -52,7 +52,7 @@ } void ConcurrentMarkSweepPolicy::initialize_generations() { - _generations = new GenerationSpecPtr[number_of_generations()]; + _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL); if (_generations == NULL) vm_exit_during_initialization("Unable to allocate gen spec"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -153,8 +153,6 @@ _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1 "a freelist par lock", true); - if (_indexedFreeListParLocks[i] == NULL) - vm_exit_during_initialization("Could not allocate a par lock"); DEBUG_ONLY( _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]); ) @@ -285,6 +283,7 @@ _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); _indexedFreeList[mr.word_size()].return_chunk_at_head(fc); } + coalBirth(mr.word_size()); } _promoInfo.reset(); _smallLinearAllocBlock._ptr = NULL; @@ -1762,7 +1761,7 @@ } ec->set_size(size); debug_only(ec->mangleFreed(size)); - if (size < SmallForDictionary) { + if (size < SmallForDictionary && ParallelGCThreads != 0) { lock = _indexedFreeListParLocks[size]; } MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -37,8 +37,12 @@ #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" #include "gc_implementation/parNew/parNewGeneration.hpp" #include "gc_implementation/shared/collectorCounters.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_interface/collectedHeap.inline.hpp" +#include "memory/allocation.hpp" #include "memory/cardTableRS.hpp" #include "memory/collectorPolicy.hpp" #include "memory/gcLocker.inline.hpp" @@ -60,7 +64,8 @@ // statics CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL; -bool CMSCollector::_full_gc_requested = false; +bool CMSCollector::_full_gc_requested = false; +GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc; ////////////////////////////////////////////////////////////////// // In support of CMS/VM thread synchronization @@ -193,7 +198,8 @@ FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) : CardGeneration(rs, initial_byte_size, level, ct), _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))), - _debug_collection_type(Concurrent_collection_type) + _debug_collection_type(Concurrent_collection_type), + _did_compact(false) { HeapWord* bottom = (HeapWord*) _virtual_space.low(); HeapWord* end = (HeapWord*) _virtual_space.high(); @@ -590,7 +596,10 @@ _concurrent_cycles_since_last_unload(0), _roots_scanning_options(0), _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding), - 
_intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding) + _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding), + _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()), + _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), + _cms_start_registered(false) { if (ExplicitGCInvokesConcurrentAndUnloadsClasses) { ExplicitGCInvokesConcurrent = true; @@ -691,8 +700,7 @@ _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio); // Clip CMSBootstrapOccupancy between 0 and 100. - _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy))) - /(double)100; + _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100; _full_gcs_since_conc_gc = 0; @@ -917,18 +925,15 @@ return; } - // Compute some numbers about the state of the heap. - const size_t used_after_gc = used(); - const size_t capacity_after_gc = capacity(); + // The heap has been compacted but not reset yet. + // Any metric such as free() or used() will be incorrect. CardGeneration::compute_new_size(); // Reset again after a possible resizing - cmsSpace()->reset_after_compaction(); - - assert(used() == used_after_gc && used_after_gc <= capacity(), - err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT - " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity())); + if (did_compact()) { + cmsSpace()->reset_after_compaction(); + } } void ConcurrentMarkSweepGeneration::compute_new_size_free_list() { @@ -1578,6 +1583,8 @@ return false; } +void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); } + // Clear _expansion_cause fields of constituent generations void CMSCollector::clear_expansion_cause() { _cmsGen->clear_expansion_cause(); @@ -1675,21 +1682,40 @@ } acquire_control_and_collect(full, clear_all_soft_refs); _full_gcs_since_conc_gc++; - -} - -void CMSCollector::request_full_gc(unsigned int full_gc_count) { +} + +void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) { GenCollectedHeap* gch = GenCollectedHeap::heap(); unsigned int gc_count = gch->total_full_collections(); if (gc_count == full_gc_count) { MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag); _full_gc_requested = true; + _full_gc_cause = cause; CGC_lock->notify(); // nudge CMS thread } else { assert(gc_count > full_gc_count, "Error: causal loop"); } } +bool CMSCollector::is_external_interruption() { + GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause(); + return GCCause::is_user_requested_gc(cause) || + GCCause::is_serviceability_requested_gc(cause); +} + +void CMSCollector::report_concurrent_mode_interruption() { + if (is_external_interruption()) { + if (PrintGCDetails) { + gclog_or_tty->print(" (concurrent mode interrupted)"); + } + } else { + if (PrintGCDetails) { + gclog_or_tty->print(" (concurrent mode failure)"); + } + _gc_tracer_cm->report_concurrent_mode_failure(); + } +} + // The foreground and background collectors need to coordinate in order // to make sure that they do not mutually interfere with CMS collections. 
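A compact model of the classification performed by report_concurrent_mode_interruption() above: a cycle cut short by a user- or serviceability-requested GC is merely logged as interrupted, while any other cause counts as a genuine concurrent mode failure and is also reported to the GC tracer. Types below are illustrative stand-ins:

    #include <cstdio>

    enum CauseModel { cause_user_requested, cause_serviceability, cause_allocation_failure };

    static bool is_external_interruption(CauseModel c) {
      return c == cause_user_requested || c == cause_serviceability;
    }

    static void report(CauseModel c) {
      if (is_external_interruption(c)) {
        std::printf(" (concurrent mode interrupted)\n");
      } else {
        std::printf(" (concurrent mode failure)\n");
        // the patch additionally calls _gc_tracer_cm->report_concurrent_mode_failure()
      }
    }

    int main() { report(cause_allocation_failure); return 0; }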
@@ -1847,16 +1873,11 @@ } ) - if (PrintGCDetails && first_state > Idling) { - GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause(); - if (GCCause::is_user_requested_gc(cause) || - GCCause::is_serviceability_requested_gc(cause)) { - gclog_or_tty->print(" (concurrent mode interrupted)"); - } else { - gclog_or_tty->print(" (concurrent mode failure)"); - } - } - + if (first_state > Idling) { + report_concurrent_mode_interruption(); + } + + set_did_compact(should_compact); if (should_compact) { // If the collection is being acquired from the background // collector, there may be references on the discovered @@ -1869,6 +1890,10 @@ // Reference objects are active. ref_processor()->clean_up_discovered_references(); + if (first_state > Idling) { + save_heap_summary(); + } + do_compaction_work(clear_all_soft_refs); // Has the GC time limit been exceeded? @@ -1972,7 +1997,14 @@ // a mark-sweep-compact. void CMSCollector::do_compaction_work(bool clear_all_soft_refs) { GenCollectedHeap* gch = GenCollectedHeap::heap(); - TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty); + + STWGCTimer* gc_timer = GenMarkSweep::gc_timer(); + gc_timer->register_gc_start(os::elapsed_counter()); + + SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); + gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); + + GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL); if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) { gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d " "collections passed to foreground collector", _full_gcs_since_conc_gc); @@ -2063,6 +2095,10 @@ size_policy()->msc_collection_end(gch->gc_cause()); } + gc_timer->register_gc_end(os::elapsed_counter()); + + gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); + // For a mark-sweep-compact, compute_new_size() will be called // in the heap's do_collection() method. } @@ -2094,7 +2130,7 @@ // required. _collectorState = FinalMarking; } - collect_in_foreground(clear_all_soft_refs); + collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause()); // For a mark-sweep, compute_new_size() will be called // in the heap's do_collection() method. @@ -2154,7 +2190,7 @@ // one "collect" method between the background collector and the foreground // collector but the if-then-else required made it cleaner to have // separate methods. -void CMSCollector::collect_in_background(bool clear_all_soft_refs) { +void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) { assert(Thread::current()->is_ConcurrentGC_thread(), "A CMS asynchronous collection is only allowed on a CMS thread."); @@ -2173,6 +2209,7 @@ } else { assert(_collectorState == Idling, "Should be idling before start."); _collectorState = InitialMarking; + register_gc_start(cause); // Reset the expansion cause, now that we are about to begin // a new cycle. clear_expansion_cause(); @@ -2185,6 +2222,7 @@ // ensuing concurrent GC cycle. update_should_unload_classes(); _full_gc_requested = false; // acks all outstanding full gc requests + _full_gc_cause = GCCause::_no_gc; // Signal that we are about to start a collection gch->increment_total_full_collections(); // ... 
starting a collection cycle _collection_count_start = gch->total_full_collections(); @@ -2264,7 +2302,6 @@ { ReleaseForegroundGC x(this); stats().record_cms_begin(); - VM_CMS_Initial_Mark initial_mark_op(this); VMThread::execute(&initial_mark_op); } @@ -2344,6 +2381,7 @@ CMSTokenSync z(true); // not strictly needed. if (_collectorState == Resizing) { compute_new_size(); + save_heap_summary(); _collectorState = Resetting; } else { assert(_collectorState == Idling, "The state should only change" @@ -2402,7 +2440,39 @@ } } -void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { +void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) { + if (!_cms_start_registered) { + register_gc_start(cause); + } +} + +void CMSCollector::register_gc_start(GCCause::Cause cause) { + _cms_start_registered = true; + _gc_timer_cm->register_gc_start(os::elapsed_counter()); + _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start()); +} + +void CMSCollector::register_gc_end() { + if (_cms_start_registered) { + report_heap_summary(GCWhen::AfterGC); + + _gc_timer_cm->register_gc_end(os::elapsed_counter()); + _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); + _cms_start_registered = false; + } +} + +void CMSCollector::save_heap_summary() { + GenCollectedHeap* gch = GenCollectedHeap::heap(); + _last_heap_summary = gch->create_heap_summary(); + _last_metaspace_summary = gch->create_metaspace_summary(); +} + +void CMSCollector::report_heap_summary(GCWhen::Type when) { + _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary); +} + +void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) { assert(_foregroundGCIsActive && !_foregroundGCShouldWait, "Foreground collector should be waiting, not executing"); assert(Thread::current()->is_VM_thread(), "A foreground collection" @@ -2410,8 +2480,8 @@ assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), "VM thread should have CMS token"); - NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, - true, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, + true, NULL);) if (UseAdaptiveSizePolicy) { size_policy()->ms_collection_begin(); } @@ -2435,6 +2505,7 @@ } switch (_collectorState) { case InitialMarking: + register_foreground_gc_start(cause); init_mark_was_synchronous = true; // fact to be exploited in re-mark checkpointRootsInitial(false); assert(_collectorState == Marking, "Collector state should have changed" @@ -2444,8 +2515,7 @@ // initial marking in checkpointRootsInitialWork has been completed if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before initial mark: "); - Universe::verify(); + Universe::verify("Verify before initial mark: "); } { bool res = markFromRoots(false); @@ -2456,8 +2526,7 @@ case FinalMarking: if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before re-mark: "); - Universe::verify(); + Universe::verify("Verify before re-mark: "); } checkpointRootsFinal(false, clear_all_soft_refs, init_mark_was_synchronous); @@ -2468,8 +2537,7 @@ // final marking in checkpointRootsFinal has been completed if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before sweep: "); - Universe::verify(); + Universe::verify("Verify before sweep: "); } sweep(false); 
assert(_collectorState == Resizing, "Incorrect state"); @@ -2484,9 +2552,9 @@ // The heap has been resized. if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before reset: "); - Universe::verify(); + Universe::verify("Verify before reset: "); } + save_heap_summary(); reset(false); assert(_collectorState == Idling, "Collector state should " "have changed"); @@ -2722,6 +2790,7 @@ Chunk::clean_chunk_pool(); } + set_did_compact(false); _between_prologue_and_epilogue = false; // ready for next cycle } @@ -2853,8 +2922,8 @@ bool failed() { return _failed; } }; -bool CMSCollector::verify_after_remark() { - gclog_or_tty->print(" [Verifying CMS Marking... "); +bool CMSCollector::verify_after_remark(bool silent) { + if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... "); MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); static bool init = false; @@ -2915,7 +2984,7 @@ warning("Unrecognized value %d for CMSRemarkVerifyVariant", CMSRemarkVerifyVariant); } - gclog_or_tty->print(" done] "); + if (!silent) gclog_or_tty->print(" done] "); return true; } @@ -3385,7 +3454,6 @@ assert_locked_or_safepoint(Heap_lock); bool result = _virtual_space.expand_by(bytes); if (result) { - HeapWord* old_end = _cmsSpace->end(); size_t new_word_size = heap_word_size(_virtual_space.committed_size()); MemRegion mr(_cmsSpace->bottom(), new_word_size); @@ -3426,8 +3494,9 @@ void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) { assert_locked_or_safepoint(Heap_lock); assert_lock_strong(freelistLock()); - // XXX Fix when compaction is implemented. - warning("Shrinking of CMS not yet implemented"); + if (PrintGCDetails && Verbose) { + warning("Shrinking of CMS not yet implemented"); + } return; } @@ -3508,6 +3577,9 @@ check_correct_thread_executing(); TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); + save_heap_summary(); + report_heap_summary(GCWhen::BeforeGC); + ReferenceProcessor* rp = ref_processor(); SpecializationStats::clear(); assert(_restart_addr == NULL, "Control point invariant"); @@ -3553,8 +3625,8 @@ // CMS collection cycle. setup_cms_unloading_and_verification_state(); - NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork", - PrintGCDetails && Verbose, true, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork", + PrintGCDetails && Verbose, true, _gc_timer_cm);) if (UseAdaptiveSizePolicy) { size_policy()->checkpoint_roots_initial_begin(); } @@ -4546,8 +4618,10 @@ // The code in this method may need further // tweaking for better performance and some restructuring // for cleaner interfaces. + GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases rp->preclean_discovered_references( - rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl); + rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl, + gc_timer); } if (clean_survivor) { // preclean the active survivor space(s) @@ -4889,8 +4963,8 @@ // Temporarily set flag to false, GCH->do_collection will // expect it to be false and set to true FlagSetting fl(gch->_is_gc_active, false); - NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark", - PrintGCDetails && Verbose, true, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark", + PrintGCDetails && Verbose, true, _gc_timer_cm);) int level = _cmsGen->level() - 1; if (level >= 0) { gch->do_collection(true, // full (i.e. 
force, see below) @@ -4919,7 +4993,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs, bool init_mark_was_synchronous) { - NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);) assert(haveFreelistLocks(), "must have free list locks"); assert_lock_strong(bitMapLock()); @@ -4970,11 +5044,11 @@ // the most recent young generation GC, minus those cleaned up by the // concurrent precleaning. if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) { - TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm); do_remark_parallel(); } else { - TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, - gclog_or_tty); + GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, + _gc_timer_cm); do_remark_non_parallel(); } } @@ -4987,7 +5061,7 @@ verify_overflow_empty(); { - NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);) refProcessingWork(asynch, clear_all_soft_refs); } verify_work_stacks_empty(); @@ -5048,6 +5122,8 @@ verify_after_remark(); } + _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure); + // Change under the freelistLocks. _collectorState = Sweeping; // Call isAllClear() under bitMapLock @@ -5701,7 +5777,7 @@ NULL, // space is set further below &_markBitMap, &_markStack, &mrias_cl); { - TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm); // Iterate over the dirty cards, setting the corresponding bits in the // mod union table. { @@ -5738,7 +5814,7 @@ Universe::verify(); } { - TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm); verify_work_stacks_empty(); @@ -5760,7 +5836,7 @@ } { - TraceTime t("visit unhandled CLDs", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm); verify_work_stacks_empty(); @@ -5779,7 +5855,7 @@ } { - TraceTime t("dirty klass scan", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm); verify_work_stacks_empty(); @@ -5981,7 +6057,9 @@ _span, &_markBitMap, &_markStack, &cmsKeepAliveClosure, false /* !preclean */); { - TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm); + + ReferenceProcessorStats stats; if (rp->processing_is_mt()) { // Set the degree of MT here. 
If the discovery is done MT, there // may have been a different number of threads doing the discovery @@ -6000,40 +6078,41 @@ } rp->set_active_mt_degree(active_workers); CMSRefProcTaskExecutor task_executor(*this); - rp->process_discovered_references(&_is_alive_closure, + stats = rp->process_discovered_references(&_is_alive_closure, + &cmsKeepAliveClosure, + &cmsDrainMarkingStackClosure, + &task_executor, + _gc_timer_cm); + } else { + stats = rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, - &task_executor); - } else { - rp->process_discovered_references(&_is_alive_closure, - &cmsKeepAliveClosure, - &cmsDrainMarkingStackClosure, - NULL); - } - verify_work_stacks_empty(); - } + NULL, + _gc_timer_cm); + } + _gc_tracer_cm->report_gc_reference_stats(stats); + + } + + // This is the point where the entire marking should have completed. + verify_work_stacks_empty(); if (should_unload_classes()) { { - TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); - - // Follow SystemDictionary roots and unload classes + GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm); + + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); - // Follow CodeCache roots and unload any methods marked for unloading + // Unload nmethods. CodeCache::do_unloading(&_is_alive_closure, purged_class); - cmsDrainMarkingStackClosure.do_void(); - verify_work_stacks_empty(); - - // Update subklass/sibling/implementor links in KlassKlass descendants + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(&_is_alive_closure); - // Nothing should have been pushed onto the working stacks. - verify_work_stacks_empty(); } { - TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm); // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); } @@ -6042,12 +6121,11 @@ // CMS doesn't use the StringTable as hard roots when class unloading is turned off. // Need to check if we really scanned the StringTable. if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) { - TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); - // Now clean up stale oops in StringTable + GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm); + // Delete entries for dead interned strings. 
StringTable::unlink(&_is_alive_closure); } - verify_work_stacks_empty(); // Restore any preserved marks as a result of mark stack or // work queue overflow restore_preserved_marks_if_any(); // done single-threaded for now @@ -6388,12 +6466,14 @@ _cmsGen->rotate_debug_collection_type(); } ) + + register_gc_end(); } void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) { gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty); + GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); switch (op) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -25,8 +25,10 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP +#include "gc_implementation/shared/gcHeapSummary.hpp" #include "gc_implementation/shared/gSpaceCounters.hpp" #include "gc_implementation/shared/gcStats.hpp" +#include "gc_implementation/shared/gcWhen.hpp" #include "gc_implementation/shared/generationCounters.hpp" #include "memory/freeBlockDictionary.hpp" #include "memory/generation.hpp" @@ -53,6 +55,8 @@ class CMSAdaptiveSizePolicy; class CMSConcMarkingTask; class CMSGCAdaptivePolicyCounters; +class CMSTracer; +class ConcurrentGCTimer; class ConcurrentMarkSweepGeneration; class ConcurrentMarkSweepPolicy; class ConcurrentMarkSweepThread; @@ -61,6 +65,7 @@ class PromotionInfo; class ScanMarkedObjectsAgainCarefullyClosure; class TenuredGeneration; +class SerialOldTracer; // A generic CMS bit map. It's the basis for both the CMS marking bit map // as well as for the mod union table (in each case only a subset of the @@ -485,10 +490,6 @@ assert(!span.is_empty(), "Empty span could spell trouble"); } - void do_object(oop obj) { - assert(false, "not to be invoked"); - } - bool do_object_b(oop obj); }; @@ -571,8 +572,9 @@ bool _completed_initialization; // In support of ExplicitGCInvokesConcurrent - static bool _full_gc_requested; - unsigned int _collection_count_start; + static bool _full_gc_requested; + static GCCause::Cause _full_gc_cause; + unsigned int _collection_count_start; // Should we unload classes this concurrent cycle? bool _should_unload_classes; @@ -604,6 +606,8 @@ ConcurrentMarkSweepPolicy* _collector_policy; ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; } + void set_did_compact(bool v); + // XXX Move these to CMSStats ??? FIX ME !!! 
elapsedTimer _inter_sweep_timer; // time between sweeps elapsedTimer _intra_sweep_timer; // time _in_ sweeps @@ -611,6 +615,20 @@ AdaptivePaddedAverage _inter_sweep_estimate; AdaptivePaddedAverage _intra_sweep_estimate; + CMSTracer* _gc_tracer_cm; + ConcurrentGCTimer* _gc_timer_cm; + + bool _cms_start_registered; + + GCHeapSummary _last_heap_summary; + MetaspaceSummary _last_metaspace_summary; + + void register_foreground_gc_start(GCCause::Cause cause); + void register_gc_start(GCCause::Cause cause); + void register_gc_end(); + void save_heap_summary(); + void report_heap_summary(GCWhen::Type when); + protected: ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) MemRegion _span; // span covering above two @@ -829,6 +847,10 @@ void do_mark_sweep_work(bool clear_all_soft_refs, CollectorState first_state, bool should_start_over); + // Work methods for reporting concurrent mode interruption or failure + bool is_external_interruption(); + void report_concurrent_mode_interruption(); + // If the background GC is active, acquire control from the background // GC and do the collection. void acquire_control_and_collect(bool full, bool clear_all_soft_refs); @@ -878,11 +900,11 @@ bool clear_all_soft_refs, size_t size, bool tlab); - void collect_in_background(bool clear_all_soft_refs); - void collect_in_foreground(bool clear_all_soft_refs); + void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause); + void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause); // In support of ExplicitGCInvokesConcurrent - static void request_full_gc(unsigned int full_gc_count); + static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause); // Should we unload classes in a particular concurrent cycle? bool should_unload_classes() const { return _should_unload_classes; @@ -990,7 +1012,7 @@ // debugging void verify(); - bool verify_after_remark(); + bool verify_after_remark(bool silent = VerifySilently); void verify_ok_to_terminate() const PRODUCT_RETURN; void verify_work_stacks_empty() const PRODUCT_RETURN; void verify_overflow_empty() const PRODUCT_RETURN; @@ -1081,6 +1103,10 @@ CollectionTypes _debug_collection_type; + // True if a compacting collection was done. + bool _did_compact; + bool did_compact() { return _did_compact; } + // Fraction of current occupancy at which to start a CMS collection which // will collect this generation (at least). 
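The header changes above thread a GCCause through the full-GC request path: the requester stores the cause beside the request flag, and the CMS thread reads both when it starts the background collection (see the concurrentMarkSweepThread.cpp hunk below). A small, self-contained model of that handshake, with illustrative names:

    enum GCCauseModel { cause_no_gc, cause_system_gc, cause_cms_concurrent_mark };

    struct FullGCRequestModel {
      bool requested;
      GCCauseModel cause;

      // Requester side: record the cause together with the request flag.
      void request_full_gc(GCCauseModel c) {
        requested = true;
        cause = c;
        // ... notify the CMS thread ...
      }

      // CMS thread side: use the recorded cause, or the default concurrent one.
      GCCauseModel cause_for_cycle() const {
        return requested ? cause : cause_cms_concurrent_mark;
      }
    };

    int main() {
      FullGCRequestModel r = { false, cause_no_gc };
      r.request_full_gc(cause_system_gc);
      return r.cause_for_cycle() == cause_system_gc ? 0 : 1;
    }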
double _initiating_occupancy; @@ -1121,6 +1147,8 @@ // Adaptive size policy CMSAdaptiveSizePolicy* size_policy(); + void set_did_compact(bool v) { _did_compact = v; } + bool refs_discovery_is_atomic() const { return false; } bool refs_discovery_is_mt() const { // Note: CMS does MT-discovery during the parallel-remark @@ -1528,9 +1556,6 @@ _bit_map(bit_map), _par_scan_closure(cl) { } - void do_object(oop obj) { - guarantee(false, "Call do_object_b(oop, MemRegion) instead"); - } bool do_object_b(oop obj) { guarantee(false, "Call do_object_b(oop, MemRegion) form instead"); return false; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -140,7 +140,9 @@ while (!_should_terminate) { sleepBeforeNextCycle(); if (_should_terminate) break; - _collector->collect_in_background(false); // !clear_all_soft_refs + GCCause::Cause cause = _collector->_full_gc_requested ? + _collector->_full_gc_cause : GCCause::_cms_concurrent_mark; + _collector->collect_in_background(false, cause); } assert(_should_terminate, "just checking"); // Check that the state of any protocol for synchronization diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,9 +26,12 @@ #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "memory/gcLocker.inline.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/os.hpp" #include "utilities/dtrace.hpp" @@ -60,6 +63,7 @@ void VM_CMS_Operation::verify_before_gc() { if (VerifyBeforeGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { + GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); @@ -71,6 +75,7 @@ void VM_CMS_Operation::verify_after_gc() { if (VerifyAfterGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { + GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); @@ -140,6 +145,8 @@ ); #endif /* USDT2 */ + _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark", os::elapsed_counter()); + GenCollectedHeap* gch = GenCollectedHeap::heap(); GCCauseSetter gccs(gch, GCCause::_cms_initial_mark); @@ -149,6 +156,9 @@ _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause()); VM_CMS_Operation::verify_after_gc(); + + _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter()); + #ifndef USDT2 HS_DTRACE_PROBE(hs_private, cms__initmark__end); #else /* USDT2 */ @@ -172,6 +182,8 @@ ); #endif /* USDT2 */ + _collector->_gc_timer_cm->register_gc_pause_start("Final Mark", os::elapsed_counter()); + GenCollectedHeap* gch = GenCollectedHeap::heap(); GCCauseSetter gccs(gch, GCCause::_cms_final_remark); @@ -181,6 +193,10 @@ _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause()); VM_CMS_Operation::verify_after_gc(); + + _collector->save_heap_summary(); + _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter()); + #ifndef USDT2 HS_DTRACE_PROBE(hs_private, cms__remark__end); #else /* USDT2 */ @@ -225,7 +241,7 @@ // In case CMS thread was in icms_wait(), wake it up. CMSCollector::start_icms(); // Nudge the CMS thread to start a concurrent collection. - CMSCollector::request_full_gc(_full_gc_count_before); + CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause); } else { assert(_full_gc_count_before < gch->total_full_collections(), "Error"); FullGCCount_lock->notify_all(); // Inform the Java thread its work is done diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,40 +26,12 @@ #include "gc_implementation/g1/concurrentG1Refine.hpp" #include "gc_implementation/g1/concurrentG1RefineThread.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" -#include "gc_implementation/g1/g1CollectorPolicy.hpp" -#include "gc_implementation/g1/g1GCPhaseTimes.hpp" -#include "gc_implementation/g1/g1RemSet.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" -#include "memory/space.inline.hpp" -#include "runtime/atomic.hpp" -#include "runtime/java.hpp" -#include "utilities/copy.hpp" - -// Possible sizes for the card counts cache: odd primes that roughly double in size. -// (See jvmtiTagMap.cpp). - -#define MAX_SIZE ((size_t) -1) +#include "gc_implementation/g1/g1HotCardCache.hpp" -size_t ConcurrentG1Refine::_cc_cache_sizes[] = { - 16381, 32771, 76831, 150001, 307261, - 614563, 1228891, 2457733, 4915219, 9830479, - 19660831, 39321619, 78643219, 157286461, MAX_SIZE - }; - -ConcurrentG1Refine::ConcurrentG1Refine() : - _card_counts(NULL), _card_epochs(NULL), - _n_card_counts(0), _max_cards(0), _max_n_card_counts(0), - _cache_size_index(0), _expand_card_counts(false), - _hot_cache(NULL), - _def_use_cache(false), _use_cache(false), - // We initialize the epochs of the array to 0. By initializing - // _n_periods to 1 and not 0 we automatically invalidate all the - // entries on the array. Otherwise we might accidentally think that - // we claimed a card that was in fact never set (see CR7033292). - _n_periods(1), - _threads(NULL), _n_threads(0) +ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) : + _threads(NULL), _n_threads(0), + _hot_card_cache(g1h) { - // Ergomonically select initial concurrent refinement parameters if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) { FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2(ParallelGCThreads, 1)); @@ -75,13 +47,17 @@ FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2); } set_red_zone(MAX2(G1ConcRefinementRedZone, yellow_zone())); + _n_worker_threads = thread_num(); // We need one extra thread to do the young gen rset size sampling. _n_threads = _n_worker_threads + 1; + reset_threshold_step(); _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC); + int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids(); + ConcurrentG1RefineThread *next = NULL; for (int i = _n_threads - 1; i >= 0; i--) { ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i); @@ -100,74 +76,8 @@ } } -int ConcurrentG1Refine::thread_num() { - return MAX2((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1); -} - void ConcurrentG1Refine::init() { - if (G1ConcRSLogCacheSize > 0) { - _g1h = G1CollectedHeap::heap(); - - _max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift; - _max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100; - - size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1; - guarantee(_max_cards < max_card_num, "card_num representation"); - - // We need _n_card_counts to be less than _max_n_card_counts here - // so that the expansion call (below) actually allocates the - // _counts and _epochs arrays. - assert(_n_card_counts == 0, "pre-condition"); - assert(_max_n_card_counts > 0, "pre-condition"); - - // Find the index into cache size array that is of a size that's - // large enough to hold desired_sz. 
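
The constructor above sizes the refinement work zones ergonomically: the green zone defaults to MAX2(ParallelGCThreads, 1), the red zone defaults to twice the yellow zone, and set_red_zone() clamps red to at least yellow. A minimal standalone sketch of that derivation follows; the yellow-zone default is not visible in this hunk, so its multiplier below is an assumption, and the struct is illustrative, not HotSpot code.

#include <algorithm>
#include <cstddef>

struct RefinementZones {
  std::size_t green;   // below green: refinement threads stay idle
  std::size_t yellow;  // between green and yellow: threads activate step-wise
  std::size_t red;     // above red: mutator threads help with refinement
};

RefinementZones default_zones(std::size_t parallel_gc_threads) {
  RefinementZones z;
  z.green  = std::max<std::size_t>(parallel_gc_threads, 1); // as in the hunk above
  z.yellow = z.green * 3;               // assumed multiplier; not shown in this hunk
  z.red    = z.yellow * 2;              // red defaults to twice yellow, as above
  z.red    = std::max(z.red, z.yellow); // clamp, mirroring set_red_zone()
  return z;
}
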
- size_t desired_sz = _max_cards / InitialCacheFraction; - int desired_sz_index = 0; - while (_cc_cache_sizes[desired_sz_index] < desired_sz) { - desired_sz_index += 1; - assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant"); - } - assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant"); - - // If the desired_sz value is between two sizes then - // _cc_cache_sizes[desired_sz_index-1] < desired_sz <= _cc_cache_sizes[desired_sz_index] - // we will start with the lower size in the optimistic expectation that - // we will not need to expand up. Note desired_sz_index could also be 0. - if (desired_sz_index > 0 && - _cc_cache_sizes[desired_sz_index] > desired_sz) { - desired_sz_index -= 1; - } - - if (!expand_card_count_cache(desired_sz_index)) { - // Allocation was unsuccessful - exit - vm_exit_during_initialization("Could not reserve enough space for card count cache"); - } - assert(_n_card_counts > 0, "post-condition"); - assert(_cache_size_index == desired_sz_index, "post-condition"); - - Copy::fill_to_bytes(&_card_counts[0], - _n_card_counts * sizeof(CardCountCacheEntry)); - Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry)); - - ModRefBarrierSet* bs = _g1h->mr_bs(); - guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition"); - _ct_bs = (CardTableModRefBS*)bs; - _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start()); - - _def_use_cache = true; - _use_cache = true; - _hot_cache_size = (1 << G1ConcRSLogCacheSize); - _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC); - _n_hot = 0; - _hot_cache_idx = 0; - - // For refining the cards in the hot cache in parallel - int n_workers = (ParallelGCThreads > 0 ? - _g1h->workers()->total_workers() : 1); - _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers); - _hot_cache_par_claimed_idx = 0; - } + _hot_card_cache.initialize(); } void ConcurrentG1Refine::stop() { @@ -188,17 +98,6 @@ } ConcurrentG1Refine::~ConcurrentG1Refine() { - if (G1ConcRSLogCacheSize > 0) { - // Please see the comment in allocate_card_count_cache - // for why we call os::malloc() and os::free() directly. - assert(_card_counts != NULL, "Logic"); - os::free(_card_counts, mtGC); - assert(_card_epochs != NULL, "Logic"); - os::free(_card_epochs, mtGC); - - assert(_hot_cache != NULL, "Logic"); - FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC); - } if (_threads != NULL) { for (int i = 0; i < _n_threads; i++) { delete _threads[i]; @@ -215,317 +114,18 @@ } } -bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) { - HeapWord* start = _ct_bs->addr_for(card_ptr); - HeapRegion* r = _g1h->heap_region_containing(start); - if (r != NULL && r->is_young()) { - return true; - } - // This card is not associated with a heap region - // so can't be young. - return false; -} - -jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) { - unsigned new_card_num = ptr_2_card_num(card_ptr); - unsigned bucket = hash(new_card_num); - assert(0 <= bucket && bucket < _n_card_counts, "Bounds"); - - CardCountCacheEntry* count_ptr = &_card_counts[bucket]; - CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket]; - - // We have to construct a new entry if we haven't updated the counts - // during the current period, or if the count was updated for a - // different card number. 
- unsigned int new_epoch = (unsigned int) _n_periods; - julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch); - - while (true) { - // Fetch the previous epoch value - julong prev_epoch_entry = epoch_ptr->_value; - julong cas_res; - - if (extract_epoch(prev_epoch_entry) != new_epoch) { - // This entry has not yet been updated during this period. - // Note: we update the epoch value atomically to ensure - // that there is only one winner that updates the cached - // card_ptr value even though all the refine threads share - // the same epoch value. - - cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry, - (volatile jlong*)&epoch_ptr->_value, - (jlong) prev_epoch_entry); - - if (cas_res == prev_epoch_entry) { - // We have successfully won the race to update the - // epoch and card_num value. Make it look like the - // count and eviction count were previously cleared. - count_ptr->_count = 1; - count_ptr->_evict_count = 0; - *count = 0; - // We can defer the processing of card_ptr - *defer = true; - return card_ptr; - } - // We did not win the race to update the epoch field, so some other - // thread must have done it. The value that gets returned by CAS - // should be the new epoch value. - assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch"); - // We could 'continue' here or just re-read the previous epoch value - prev_epoch_entry = epoch_ptr->_value; - } - - // The epoch entry for card_ptr has been updated during this period. - unsigned old_card_num = extract_card_num(prev_epoch_entry); - - // The card count that will be returned to caller - *count = count_ptr->_count; - - // Are we updating the count for the same card? - if (new_card_num == old_card_num) { - // Same card - just update the count. We could have more than one - // thread racing to update count for the current card. It should be - // OK not to use a CAS as the only penalty should be some missed - // increments of the count which delays identifying the card as "hot". - - if (*count < max_jubyte) count_ptr->_count++; - // We can defer the processing of card_ptr - *defer = true; - return card_ptr; - } - - // Different card - evict old card info - if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++; - if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) { - // Trigger a resize the next time we clear - _expand_card_counts = true; - } - - cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry, - (volatile jlong*)&epoch_ptr->_value, - (jlong) prev_epoch_entry); - - if (cas_res == prev_epoch_entry) { - // We successfully updated the card num value in the epoch entry - count_ptr->_count = 0; // initialize counter for new card num - jbyte* old_card_ptr = card_num_2_ptr(old_card_num); - - // Even though the region containg the card at old_card_num was not - // in the young list when old_card_num was recorded in the epoch - // cache it could have been added to the free list and subsequently - // added to the young list in the intervening time. See CR 6817995. - // We do not deal with this case here - it will be handled in - // HeapRegion::oops_on_card_seq_iterate_careful after it has been - // determined that the region containing the card has been allocated - // to, and it's safe to check the young type of the region. - - // We do not want to defer processing of card_ptr in this case - // (we need to refine old_card_ptr and card_ptr) - *defer = false; - return old_card_ptr; - } - // Someone else beat us - try again. 
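
The loop deleted above packs a card number and an epoch into a single 64-bit word and claims it with one compare-and-swap, so exactly one refinement thread per period wins the right to reset the cached counters. The same claim-once-per-epoch pattern in isolation, using std::atomic in place of HotSpot's Atomic::cmpxchg (a sketch, not the original code):

#include <atomic>
#include <cstdint>

// One cache entry: card number in the high 32 bits, epoch in the low 32 bits.
struct EpochEntry {
  std::atomic<uint64_t> value;
};

uint64_t make_entry(uint32_t card_num, uint32_t epoch) {
  return (uint64_t(card_num) << 32) | epoch;
}

// Returns true if the calling thread won the race to claim card_num for epoch.
bool try_claim(EpochEntry& e, uint32_t card_num, uint32_t epoch) {
  uint64_t prev = e.value.load();
  if (uint32_t(prev) == epoch) {
    return false;  // entry already updated during this period
  }
  return e.value.compare_exchange_strong(prev, make_entry(card_num, epoch));
}
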
- } -} - -jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) { - int count; - jbyte* cached_ptr = add_card_count(card_ptr, &count, defer); - assert(cached_ptr != NULL, "bad cached card ptr"); - - // We've just inserted a card pointer into the card count cache - // and got back the card that we just inserted or (evicted) the - // previous contents of that count slot. - - // The card we got back could be in a young region. When the - // returned card (if evicted) was originally inserted, we had - // determined that its containing region was not young. However - // it is possible for the region to be freed during a cleanup - // pause, then reallocated and tagged as young which will result - // in the returned card residing in a young region. - // - // We do not deal with this case here - the change from non-young - // to young could be observed at any time - it will be handled in - // HeapRegion::oops_on_card_seq_iterate_careful after it has been - // determined that the region containing the card has been allocated - // to. - - // The card pointer we obtained from card count cache is not hot - // so do not store it in the cache; return it for immediate - // refining. - if (count < G1ConcRSHotCardLimit) { - return cached_ptr; - } - - // Otherwise, the pointer we got from the _card_counts cache is hot. - jbyte* res = NULL; - MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag); - if (_n_hot == _hot_cache_size) { - res = _hot_cache[_hot_cache_idx]; - _n_hot--; - } - // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx. - _hot_cache[_hot_cache_idx] = cached_ptr; - _hot_cache_idx++; - if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0; - _n_hot++; - - // The card obtained from the hot card cache could be in a young - // region. See above on how this can happen. - - return res; -} - -void ConcurrentG1Refine::clean_up_cache(int worker_i, - G1RemSet* g1rs, - DirtyCardQueue* into_cset_dcq) { - assert(!use_cache(), "cache should be disabled"); - int start_idx; - - while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once - int end_idx = start_idx + _hot_cache_par_chunk_size; - - if (start_idx == - Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) { - // The current worker has successfully claimed the chunk [start_idx..end_idx) - end_idx = MIN2(end_idx, _n_hot); - for (int i = start_idx; i < end_idx; i++) { - jbyte* entry = _hot_cache[i]; - if (entry != NULL) { - if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) { - // 'entry' contains references that point into the current - // collection set. We need to record 'entry' in the DCQS - // that's used for that purpose. - // - // The only time we care about recording cards that contain - // references that point into the collection set is during - // RSet updating while within an evacuation pause. - // In this case worker_i should be the id of a GC worker thread - assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); - assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id"); - into_cset_dcq->enqueue(entry); - } - } - } +void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) { + if (_threads != NULL) { + for (int i = 0; i < worker_thread_num(); i++) { + tc->do_thread(_threads[i]); } } } -// The arrays used to hold the card counts and the epochs must have -// a 1:1 correspondence. 
Hence they are allocated and freed together -// Returns true if the allocations of both the counts and epochs -// were successful; false otherwise. -bool ConcurrentG1Refine::allocate_card_count_cache(size_t n, - CardCountCacheEntry** counts, - CardEpochCacheEntry** epochs) { - // We call the allocation/free routines directly for the counts - // and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY - // macros call AllocateHeap and FreeHeap respectively. - // AllocateHeap will call vm_exit_out_of_memory in the event - // of an allocation failure and abort the JVM. With the - // _counts/epochs arrays we only need to abort the JVM if the - // initial allocation of these arrays fails. - // - // Additionally AllocateHeap/FreeHeap do some tracing of - // allocate/free calls so calling one without calling the - // other can cause inconsistencies in the tracing. So we - // call neither. - - assert(*counts == NULL, "out param"); - assert(*epochs == NULL, "out param"); - - size_t counts_size = n * sizeof(CardCountCacheEntry); - size_t epochs_size = n * sizeof(CardEpochCacheEntry); - - *counts = (CardCountCacheEntry*) os::malloc(counts_size, mtGC); - if (*counts == NULL) { - // allocation was unsuccessful - return false; - } - - *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size, mtGC); - if (*epochs == NULL) { - // allocation was unsuccessful - free counts array - assert(*counts != NULL, "must be"); - os::free(*counts, mtGC); - *counts = NULL; - return false; - } - - // We successfully allocated both counts and epochs - return true; -} - -// Returns true if the card counts/epochs cache was -// successfully expanded; false otherwise. -bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) { - // Can we expand the card count and epoch tables? - if (_n_card_counts < _max_n_card_counts) { - assert(cache_size_idx >= 0 && cache_size_idx < MAX_CC_CACHE_INDEX, "oob"); - - size_t cache_size = _cc_cache_sizes[cache_size_idx]; - // Make sure we don't go bigger than we will ever need - cache_size = MIN2(cache_size, _max_n_card_counts); - - // Should we expand the card count and card epoch tables? - if (cache_size > _n_card_counts) { - // We have been asked to allocate new, larger, arrays for - // the card counts and the epochs. Attempt the allocation - // of both before we free the existing arrays in case - // the allocation is unsuccessful... - CardCountCacheEntry* counts = NULL; - CardEpochCacheEntry* epochs = NULL; - - if (allocate_card_count_cache(cache_size, &counts, &epochs)) { - // Allocation was successful. - // We can just free the old arrays; we're - // not interested in preserving the contents - if (_card_counts != NULL) os::free(_card_counts, mtGC); - if (_card_epochs != NULL) os::free(_card_epochs, mtGC); - - // Cache the size of the arrays and the index that got us there. - _n_card_counts = cache_size; - _cache_size_index = cache_size_idx; - - _card_counts = counts; - _card_epochs = epochs; - - // We successfully allocated/expanded the caches. - return true; - } - } - } - - // We did not successfully expand the caches. - return false; -} - -void ConcurrentG1Refine::clear_and_record_card_counts() { - if (G1ConcRSLogCacheSize == 0) { - return; - } - - double start = os::elapsedTime(); - - if (_expand_card_counts) { - int new_idx = _cache_size_index + 1; - - if (expand_card_count_cache(new_idx)) { - // Allocation was successful and _n_card_counts has - // been updated to the new size. 
We only need to clear - // the epochs so we don't read a bogus epoch value - // when inserting a card into the hot card cache. - Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry)); - } - _expand_card_counts = false; - } - - int this_epoch = (int) _n_periods; - assert((this_epoch+1) <= max_jint, "to many periods"); - // Update epoch - _n_periods++; - double cc_clear_time_ms = (os::elapsedTime() - start) * 1000; - _g1h->g1_policy()->phase_times()->record_cc_clear_time_ms(cc_clear_time_ms); +int ConcurrentG1Refine::thread_num() { + int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads + : ParallelGCThreads; + return MAX2(n_threads, 1); } void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const { @@ -534,3 +134,7 @@ st->cr(); } } + +ConcurrentG1RefineThread * ConcurrentG1Refine::sampling_thread() const { + return _threads[worker_thread_num()]; +} diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,14 +25,17 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP +#include "gc_implementation/g1/g1HotCardCache.hpp" #include "memory/allocation.hpp" -#include "memory/cardTableModRefBS.hpp" #include "runtime/thread.hpp" #include "utilities/globalDefinitions.hpp" // Forward decl class ConcurrentG1RefineThread; +class G1CollectedHeap; +class G1HotCardCache; class G1RemSet; +class DirtyCardQueue; class ConcurrentG1Refine: public CHeapObj { ConcurrentG1RefineThread** _threads; @@ -61,141 +64,14 @@ int _thread_threshold_step; + // We delay the refinement of 'hot' cards using the hot card cache. + G1HotCardCache _hot_card_cache; + // Reset the threshold step value based of the current zone boundaries. void reset_threshold_step(); - // The cache for card refinement. - bool _use_cache; - bool _def_use_cache; - - size_t _n_periods; // Used as clearing epoch - - // An evicting cache of the number of times each card - // is accessed. Reduces, but does not eliminate, the amount - // of duplicated processing of dirty cards. - - enum SomePrivateConstants { - epoch_bits = 32, - card_num_shift = epoch_bits, - epoch_mask = AllBits, - card_num_mask = AllBits, - - // The initial cache size is approximately this fraction - // of a maximal cache (i.e. 
the size needed for all cards - // in the heap) - InitialCacheFraction = 512 - }; - - const static julong card_num_mask_in_place = - (julong) card_num_mask << card_num_shift; - - typedef struct { - julong _value; // | card_num | epoch | - } CardEpochCacheEntry; - - julong make_epoch_entry(unsigned int card_num, unsigned int epoch) { - assert(0 <= card_num && card_num < _max_cards, "Bounds"); - assert(0 <= epoch && epoch <= _n_periods, "must be"); - - return ((julong) card_num << card_num_shift) | epoch; - } - - unsigned int extract_epoch(julong v) { - return (v & epoch_mask); - } - - unsigned int extract_card_num(julong v) { - return (v & card_num_mask_in_place) >> card_num_shift; - } - - typedef struct { - unsigned char _count; - unsigned char _evict_count; - } CardCountCacheEntry; - - CardCountCacheEntry* _card_counts; - CardEpochCacheEntry* _card_epochs; - - // The current number of buckets in the card count cache - size_t _n_card_counts; - - // The number of cards for the entire reserved heap - size_t _max_cards; - - // The max number of buckets for the card counts and epochs caches. - // This is the maximum that the counts and epochs will grow to. - // It is specified as a fraction or percentage of _max_cards using - // G1MaxHotCardCountSizePercent. - size_t _max_n_card_counts; - - // Possible sizes of the cache: odd primes that roughly double in size. - // (See jvmtiTagMap.cpp). - enum { - MAX_CC_CACHE_INDEX = 15 // maximum index into the cache size array. - }; - - static size_t _cc_cache_sizes[MAX_CC_CACHE_INDEX]; - - // The index in _cc_cache_sizes corresponding to the size of - // _card_counts. - int _cache_size_index; - - bool _expand_card_counts; - - const jbyte* _ct_bot; - - jbyte** _hot_cache; - int _hot_cache_size; - int _n_hot; - int _hot_cache_idx; - - int _hot_cache_par_chunk_size; - volatile int _hot_cache_par_claimed_idx; - - // Needed to workaround 6817995 - CardTableModRefBS* _ct_bs; - G1CollectedHeap* _g1h; - - // Helper routine for expand_card_count_cache(). - // The arrays used to hold the card counts and the epochs must have - // a 1:1 correspondence. Hence they are allocated and freed together. - // Returns true if the allocations of both the counts and epochs - // were successful; false otherwise. - bool allocate_card_count_cache(size_t n, - CardCountCacheEntry** counts, - CardEpochCacheEntry** epochs); - - // Expands the arrays that hold the card counts and epochs - // to the cache size at index. Returns true if the expansion/ - // allocation was successful; false otherwise. - bool expand_card_count_cache(int index); - - // hash a given key (index of card_ptr) with the specified size - static unsigned int hash(size_t key, size_t size) { - return (unsigned int) (key % size); - } - - // hash a given key (index of card_ptr) - unsigned int hash(size_t key) { - return hash(key, _n_card_counts); - } - - unsigned int ptr_2_card_num(jbyte* card_ptr) { - return (unsigned int) (card_ptr - _ct_bot); - } - - jbyte* card_num_2_ptr(unsigned int card_num) { - return (jbyte*) (_ct_bot + card_num); - } - - // Returns the count of this card after incrementing it. - jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer); - - // Returns true if this card is in a young region - bool is_young_card(jbyte* card_ptr); - public: - ConcurrentG1Refine(); + ConcurrentG1Refine(G1CollectedHeap* g1h); ~ConcurrentG1Refine(); void init(); // Accomplish some initialization that has to wait. 
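
The G1HotCardCache that replaces the fields deleted above is, at bottom, a bounded buffer: inserting into a full cache evicts an existing entry, which is handed back to the caller for immediate refinement (the removed cache_insert() earlier in this patch shows the same discipline). A minimal sketch of that eviction policy, not the G1HotCardCache interface:

#include <cstddef>

// Fixed-capacity cache of 'hot' card pointers; illustrative only.
class HotCardRing {
  static const std::size_t kSize = 8;  // assumed capacity (G1 derives it from a flag)
  void*       _buf[kSize];
  std::size_t _idx;                    // next slot to overwrite
  std::size_t _count;                  // live entries
public:
  HotCardRing() : _idx(0), _count(0) {
    for (std::size_t i = 0; i < kSize; i++) _buf[i] = nullptr;
  }
  // Absorbs 'card'; returns nullptr, or the entry evicted to make room.
  void* insert(void* card) {
    void* evicted = nullptr;
    if (_count == kSize) {
      evicted = _buf[_idx];            // buffer full: oldest entry falls out
      _count--;
    }
    _buf[_idx] = card;
    _idx = (_idx + 1) % kSize;
    _count++;
    return evicted;
  }
};
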
@@ -203,36 +79,14 @@ void reinitialize_threads(); - // Iterate over the conc refine threads + // Iterate over all concurrent refinement threads void threads_do(ThreadClosure *tc); - // If this is the first entry for the slot, writes into the cache and - // returns NULL. If it causes an eviction, returns the evicted pointer. - // Otherwise, its a cache hit, and returns NULL. - jbyte* cache_insert(jbyte* card_ptr, bool* defer); - - // Process the cached entries. - void clean_up_cache(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq); - - // Set up for parallel processing of the cards in the hot cache - void clear_hot_cache_claimed_index() { - _hot_cache_par_claimed_idx = 0; - } + // Iterate over all worker refinement threads + void worker_threads_do(ThreadClosure * tc); - // Discard entries in the hot cache. - void clear_hot_cache() { - _hot_cache_idx = 0; _n_hot = 0; - } - - bool hot_cache_is_empty() { return _n_hot == 0; } - - bool use_cache() { return _use_cache; } - void set_use_cache(bool b) { - if (b) _use_cache = _def_use_cache; - else _use_cache = false; - } - - void clear_and_record_card_counts(); + // The RS sampling thread + ConcurrentG1RefineThread * sampling_thread() const; static int thread_num(); @@ -250,6 +104,8 @@ int worker_thread_num() const { return _n_worker_threads; } int thread_threshold_step() const { return _thread_threshold_step; } + + G1HotCardCache* hot_card_cache() { return &_hot_card_cache; } }; #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -36,6 +36,9 @@ #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "memory/genOopClosures.inline.hpp" #include "memory/referencePolicy.hpp" #include "memory/resourceArea.hpp" @@ -1273,10 +1276,9 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(before)"); } G1CollectorPolicy* g1p = g1h->g1_policy(); @@ -1300,10 +1302,9 @@ // Verify the heap w.r.t. the previous marking bitmap. 
if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(overflow)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(overflow)"); } // Clear the marking state because we will be restarting @@ -1323,10 +1324,9 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UseNextMarking); + Universe::verify(VerifyOption_G1UseNextMarking, + " VerifyDuringGC:(after)"); } assert(!restart_for_overflow(), "sanity"); // Completely reset the marking state since marking completed @@ -1345,6 +1345,9 @@ _remark_times.add((now - start) * 1000.0); g1p->record_concurrent_mark_remark_end(); + + G1CMIsAliveClosure is_alive(g1h); + g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive); } // Base class of the closures that finalize and verify the @@ -1972,10 +1975,9 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(before)"); } G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); @@ -2127,13 +2129,13 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(after)"); } g1h->verify_region_sets_optional(); + g1h->trace_heap_after_concurrent_cycle(); } void ConcurrentMark::completeCleanup() { @@ -2444,7 +2446,7 @@ if (G1Log::finer()) { gclog_or_tty->put(' '); } - TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty); + GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm()); ReferenceProcessor* rp = g1h->ref_processor_cm(); @@ -2496,10 +2498,13 @@ rp->set_active_mt_degree(active_workers); // Process the weak references. 
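
GCTraceTime, substituted for TraceTime above, is an RAII helper: construction records a start timestamp (and, via its last argument, feeds the phase into the concurrent-mark GCTimer), and destruction reports the elapsed time when the scope closes. A schematic of that shape with standard-library timing, not the HotSpot class:

#include <chrono>
#include <cstdio>

class ScopedPhaseTimer {
  const char* _name;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedPhaseTimer(const char* name)
      : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    double ms = std::chrono::duration<double, std::milli>(
                    std::chrono::steady_clock::now() - _start).count();
    std::printf("[%s, %.3f ms]\n", _name, ms);  // reported as the scope closes
  }
};

// usage: { ScopedPhaseTimer t("GC ref-proc"); /* ... process references ... */ }
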
-      rp->process_discovered_references(&g1_is_alive,
-                                        &g1_keep_alive,
-                                        &g1_drain_mark_stack,
-                                        executor);
+      const ReferenceProcessorStats& stats =
+          rp->process_discovered_references(&g1_is_alive,
+                                            &g1_keep_alive,
+                                            &g1_drain_mark_stack,
+                                            executor,
+                                            g1h->gc_timer_cm());
+      g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
 
       // The do_oop work routines of the keep_alive and drain_marking_stack
       // oop closures will set the has_overflown flag if we overflow the
@@ -3232,6 +3237,9 @@
   satb_mq_set.set_active_all_threads(
                                  false, /* new active value */
                                  satb_mq_set.is_active() /* expected_active */);
+
+  _g1h->trace_heap_after_concurrent_cycle();
+  _g1h->register_concurrent_cycle_end();
 }
 
 static void print_ms_time_info(const char* prefix, const char* name,
@@ -4520,7 +4528,8 @@
     _total_used_bytes(0), _total_capacity_bytes(0),
     _total_prev_live_bytes(0), _total_next_live_bytes(0),
     _hum_used_bytes(0), _hum_capacity_bytes(0),
-    _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
+    _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
+    _total_remset_bytes(0) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   MemRegion g1_committed = g1h->g1_committed();
   MemRegion g1_reserved = g1h->g1_reserved();
@@ -4538,23 +4547,25 @@
                  HeapRegion::GrainBytes);
   _out->print_cr(G1PPRL_LINE_PREFIX);
   _out->print_cr(G1PPRL_LINE_PREFIX
-                 G1PPRL_TYPE_H_FORMAT
-                 G1PPRL_ADDR_BASE_H_FORMAT
-                 G1PPRL_BYTE_H_FORMAT
-                 G1PPRL_BYTE_H_FORMAT
-                 G1PPRL_BYTE_H_FORMAT
-                 G1PPRL_DOUBLE_H_FORMAT,
-                 "type", "address-range",
-                 "used", "prev-live", "next-live", "gc-eff");
+                 G1PPRL_TYPE_H_FORMAT
+                 G1PPRL_ADDR_BASE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_DOUBLE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT,
+                 "type", "address-range",
+                 "used", "prev-live", "next-live", "gc-eff", "remset");
   _out->print_cr(G1PPRL_LINE_PREFIX
-                 G1PPRL_TYPE_H_FORMAT
-                 G1PPRL_ADDR_BASE_H_FORMAT
-                 G1PPRL_BYTE_H_FORMAT
-                 G1PPRL_BYTE_H_FORMAT
-                 G1PPRL_BYTE_H_FORMAT
-                 G1PPRL_DOUBLE_H_FORMAT,
-                 "", "",
-                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
+                 G1PPRL_TYPE_H_FORMAT
+                 G1PPRL_ADDR_BASE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_DOUBLE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT,
+                 "", "",
+                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", "(bytes)");
 }
 
 // It takes as a parameter a reference to one of the _hum_* fields, it
@@ -4596,6 +4607,7 @@
   size_t prev_live_bytes = r->live_bytes();
   size_t next_live_bytes = r->next_live_bytes();
   double gc_eff = r->gc_efficiency();
+  size_t remset_bytes = r->rem_set()->mem_size();
   if (r->used() == 0) {
     type = "FREE";
   } else if (r->is_survivor()) {
@@ -4629,6 +4641,7 @@
   _total_capacity_bytes += capacity_bytes;
   _total_prev_live_bytes += prev_live_bytes;
   _total_next_live_bytes += next_live_bytes;
+  _total_remset_bytes += remset_bytes;
 
   // Print a line for this particular region.
   _out->print_cr(G1PPRL_LINE_PREFIX
@@ -4637,14 +4650,17 @@
                  G1PPRL_BYTE_FORMAT
                  G1PPRL_BYTE_FORMAT
                  G1PPRL_BYTE_FORMAT
-                 G1PPRL_DOUBLE_FORMAT,
+                 G1PPRL_DOUBLE_FORMAT
+                 G1PPRL_BYTE_FORMAT,
                  type, bottom, end,
-                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff);
+                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, remset_bytes);
 
   return false;
 }
 
 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
+  // Add the static memory usage to the remembered set sizes.
+  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
   // Print the footer of the output.
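
The G1PPRL_* header and row formats above are assembled by C/C++ adjacent string-literal concatenation: the preprocessor pastes the fragments into one printf-style format, which is why the new remset column only needs one extra format macro plus one extra argument. A small demonstration with invented column macros:

#include <cstddef>
#include <cstdio>

#define COL_TYPE  "%8s"
#define COL_BYTES " %12zu"
#define COL_EFF   " %10.1f"

int main() {
  // The three fragments concatenate into a single format string at compile time.
  std::printf(COL_TYPE COL_BYTES COL_EFF "\n",
              "OLD", static_cast<std::size_t>(1048576), 42.5);
  return 0;
}
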
_out->print_cr(G1PPRL_LINE_PREFIX); _out->print_cr(G1PPRL_LINE_PREFIX @@ -4652,13 +4668,15 @@ G1PPRL_SUM_MB_FORMAT("capacity") G1PPRL_SUM_MB_PERC_FORMAT("used") G1PPRL_SUM_MB_PERC_FORMAT("prev-live") - G1PPRL_SUM_MB_PERC_FORMAT("next-live"), + G1PPRL_SUM_MB_PERC_FORMAT("next-live") + G1PPRL_SUM_MB_FORMAT("remset"), bytes_to_mb(_total_capacity_bytes), bytes_to_mb(_total_used_bytes), perc(_total_used_bytes, _total_capacity_bytes), bytes_to_mb(_total_prev_live_bytes), perc(_total_prev_live_bytes, _total_capacity_bytes), bytes_to_mb(_total_next_live_bytes), - perc(_total_next_live_bytes, _total_capacity_bytes)); + perc(_total_next_live_bytes, _total_capacity_bytes), + bytes_to_mb(_total_remset_bytes)); _out->cr(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/concurrentMark.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -44,9 +44,6 @@ public: G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { } - void do_object(oop obj) { - ShouldNotCallThis(); - } bool do_object_b(oop obj); }; @@ -572,8 +569,6 @@ void clear_has_overflown() { _has_overflown = false; } bool restart_for_overflow() { return _restart_for_overflow; } - bool has_aborted() { return _has_aborted; } - // Methods to enter the two overflow sync barriers void enter_first_sync_barrier(uint worker_id); void enter_second_sync_barrier(uint worker_id); @@ -824,6 +819,8 @@ // Called to abort the marking cycle after a Full GC takes palce. void abort(); + bool has_aborted() { return _has_aborted; } + // This prints the global/local fingers. It is used for debugging. NOT_PRODUCT(void print_finger();) @@ -1257,6 +1254,9 @@ size_t _hum_prev_live_bytes; size_t _hum_next_live_bytes; + // Accumulator for the remembered set size + size_t _total_remset_bytes; + static double perc(size_t val, size_t total) { if (total == 0) { return 0.0; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -93,7 +93,6 @@ ResourceMark rm; HandleMark hm; double cycle_start = os::elapsedVTime(); - char verbose_str[128]; // We have to ensure that we finish scanning the root regions // before the next GC takes place. To ensure this we have to @@ -155,8 +154,7 @@ } CMCheckpointRootsFinalClosure final_cl(_cm); - sprintf(verbose_str, "GC remark"); - VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */); + VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */); VMThread::execute(&op); } if (cm()->restart_for_overflow()) { @@ -187,8 +185,7 @@ } CMCleanUp cl_cl(_cm); - sprintf(verbose_str, "GC cleanup"); - VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */); + VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */); VMThread::execute(&op); } else { // We don't want to update the marking status if a GC pause @@ -292,6 +289,7 @@ // called System.gc() with +ExplicitGCInvokesConcurrent). 
_sts.join(); g1h->increment_old_marking_cycles_completed(true /* concurrent */); + g1h->register_concurrent_cycle_end(); _sts.leave(); } assert(_should_terminate, "just checking"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/evacuationInfo.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/evacuationInfo.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP + +#include "memory/allocation.hpp" + +class EvacuationInfo : public StackObj { + uint _collectionset_regions; + uint _allocation_regions; + size_t _collectionset_used_before; + size_t _collectionset_used_after; + size_t _alloc_regions_used_before; + size_t _bytes_copied; + uint _regions_freed; + +public: + EvacuationInfo() : _collectionset_regions(0), _allocation_regions(0), _collectionset_used_before(0), + _collectionset_used_after(0), _alloc_regions_used_before(0), + _bytes_copied(0), _regions_freed(0) { } + + void set_collectionset_regions(uint collectionset_regions) { + _collectionset_regions = collectionset_regions; + } + + void set_allocation_regions(uint allocation_regions) { + _allocation_regions = allocation_regions; + } + + void set_collectionset_used_before(size_t used) { + _collectionset_used_before = used; + } + + void increment_collectionset_used_after(size_t used) { + _collectionset_used_after += used; + } + + void set_alloc_regions_used_before(size_t used) { + _alloc_regions_used_before = used; + } + + void set_bytes_copied(size_t copied) { + _bytes_copied = copied; + } + + void set_regions_freed(uint freed) { + _regions_freed += freed; + } + + uint collectionset_regions() { return _collectionset_regions; } + uint allocation_regions() { return _allocation_regions; } + size_t collectionset_used_before() { return _collectionset_used_before; } + size_t collectionset_used_after() { return _collectionset_used_after; } + size_t alloc_regions_used_before() { return _alloc_regions_used_before; } + size_t bytes_copied() { return _bytes_copied; } + uint regions_freed() { return _regions_freed; } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Tue Jun 18 14:23:29 2013 -0700 +++ 
b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -77,7 +77,7 @@ assert(delta > 0, "just checking"); if (!_vs.expand_by(delta)) { // Do better than this for Merlin - vm_exit_out_of_memory(delta, "offset table expansion"); + vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion"); } assert(_vs.high() == high + delta, "invalid expansion"); // Initialization of the contents is left to the diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1CardCounts.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1CardCounts.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/g1CollectorPolicy.hpp" +#include "gc_implementation/g1/g1GCPhaseTimes.hpp" +#include "memory/cardTableModRefBS.hpp" +#include "services/memTracker.hpp" +#include "utilities/copy.hpp" + +void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) { + if (has_count_table()) { + check_card_num(from_card_num, + err_msg("from card num out of range: "SIZE_FORMAT, from_card_num)); + assert(from_card_num < to_card_num, + err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT, + from_card_num, to_card_num)); + assert(to_card_num <= _committed_max_card_num, + err_msg("to card num out of range: " + "to: "SIZE_FORMAT ", " + "max: "SIZE_FORMAT, + to_card_num, _committed_max_card_num)); + + to_card_num = MIN2(_committed_max_card_num, to_card_num); + + Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num)); + } +} + +G1CardCounts::G1CardCounts(G1CollectedHeap *g1h): + _g1h(g1h), _card_counts(NULL), + _reserved_max_card_num(0), _committed_max_card_num(0), + _committed_size(0) {} + +void G1CardCounts::initialize() { + assert(_g1h->max_capacity() > 0, "initialization order"); + assert(_g1h->capacity() == 0, "initialization order"); + + if (G1ConcRSHotCardLimit > 0) { + // The max value we can store in the counts table is + // max_jubyte. Guarantee the value of the hot + // threshold limit is no more than this. 
+    guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
+
+    ModRefBarrierSet* bs = _g1h->mr_bs();
+    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
+    _ct_bs = (CardTableModRefBS*)bs;
+    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
+
+    // Allocate/Reserve the counts table
+    size_t reserved_bytes = _g1h->max_capacity();
+    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
+
+    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
+    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
+    if (!rs.is_reserved()) {
+      warning("Could not reserve enough space for the card counts table");
+      guarantee(!has_reserved_count_table(), "should be NULL");
+      return;
+    }
+
+    MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+
+    _card_counts_storage.initialize(rs, 0);
+    _card_counts = (jubyte*) _card_counts_storage.low();
+  }
+}
+
+void G1CardCounts::resize(size_t heap_capacity) {
+  // Expand the card counts table to handle a heap with the given capacity.
+
+  if (!has_reserved_count_table()) {
+    // Don't expand if we failed to reserve the card counts table.
+    return;
+  }
+
+  assert(_committed_size ==
+         ReservedSpace::allocation_align_size_up(_committed_size),
+         err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
+
+  // Verify that the committed space for the card counts matches our
+  // committed max card num. Note that for some allocation alignments, the
+  // amount of space actually committed for the counts table may span
+  // more cards than the number spanned by the maximum heap.
+  size_t prev_committed_size = _committed_size;
+  size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
+
+  assert(prev_committed_card_num == _committed_max_card_num,
+         err_msg("Card mismatch: "
+                 "prev: " SIZE_FORMAT ", "
+                 "committed: "SIZE_FORMAT", "
+                 "reserved: "SIZE_FORMAT,
+                 prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
+
+  size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
+  size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
+  size_t new_committed_card_num = committed_to_card_num(new_committed_size);
+
+  if (_committed_max_card_num < new_committed_card_num) {
+    // We need to expand the backing store for the card counts.
+    size_t expand_size = new_committed_size - prev_committed_size;
+
+    if (!_card_counts_storage.expand_by(expand_size)) {
+      warning("Card counts table backing store commit failure");
+      return;
+    }
+    assert(_card_counts_storage.committed_size() == new_committed_size,
+           "expansion commit failure");
+
+    _committed_size = new_committed_size;
+    _committed_max_card_num = new_committed_card_num;
+
+    clear_range(prev_committed_card_num, _committed_max_card_num);
+  }
+}
+
+uint G1CardCounts::add_card_count(jbyte* card_ptr) {
+  // Returns the number of times the card has been refined.
+  // If we failed to reserve/commit the counts table, return 0.
+  // If card_ptr is beyond the committed end of the counts table,
+  // return 0.
+  // Otherwise return the actual count.
+  // Unless G1ConcRSHotCardLimit has been set appropriately,
+  // returning 0 will result in the card being considered
+  // cold and refined immediately.
+  uint count = 0;
+  if (has_count_table()) {
+    size_t card_num = ptr_2_card_num(card_ptr);
+    if (card_num < _committed_max_card_num) {
+      count = (uint) _card_counts[card_num];
+      if (count < G1ConcRSHotCardLimit) {
+        _card_counts[card_num] += 1;
+      }
+      assert(_card_counts[card_num] <= G1ConcRSHotCardLimit,
+             err_msg("Refinement count overflow? "
+                     "new count: "UINT32_FORMAT,
+                     (uint) _card_counts[card_num]));
+    }
+  }
+  return count;
+}
+
+bool G1CardCounts::is_hot(uint count) {
+  return (count >= G1ConcRSHotCardLimit);
+}
+
+void G1CardCounts::clear_region(HeapRegion* hr) {
+  assert(!hr->isHumongous(), "Should have been cleared");
+  if (has_count_table()) {
+    HeapWord* bottom = hr->bottom();
+
+    // We use the last address in hr as hr could be the
+    // last region in the heap. In that case, trying to find
+    // the card for hr->end() would be an out-of-bounds access
+    // to the card table.
+    HeapWord* last = hr->end() - 1;
+    assert(_g1h->g1_committed().contains(last),
+           err_msg("last not in committed: "
+                   "last: " PTR_FORMAT ", "
+                   "committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
+                   last,
+                   _g1h->g1_committed().start(),
+                   _g1h->g1_committed().end()));
+
+    const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
+    const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
+
+#ifdef ASSERT
+    HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
+    assert(start_addr == hr->bottom(), "alignment");
+    HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
+    assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
+#endif // ASSERT
+
+    // Clear the counts for the (exclusive) card range.
+    size_t from_card_num = ptr_2_card_num(from_card_ptr);
+    size_t to_card_num = ptr_2_card_num(last_card_ptr) + 1;
+    clear_range(from_card_num, to_card_num);
+  }
+}
+
+void G1CardCounts::clear_all() {
+  assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
+  clear_range((size_t)0, _committed_max_card_num);
+}
+
+G1CardCounts::~G1CardCounts() {
+  if (has_reserved_count_table()) {
+    _card_counts_storage.release();
+  }
+}
+
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1CardCounts.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/virtualspace.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class CardTableModRefBS;
+class G1CollectedHeap;
+class HeapRegion;
+
+// Table to track the number of times a card has been refined. Once
+// a card has been refined a certain number of times, it is
+// considered 'hot' and its refinement is delayed by inserting the
+// card into the hot card cache. The card will then be refined when
+// it is evicted from the hot card cache, or when the hot card cache
+// is 'drained' during the next evacuation pause.
+
+class G1CardCounts: public CHeapObj {
+  G1CollectedHeap* _g1h;
+
+  // The table of counts
+  jubyte* _card_counts;
+
+  // Max capacity of the reserved space for the counts table
+  size_t _reserved_max_card_num;
+
+  // Max capacity of the committed space for the counts table
+  size_t _committed_max_card_num;
+
+  // Size of committed space for the counts table
+  size_t _committed_size;
+
+  // CardTable bottom.
+  const jbyte* _ct_bot;
+
+  // Barrier set
+  CardTableModRefBS* _ct_bs;
+
+  // The virtual memory backing the counts table
+  VirtualSpace _card_counts_storage;
+
+  // Returns true if the card counts table has been reserved.
+  bool has_reserved_count_table() { return _card_counts != NULL; }
+
+  // Returns true if the card counts table has been reserved and committed.
+  bool has_count_table() {
+    return has_reserved_count_table() && _committed_max_card_num > 0;
+  }
+
+  void check_card_num(size_t card_num, const char* msg) {
+    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
+  }
+
+  size_t ptr_2_card_num(const jbyte* card_ptr) {
+    assert(card_ptr >= _ct_bot,
+           err_msg("Invalid card pointer: "
+                   "card_ptr: " PTR_FORMAT ", "
+                   "_ct_bot: " PTR_FORMAT,
+                   card_ptr, _ct_bot));
+    size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
+    check_card_num(card_num,
+                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
+    return card_num;
+  }
+
+  jbyte* card_num_2_ptr(size_t card_num) {
+    check_card_num(card_num,
+                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
+    return (jbyte*) (_ct_bot + card_num);
+  }
+
+  // Helper routine.
+  // Returns the number of cards that can be counted by the given committed
+  // table size, with a maximum of the number of cards spanned by the max
+  // capacity of the heap.
+  size_t committed_to_card_num(size_t committed_size) {
+    return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte));
+  }
+
+  // Clear the counts table for the given (exclusive) index range.
+  void clear_range(size_t from_card_num, size_t to_card_num);
+
+ public:
+  G1CardCounts(G1CollectedHeap* g1h);
+  ~G1CardCounts();
+
+  void initialize();
+
+  // Resize the committed space for the card counts table in
+  // response to a resize of the committed space for the heap.
+  void resize(size_t heap_capacity);
+
+  // Increments the refinement count for the given card.
+  // Returns the pre-increment count value.
+  uint add_card_count(jbyte* card_ptr);
+
+  // Returns true if the given count is high enough to be considered
+  // 'hot'; false otherwise.
+  bool is_hot(uint count);
+
+  // Clears the card counts for the cards spanned by the region
+  void clear_region(HeapRegion* hr);
+
+  // Clear the entire card counts table during GC.
+  // Updates the policy stats with the duration.
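
The counts table declared above is one unsigned byte per card with saturating increments: add_card_count() returns the pre-increment value and is_hot() compares it against G1ConcRSHotCardLimit. The core pattern in isolation (a sketch; kHotLimit stands in for the flag):

#include <cstddef>
#include <cstdint>

const unsigned kHotLimit = 4;  // illustrative threshold

// Returns the pre-increment count; the stored byte never overflows because
// it stops incrementing once the hot limit is reached.
unsigned add_card_count(uint8_t* counts, std::size_t card_num) {
  unsigned count = counts[card_num];
  if (count < kHotLimit) {
    counts[card_num] = (uint8_t)(count + 1);
  }
  return count;
}

bool is_hot(unsigned count) { return count >= kHotLimit; }
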
+ void clear_all(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,10 +38,15 @@ #include "gc_implementation/g1/g1MarkSweep.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" +#include "gc_implementation/g1/g1YCTypes.hpp" #include "gc_implementation/g1/heapRegion.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "memory/gcLocker.inline.hpp" #include "memory/genOopClosures.inline.hpp" @@ -76,7 +81,7 @@ // The number of GC workers is passed to heap_region_par_iterate_chunked(). // It does use run_task() which sets _n_workers in the task. // G1ParTask executes g1_process_strong_roots() -> -// SharedHeap::process_strong_roots() which calls eventuall to +// SharedHeap::process_strong_roots() which calls eventually to // CardTableModRefBS::par_non_clean_card_iterate_work() which uses // SequentialSubTasksDone. SharedHeap::process_strong_roots() also // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). @@ -96,7 +101,7 @@ _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) {} bool do_card_ptr(jbyte* card_ptr, int worker_i) { - bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); + bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false); // This path is executed by the concurrent refine or mutator threads, // concurrently, and so we do not care if card_ptr contains references // that point into the collection set. @@ -457,7 +462,7 @@ #endif // Returns true if the reference points to an object that -// can move in an incremental collecction. +// can move in an incremental collection. bool G1CollectedHeap::is_scavengable(const void* p) { G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectorPolicy* g1p = g1h->g1_policy(); @@ -548,7 +553,7 @@ return res; } - // Wait here until we get notifed either when (a) there are no + // Wait here until we get notified either when (a) there are no // more free regions coming or (b) some regions have been moved on // the secondary_free_list. SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); @@ -623,7 +628,7 @@ uint first = G1_NULL_HRS_INDEX; if (num_regions == 1) { // Only one region to allocate, no need to go through the slower - // path. The caller will attempt the expasion if this fails, so + // path. The caller will attempt the expansion if this fails, so // let's not try to expand here too. HeapRegion* hr = new_region(word_size, false /* do_expand */); if (hr != NULL) { @@ -688,7 +693,7 @@ // the first region. 
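
The SecondaryFreeList wait earlier in this hunk is the classic monitor pattern: a thread sleeps until either more free regions arrive or the producer signals that none are coming. A standalone sketch with standard primitives (HotSpot uses its own Mutex/Monitor classes; the names here are invented):

#include <condition_variable>
#include <mutex>

struct SecondaryFreeList {
  std::mutex              lock;
  std::condition_variable cv;
  int                     length;               // regions currently on the list
  bool                    free_regions_coming;  // producer still active?

  SecondaryFreeList() : length(0), free_regions_coming(true) {}

  // Returns true if a region may be available, false if no more will come.
  bool wait_for_region() {
    std::unique_lock<std::mutex> l(lock);
    while (length == 0 && free_regions_coming) {
      cv.wait(l);  // woken when regions are appended or the producer finishes
    }
    return length > 0;
  }
};
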
HeapWord* new_obj = first_hr->bottom(); // This will be the new end of the first region in the series that - // should also match the end of the last region in the seriers. + // should also match the end of the last region in the series. HeapWord* new_end = new_obj + word_size_sum; // This will be the new top of the first region that will reflect // this allocation. @@ -863,7 +868,7 @@ bool* gc_overhead_limit_was_exceeded) { assert_heap_not_locked_and_not_at_safepoint(); - // Loop until the allocation is satisified, or unsatisfied after GC. + // Loop until the allocation is satisfied, or unsatisfied after GC. for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { unsigned int gc_count_before; @@ -1003,7 +1008,7 @@ (*gclocker_retry_count_ret) += 1; } - // We can reach here if we were unsuccessul in scheduling a + // We can reach here if we were unsuccessful in scheduling a // collection (because another thread beat us to it) or if we were // stalled due to the GC locker. In either can we should retry the // allocation attempt in case another thread successfully @@ -1128,7 +1133,7 @@ (*gclocker_retry_count_ret) += 1; } - // We can reach here if we were unsuccessul in scheduling a + // We can reach here if we were unsuccessful in scheduling a // collection (because another thread beat us to it) or if we were // stalled due to the GC locker. In either can we should retry the // allocation attempt in case another thread successfully @@ -1271,9 +1276,8 @@ if (guard && total_collections() >= VerifyGCStartAt) { double verify_start = os::elapsedTime(); HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(msg); prepare_for_verify(); - Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, msg); verify_time_ms = (os::elapsedTime() - verify_start) * 1000; } @@ -1299,12 +1303,19 @@ return false; } + STWGCTimer* gc_timer = G1MarkSweep::gc_timer(); + gc_timer->register_gc_start(os::elapsed_counter()); + + SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer(); + gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start()); + SvcGCMarker sgcm(SvcGCMarker::FULL); ResourceMark rm; print_heap_before_gc(); - - size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + trace_heap_before_gc(gc_tracer); + + size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); HRSPhaseSetter x(HRSPhaseFullGC); verify_region_sets_optional(); @@ -1322,233 +1333,245 @@ gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); - TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); - TraceCollectorStats tcs(g1mm()->full_collection_counters()); - TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); - - double start = os::elapsedTime(); - g1_policy()->record_full_collection_start(); - - // Note: When we have a more flexible GC logging framework that - // allows us to add optional attributes to a GC log record we - // could consider timing and reporting how long we wait in the - // following two methods. - wait_while_free_regions_coming(); - // If we start the compaction before the CM threads finish - // scanning the root regions we might trip them over as we'll - // be moving objects / updating references. So let's wait until - // they are done. By telling them to abort, they should complete - // early. 
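
The allocation slow path above ("Loop until the allocation is satisfied, or unsatisfied after GC") keeps retrying because the collection it schedules may be performed by another thread, and the GC locker can force it to stall first. A schematic of that loop; all three helpers are invented stand-ins stubbed out so the sketch compiles, not HotSpot functions:

#include <cstddef>

static void* try_allocate(std::size_t)      { return nullptr; } // assumed fast path
static bool  schedule_collection_and_wait() { return true; }    // assumed: false if GC locker held
static void  stall_on_gc_locker()           {}                  // assumed: blocks until released

void* allocate_slow(std::size_t word_size) {
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    if (void* result = try_allocate(word_size)) {
      return result;  // another thread may have freed space while we waited
    }
    if (!schedule_collection_and_wait()) {
      stall_on_gc_locker();  // GC locker active: wait, then retry the allocation
    }
  }
}
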
- _cm->root_regions()->abort(); - _cm->root_regions()->wait_until_scan_finished(); - append_secondary_free_list_if_not_empty_with_lock(); - - gc_prologue(true); - increment_total_collections(true /* full gc */); - increment_old_marking_cycles_started(); - - size_t g1h_prev_used = used(); - assert(used() == recalculate_used(), "Should be equal"); - - verify_before_gc(); - - pre_full_gc_dump(); - - COMPILER2_PRESENT(DerivedPointerTable::clear()); - - // Disable discovery and empty the discovered lists - // for the CM ref processor. - ref_processor_cm()->disable_discovery(); - ref_processor_cm()->abandon_partial_discovery(); - ref_processor_cm()->verify_no_references_recorded(); - - // Abandon current iterations of concurrent marking and concurrent - // refinement, if any are in progress. We have to do this before - // wait_until_scan_finished() below. - concurrent_mark()->abort(); - - // Make sure we'll choose a new allocation region afterwards. - release_mutator_alloc_region(); - abandon_gc_alloc_regions(); - g1_rem_set()->cleanupHRRS(); - - // We should call this after we retire any currently active alloc - // regions so that all the ALLOC / RETIRE events are generated - // before the start GC event. - _hr_printer.start_gc(true /* full */, (size_t) total_collections()); - - // We may have added regions to the current incremental collection - // set between the last GC or pause and now. We need to clear the - // incremental collection set and then start rebuilding it afresh - // after this full GC. - abandon_collection_set(g1_policy()->inc_cset_head()); - g1_policy()->clear_incremental_cset(); - g1_policy()->stop_incremental_cset_building(); - - tear_down_region_sets(false /* free_list_only */); - g1_policy()->set_gcs_are_young(true); - - // See the comments in g1CollectedHeap.hpp and - // G1CollectedHeap::ref_processing_init() about - // how reference processing currently works in G1. - - // Temporarily make discovery by the STW ref processor single threaded (non-MT). - ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); - - // Temporarily clear the STW ref processor's _is_alive_non_header field. - ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); - - ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); - ref_processor_stw()->setup_policy(do_clear_all_soft_refs); - - // Do collection work { - HandleMark hm; // Discard invalid handles created during gc - G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); - } - - assert(free_regions() == 0, "we should not have added any free regions"); - rebuild_region_sets(false /* free_list_only */); - - // Enqueue any discovered reference objects that have - // not been removed from the discovered lists. - ref_processor_stw()->enqueue_discovered_references(); - - COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); - - MemoryService::track_memory_usage(); - - verify_after_gc(); - - assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); - ref_processor_stw()->verify_no_references_recorded(); - - // Delete metaspaces for unloaded class loaders and clean up loader_data graph - ClassLoaderDataGraph::purge(); - - // Note: since we've just done a full GC, concurrent - // marking is no longer active. Therefore we need not - // re-enable reference discovery for the CM ref processor. - // That will be done at the start of the next marking cycle. 
- assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); - ref_processor_cm()->verify_no_references_recorded(); - - reset_gc_time_stamp(); - // Since everything potentially moved, we will clear all remembered - // sets, and clear all cards. Later we will rebuild remebered - // sets. We will also reset the GC time stamps of the regions. - clear_rsets_post_compaction(); - check_gc_time_stamps(); - - // Resize the heap if necessary. - resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); - - if (_hr_printer.is_active()) { - // We should do this after we potentially resize the heap so - // that all the COMMIT / UNCOMMIT events are generated before - // the end GC event. - - print_hrs_post_compaction(); - _hr_printer.end_gc(true /* full */, (size_t) total_collections()); - } - - if (_cg1r->use_cache()) { - _cg1r->clear_and_record_card_counts(); - _cg1r->clear_hot_cache(); - } - - // Rebuild remembered sets of all regions. - if (G1CollectedHeap::use_parallel_gc_threads()) { - uint n_workers = - AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), - workers()->active_workers(), - Threads::number_of_non_daemon_threads()); - assert(UseDynamicNumberOfGCThreads || - n_workers == workers()->total_workers(), - "If not dynamic should be using all the workers"); - workers()->set_active_workers(n_workers); - // Set parallel threads in the heap (_n_par_threads) only - // before a parallel phase and always reset it to 0 after - // the phase so that the number of parallel threads does - // no get carried forward to a serial phase where there - // may be code that is "possibly_parallel". - set_par_threads(n_workers); - - ParRebuildRSTask rebuild_rs_task(this); - assert(check_heap_region_claim_values( - HeapRegion::InitialClaimValue), "sanity check"); - assert(UseDynamicNumberOfGCThreads || - workers()->active_workers() == workers()->total_workers(), - "Unless dynamic should use total workers"); - // Use the most recent number of active workers - assert(workers()->active_workers() > 0, - "Active workers not properly set"); - set_par_threads(workers()->active_workers()); - workers()->run_task(&rebuild_rs_task); - set_par_threads(0); - assert(check_heap_region_claim_values( - HeapRegion::RebuildRSClaimValue), "sanity check"); - reset_heap_region_claim_values(); - } else { - RebuildRSOutOfRegionClosure rebuild_rs(this); - heap_region_iterate(&rebuild_rs); - } - - if (G1Log::fine()) { - print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); - } - - if (true) { // FIXME - MetaspaceGC::compute_new_size(); - } - - // Start a new incremental collection set for the next pause - assert(g1_policy()->collection_set() == NULL, "must be"); - g1_policy()->start_incremental_cset_building(); - - // Clear the _cset_fast_test bitmap in anticipation of adding - // regions to the incremental collection set for the next - // evacuation pause. 
- clear_cset_fast_test(); - - init_mutator_alloc_region(); - - double end = os::elapsedTime(); - g1_policy()->record_full_collection_end(); + GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL); + TraceCollectorStats tcs(g1mm()->full_collection_counters()); + TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); + + double start = os::elapsedTime(); + g1_policy()->record_full_collection_start(); + + // Note: When we have a more flexible GC logging framework that + // allows us to add optional attributes to a GC log record we + // could consider timing and reporting how long we wait in the + // following two methods. + wait_while_free_regions_coming(); + // If we start the compaction before the CM threads finish + // scanning the root regions we might trip them over as we'll + // be moving objects / updating references. So let's wait until + // they are done. By telling them to abort, they should complete + // early. + _cm->root_regions()->abort(); + _cm->root_regions()->wait_until_scan_finished(); + append_secondary_free_list_if_not_empty_with_lock(); + + gc_prologue(true); + increment_total_collections(true /* full gc */); + increment_old_marking_cycles_started(); + + assert(used() == recalculate_used(), "Should be equal"); + + verify_before_gc(); + + pre_full_gc_dump(gc_timer); + + COMPILER2_PRESENT(DerivedPointerTable::clear()); + + // Disable discovery and empty the discovered lists + // for the CM ref processor. + ref_processor_cm()->disable_discovery(); + ref_processor_cm()->abandon_partial_discovery(); + ref_processor_cm()->verify_no_references_recorded(); + + // Abandon current iterations of concurrent marking and concurrent + // refinement, if any are in progress. We have to do this before + // wait_until_scan_finished() below. + concurrent_mark()->abort(); + + // Make sure we'll choose a new allocation region afterwards. + release_mutator_alloc_region(); + abandon_gc_alloc_regions(); + g1_rem_set()->cleanupHRRS(); + + // We should call this after we retire any currently active alloc + // regions so that all the ALLOC / RETIRE events are generated + // before the start GC event. + _hr_printer.start_gc(true /* full */, (size_t) total_collections()); + + // We may have added regions to the current incremental collection + // set between the last GC or pause and now. We need to clear the + // incremental collection set and then start rebuilding it afresh + // after this full GC. + abandon_collection_set(g1_policy()->inc_cset_head()); + g1_policy()->clear_incremental_cset(); + g1_policy()->stop_incremental_cset_building(); + + tear_down_region_sets(false /* free_list_only */); + g1_policy()->set_gcs_are_young(true); + + // See the comments in g1CollectedHeap.hpp and + // G1CollectedHeap::ref_processing_init() about + // how reference processing currently works in G1. + + // Temporarily make discovery by the STW ref processor single threaded (non-MT). + ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); + + // Temporarily clear the STW ref processor's _is_alive_non_header field. 
+ ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); + + ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); + ref_processor_stw()->setup_policy(do_clear_all_soft_refs); + + // Do collection work + { + HandleMark hm; // Discard invalid handles created during gc + G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); + } + + assert(free_regions() == 0, "we should not have added any free regions"); + rebuild_region_sets(false /* free_list_only */); + + // Enqueue any discovered reference objects that have + // not been removed from the discovered lists. + ref_processor_stw()->enqueue_discovered_references(); + + COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); + + MemoryService::track_memory_usage(); + + assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); + ref_processor_stw()->verify_no_references_recorded(); + + // Delete metaspaces for unloaded class loaders and clean up loader_data graph + ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); + + // Note: since we've just done a full GC, concurrent + // marking is no longer active. Therefore we need not + // re-enable reference discovery for the CM ref processor. + // That will be done at the start of the next marking cycle. + assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); + ref_processor_cm()->verify_no_references_recorded(); + + reset_gc_time_stamp(); + // Since everything potentially moved, we will clear all remembered + // sets, and clear all cards. Later we will rebuild remembered + // sets. We will also reset the GC time stamps of the regions. + clear_rsets_post_compaction(); + check_gc_time_stamps(); + + // Resize the heap if necessary. + resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); + + if (_hr_printer.is_active()) { + // We should do this after we potentially resize the heap so + // that all the COMMIT / UNCOMMIT events are generated before + // the end GC event. + + print_hrs_post_compaction(); + _hr_printer.end_gc(true /* full */, (size_t) total_collections()); + } + + G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); + if (hot_card_cache->use_cache()) { + hot_card_cache->reset_card_counts(); + hot_card_cache->reset_hot_cache(); + } + + // Rebuild remembered sets of all regions. + if (G1CollectedHeap::use_parallel_gc_threads()) { + uint n_workers = + AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), + workers()->active_workers(), + Threads::number_of_non_daemon_threads()); + assert(UseDynamicNumberOfGCThreads || + n_workers == workers()->total_workers(), + "If not dynamic should be using all the workers"); + workers()->set_active_workers(n_workers); + // Set parallel threads in the heap (_n_par_threads) only + // before a parallel phase and always reset it to 0 after + // the phase so that the number of parallel threads does + // no get carried forward to a serial phase where there + // may be code that is "possibly_parallel". 
+ set_par_threads(n_workers); + + ParRebuildRSTask rebuild_rs_task(this); + assert(check_heap_region_claim_values( + HeapRegion::InitialClaimValue), "sanity check"); + assert(UseDynamicNumberOfGCThreads || + workers()->active_workers() == workers()->total_workers(), + "Unless dynamic should use total workers"); + // Use the most recent number of active workers + assert(workers()->active_workers() > 0, + "Active workers not properly set"); + set_par_threads(workers()->active_workers()); + workers()->run_task(&rebuild_rs_task); + set_par_threads(0); + assert(check_heap_region_claim_values( + HeapRegion::RebuildRSClaimValue), "sanity check"); + reset_heap_region_claim_values(); + } else { + RebuildRSOutOfRegionClosure rebuild_rs(this); + heap_region_iterate(&rebuild_rs); + } + + if (true) { // FIXME + MetaspaceGC::compute_new_size(); + } #ifdef TRACESPINNING - ParallelTaskTerminator::print_termination_counts(); + ParallelTaskTerminator::print_termination_counts(); #endif - gc_epilogue(true); - - // Discard all rset updates - JavaThread::dirty_card_queue_set().abandon_logs(); - assert(!G1DeferredRSUpdate - || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); - - _young_list->reset_sampled_info(); - // At this point there should be no regions in the - // entire heap tagged as young. - assert( check_young_list_empty(true /* check_heap */), - "young list should be empty at this point"); - - // Update the number of full collections that have been completed. - increment_old_marking_cycles_completed(false /* concurrent */); - - _hrs.verify_optional(); - verify_region_sets_optional(); + // Discard all rset updates + JavaThread::dirty_card_queue_set().abandon_logs(); + assert(!G1DeferredRSUpdate + || (G1DeferredRSUpdate && + (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); + + _young_list->reset_sampled_info(); + // At this point there should be no regions in the + // entire heap tagged as young. + assert(check_young_list_empty(true /* check_heap */), + "young list should be empty at this point"); + + // Update the number of full collections that have been completed. + increment_old_marking_cycles_completed(false /* concurrent */); + + _hrs.verify_optional(); + verify_region_sets_optional(); + + verify_after_gc(); + + // Start a new incremental collection set for the next pause + assert(g1_policy()->collection_set() == NULL, "must be"); + g1_policy()->start_incremental_cset_building(); + + // Clear the _cset_fast_test bitmap in anticipation of adding + // regions to the incremental collection set for the next + // evacuation pause. + clear_cset_fast_test(); + + init_mutator_alloc_region(); + + double end = os::elapsedTime(); + g1_policy()->record_full_collection_end(); + + if (G1Log::fine()) { + g1_policy()->print_heap_transition(); + } + + // We must call G1MonitoringSupport::update_sizes() in the same scoping level + // as an active TraceMemoryManagerStats object (i.e. before the destructor for the + // TraceMemoryManagerStats is called) so that the G1 memory pools are updated + // before any GC notifications are raised. + g1mm()->update_sizes(); + + gc_epilogue(true); + } + + if (G1Log::finer()) { + g1_policy()->print_detailed_heap_transition(true /* full */); + } print_heap_after_gc(); - - // We must call G1MonitoringSupport::update_sizes() in the same scoping level - // as an active TraceMemoryManagerStats object (i.e. 
before the destructor for the - // TraceMemoryManagerStats is called) so that the G1 memory pools are updated - // before any GC notifications are raised. - g1mm()->update_sizes(); - } - - post_full_gc_dump(); + trace_heap_after_gc(gc_tracer); + + post_full_gc_dump(gc_timer); + + gc_timer->register_gc_end(os::elapsed_counter()); + gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); + } return true; } @@ -1761,6 +1784,8 @@ Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); // Tell the BOT about the update. _bot_shared->resize(_g1_committed.word_size()); + // Tell the hot card cache about the update + _cg1r->hot_card_cache()->resize_card_counts(capacity()); } bool G1CollectedHeap::expand(size_t expand_bytes) { @@ -1825,7 +1850,7 @@ if (G1ExitOnExpansionFailure && _g1_storage.uncommitted_size() >= aligned_expand_bytes) { // We had head room... - vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion"); + vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion"); } } return successful; @@ -1837,33 +1862,32 @@ ReservedSpace::page_align_size_down(shrink_bytes); aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, HeapRegion::GrainBytes); - uint num_regions_deleted = 0; - MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted); + uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); + + uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove); HeapWord* old_end = (HeapWord*) _g1_storage.high(); - assert(mr.end() == old_end, "post-condition"); + size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes; ergo_verbose3(ErgoHeapSizing, "shrink the heap", ergo_format_byte("requested shrinking amount") ergo_format_byte("aligned shrinking amount") ergo_format_byte("attempted shrinking amount"), - shrink_bytes, aligned_shrink_bytes, mr.byte_size()); - if (mr.byte_size() > 0) { + shrink_bytes, aligned_shrink_bytes, shrunk_bytes); + if (num_regions_removed > 0) { + _g1_storage.shrink_by(shrunk_bytes); + HeapWord* new_end = (HeapWord*) _g1_storage.high(); + if (_hr_printer.is_active()) { - HeapWord* curr = mr.end(); - while (curr > mr.start()) { + HeapWord* curr = old_end; + while (curr > new_end) { HeapWord* curr_end = curr; curr -= HeapRegion::GrainWords; _hr_printer.uncommit(curr, curr_end); } - assert(curr == mr.start(), "post-condition"); } - _g1_storage.shrink_by(mr.byte_size()); - HeapWord* new_end = (HeapWord*) _g1_storage.high(); - assert(mr.start() == new_end, "post-condition"); - - _expansion_regions += num_regions_deleted; + _expansion_regions += num_regions_removed; update_committed_space(old_end, new_end); HeapRegionRemSet::shrink_heap(n_regions()); g1_policy()->record_new_heap_size(n_regions()); @@ -1911,7 +1935,7 @@ _ref_processor_stw(NULL), _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), _bot_shared(NULL), - _evac_failure_scan_stack(NULL) , + _evac_failure_scan_stack(NULL), _mark_in_progress(false), _cg1r(NULL), _summary_bytes_used(0), _g1mm(NULL), @@ -1931,12 +1955,18 @@ _surviving_young_words(NULL), _old_marking_cycles_started(0), _old_marking_cycles_completed(0), + _concurrent_cycle_started(false), _in_cset_fast_test(NULL), _in_cset_fast_test_base(NULL), _dirty_cards_region_list(NULL), _worker_cset_start_region(NULL), - _worker_cset_start_region_time_stamp(NULL) { - _g1h = this; // To catch bugs. 
+ _worker_cset_start_region_time_stamp(NULL), + _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()), + _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), + _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()), + _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) { + + _g1h = this; if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { vm_exit_during_initialization("Failed necessary allocation."); } @@ -1949,22 +1979,16 @@ int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); assert(n_rem_sets > 0, "Invariant."); - HeapRegionRemSetIterator** iter_arr = - NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC); - for (int i = 0; i < n_queues; i++) { - iter_arr[i] = new HeapRegionRemSetIterator(); - } - _rem_set_iterator = iter_arr; - _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); + _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC); for (int i = 0; i < n_queues; i++) { RefToScanQueue* q = new RefToScanQueue(); q->initialize(); _task_queues->register_queue(i, q); - } - + ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo(); + } clear_cset_start_regions(); // Initialize the G1EvacuationFailureALot counters and flags. @@ -2001,7 +2025,7 @@ Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); - _cg1r = new ConcurrentG1Refine(); + _cg1r = new ConcurrentG1Refine(this); // Reserve the maximum. @@ -2024,7 +2048,7 @@ HeapRegion::GrainBytes); // It is important to do this in a way such that concurrent readers can't - // temporarily think somethings in the heap. (I've actually seen this + // temporarily think something is in the heap. (I've actually seen this // happen in asserts: DLD.) _reserved.set_word_size(0); _reserved.set_start((HeapWord*)heap_rs.base()); @@ -2062,6 +2086,9 @@ (HeapWord*) _g1_reserved.end(), _expansion_regions); + // Do later initialization work for concurrent refinement. + _cg1r->init(); + // 6843694 - ensure that the maximum region index can fit // in the remembered set structures. const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; @@ -2079,20 +2106,20 @@ _g1h = this; - _in_cset_fast_test_length = max_regions(); - _in_cset_fast_test_base = + _in_cset_fast_test_length = max_regions(); + _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC); - // We're biasing _in_cset_fast_test to avoid subtracting the - // beginning of the heap every time we want to index; basically - // it's the same with what we do with the card table. - _in_cset_fast_test = _in_cset_fast_test_base - + // We're biasing _in_cset_fast_test to avoid subtracting the + // beginning of the heap every time we want to index; basically + // it's the same with what we do with the card table. + _in_cset_fast_test = _in_cset_fast_test_base - ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); - // Clear the _cset_fast_test bitmap in anticipation of adding - // regions to the incremental collection set for the first - // evacuation pause. - clear_cset_fast_test(); + // Clear the _cset_fast_test bitmap in anticipation of adding + // regions to the incremental collection set for the first + // evacuation pause. + clear_cset_fast_test(); // Create the ConcurrentMark data structure and thread. 
// (Must do this late, so that "max_regions" is defined.) @@ -2154,9 +2181,6 @@ // counts and that mechanism. SpecializationStats::clear(); - // Do later initialization work for concurrent refinement. - _cg1r->init(); - // Here we allocate the dummy full region that is required by the // G1AllocRegion class. If we don't pass an address in the reserved // space here, lots of asserts fire. @@ -2315,7 +2339,8 @@ bool concurrent, int worker_i) { // Clean cards in the hot card cache - concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); + G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); + hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq); DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); int n_completed_buffers = 0; @@ -2470,7 +2495,7 @@ // We need to clear the "in_progress" flag in the CM thread before // we wake up any waiters (especially when ExplicitInvokesConcurrent // is set) so that if a waiter requests another System.gc() it doesn't - // incorrectly see that a marking cyle is still in progress. + // incorrectly see that a marking cycle is still in progress. if (concurrent) { _cmThread->clear_in_progress(); } @@ -2482,6 +2507,49 @@ FullGCCount_lock->notify_all(); } +void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) { + _concurrent_cycle_started = true; + _gc_timer_cm->register_gc_start(start_time); + + _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start()); + trace_heap_before_gc(_gc_tracer_cm); +} + +void G1CollectedHeap::register_concurrent_cycle_end() { + if (_concurrent_cycle_started) { + _gc_timer_cm->register_gc_end(os::elapsed_counter()); + + if (_cm->has_aborted()) { + _gc_tracer_cm->report_concurrent_mode_failure(); + } + _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); + + _concurrent_cycle_started = false; + } +} + +void G1CollectedHeap::trace_heap_after_concurrent_cycle() { + if (_concurrent_cycle_started) { + trace_heap_after_gc(_gc_tracer_cm); + } +} + +G1YCType G1CollectedHeap::yc_type() { + bool is_young = g1_policy()->gcs_are_young(); + bool is_initial_mark = g1_policy()->during_initial_mark_pause(); + bool is_during_mark = mark_in_progress(); + + if (is_initial_mark) { + return InitialMark; + } else if (is_during_mark) { + return DuringMark; + } else if (is_young) { + return Normal; + } else { + return Mixed; + } +} + void G1CollectedHeap::collect(GCCause::Cause cause) { assert_heap_not_locked(); @@ -2684,13 +2752,13 @@ break; } - // Noone should have claimed it directly. We can given + // No one should have claimed it directly. We can given // that we claimed its "starts humongous" region. assert(chr->claim_value() != claim_value, "sanity"); assert(chr->humongous_start_region() == r, "sanity"); if (chr->claimHeapRegion(claim_value)) { - // we should always be able to claim it; noone else should + // we should always be able to claim it; no one else should // be trying to claim this region bool res2 = cl->doHeapRegion(chr); @@ -2984,7 +3052,7 @@ // the min TLAB size. // Also, this value can be at most the humongous object threshold, - // since we can't allow tlabs to grow big enough to accomodate + // since we can't allow tlabs to grow big enough to accommodate // humongous objects. HeapRegion* hr = _mutator_alloc_region.get(); @@ -3547,6 +3615,14 @@ } void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { + + if (G1SummarizeRSetStats && + (G1SummarizeRSetStatsPeriod > 0) && + // we are at the end of the GC. 
Total collections has already been increased. + ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) { + g1_rem_set()->print_periodic_summary_info(); + } + // FIXME: what is this about? // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" // is set. @@ -3618,7 +3694,7 @@ uint array_length = g1_policy()->young_cset_region_length(); _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC); if (_surviving_young_words == NULL) { - vm_exit_out_of_memory(sizeof(size_t) * array_length, + vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR, "Not enough space for young surv words summary."); } memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t)); @@ -3743,10 +3819,15 @@ return false; } + _gc_timer_stw->register_gc_start(os::elapsed_counter()); + + _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start()); + SvcGCMarker sgcm(SvcGCMarker::MINOR); ResourceMark rm; print_heap_before_gc(); + trace_heap_before_gc(_gc_tracer_stw); HRSPhaseSetter x(HRSPhaseEvacuation); verify_region_sets_optional(); @@ -3771,11 +3852,17 @@ // Inner scope for scope based logging, timers, and stats collection { + EvacuationInfo evacuation_info; + if (g1_policy()->during_initial_mark_pause()) { // We are about to start a marking cycle, so we increment the // full collection counter. increment_old_marking_cycles_started(); + register_concurrent_cycle_start(_gc_timer_stw->gc_start()); } + + _gc_tracer_stw->report_yc_type(yc_type()); + TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? @@ -3848,7 +3935,6 @@ // The elapsed time induced by the start time below deliberately elides // the possible verification above. double sample_start_time_sec = os::elapsedTime(); - size_t start_used_bytes = used(); #if YOUNG_LIST_VERBOSE gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); @@ -3856,8 +3942,7 @@ g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - g1_policy()->record_collection_pause_start(sample_start_time_sec, - start_used_bytes); + g1_policy()->record_collection_pause_start(sample_start_time_sec); double scan_wait_start = os::elapsedTime(); // We have to wait until the CM threads finish scanning the @@ -3887,7 +3972,7 @@ g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - g1_policy()->finalize_cset(target_pause_time_ms); + g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); _cm->note_start_of_gc(); // We should not verify the per-thread SATB buffers given that @@ -3923,10 +4008,10 @@ setup_surviving_young_words(); // Initialize the GC alloc regions. - init_gc_alloc_regions(); + init_gc_alloc_regions(evacuation_info); // Actually do the work... 
- evacuate_collection_set(); + evacuate_collection_set(evacuation_info); // We do this to mainly verify the per-thread SATB buffers // (which have been filtered by now) since we didn't verify @@ -3938,7 +4023,7 @@ true /* verify_thread_buffers */, true /* verify_fingers */); - free_collection_set(g1_policy()->collection_set()); + free_collection_set(g1_policy()->collection_set(), evacuation_info); g1_policy()->clear_collection_set(); cleanup_surviving_young_words(); @@ -3966,13 +4051,19 @@ #endif // YOUNG_LIST_VERBOSE g1_policy()->record_survivor_regions(_young_list->survivor_length(), - _young_list->first_survivor_region(), - _young_list->last_survivor_region()); + _young_list->first_survivor_region(), + _young_list->last_survivor_region()); _young_list->reset_auxilary_lists(); if (evacuation_failed()) { _summary_bytes_used = recalculate_used(); + uint n_queues = MAX2((int)ParallelGCThreads, 1); + for (uint i = 0; i < n_queues; i++) { + if (_evacuation_failed_info_array[i].has_failed()) { + _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); + } + } } else { // The "used" of the the collection set have already been subtracted // when they were freed. Add in the bytes evacuated. @@ -4015,7 +4106,7 @@ } } - // We redo the verificaiton but now wrt to the new CSet which + // We redo the verification but now wrt to the new CSet which // has just got initialized after the previous CSet was freed. _cm->verify_no_cset_oops(true /* verify_stacks */, true /* verify_enqueued_buffers */, @@ -4028,7 +4119,7 @@ // investigate this in CR 7178365. double sample_end_time_sec = os::elapsedTime(); double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; - g1_policy()->record_collection_pause_end(pause_time_ms); + g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info); MemoryService::track_memory_usage(); @@ -4095,20 +4186,19 @@ TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); print_heap_after_gc(); + trace_heap_after_gc(_gc_tracer_stw); // We must call G1MonitoringSupport::update_sizes() in the same scoping level // as an active TraceMemoryManagerStats object (i.e. before the destructor for the // TraceMemoryManagerStats is called) so that the G1 memory pools are updated // before any GC notifications are raised. g1mm()->update_sizes(); - } - - if (G1SummarizeRSetStats && - (G1SummarizeRSetStatsPeriod > 0) && - (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { - g1_rem_set()->print_summary_info(); - } - + + _gc_tracer_stw->report_evacuation_info(&evacuation_info); + _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold()); + _gc_timer_stw->register_gc_end(os::elapsed_counter()); + _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions()); + } // It should now be safe to tell the concurrent mark thread to start // without its logging output interfering with the logging output // that came from the pause. 
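// [Editor's sketch -- not part of the changeset] The pause bookkeeping added
// in the hunks above follows one pattern: register/report a GC start on a
// timer/tracer pair, run the pause while threading an EvacuationInfo through
// evacuate_collection_set()/free_collection_set(), then report the gathered
// data and register/report the GC end. The stub types below are hypothetical
// stand-ins for STWGCTimer/G1NewTracer; only the call ordering is taken from
// the patch.
#include <cstdio>

struct StubTimer {
  long start_ticks;
  long end_ticks;
  void register_gc_start(long t) { start_ticks = t; }
  void register_gc_end(long t)   { end_ticks = t; }
};

struct StubTracer {
  void report_gc_start(long t) { std::printf("pause start @ %ld\n", t); }
  void report_gc_end(long t)   { std::printf("pause end   @ %ld\n", t); }
};

static long fake_elapsed_counter() {  // stands in for os::elapsed_counter()
  static long ticks = 0;
  return ++ticks;
}

int main() {
  StubTimer timer;
  StubTracer tracer;

  // Bracket the pause: start events first...
  timer.register_gc_start(fake_elapsed_counter());
  tracer.report_gc_start(timer.start_ticks);

  // ... the evacuation pause itself would run here ...

  // ... then end events, in the same order the patch uses.
  timer.register_gc_end(fake_elapsed_counter());
  tracer.report_gc_end(timer.end_ticks);
  return 0;
}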
@@ -4160,7 +4250,7 @@ assert(_mutator_alloc_region.get() == NULL, "post-condition"); } -void G1CollectedHeap::init_gc_alloc_regions() { +void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) { assert_at_safepoint(true /* should_be_vm_thread */); _survivor_gc_alloc_region.init(); @@ -4175,7 +4265,7 @@ // a cleanup and it should be on the free list now), or // d) it's humongous (this means that it was emptied // during a cleanup and was added to the free list, but - // has been subseqently used to allocate a humongous + // has been subsequently used to allocate a humongous // object that may be less than the region size). if (retained_region != NULL && !retained_region->in_collection_set() && @@ -4192,10 +4282,13 @@ retained_region->note_start_of_copying(during_im); _old_gc_alloc_region.set(retained_region); _hr_printer.reuse(retained_region); - } -} - -void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) { + evacuation_info.set_alloc_regions_used_before(retained_region->used()); + } +} + +void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) { + evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() + + _old_gc_alloc_region.count()); _survivor_gc_alloc_region.release(); // If we have an old GC alloc region to release, we'll save it in // _retained_old_gc_alloc_region. If we don't @@ -4278,7 +4371,7 @@ } oop -G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, +G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop old) { assert(obj_in_cs(old), err_msg("obj: "PTR_FORMAT" should still be in the CSet", @@ -4287,7 +4380,12 @@ oop forward_ptr = old->forward_to_atomic(old); if (forward_ptr == NULL) { // Forward-to-self succeeded. - + assert(_par_scan_state != NULL, "par scan state"); + OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); + uint queue_num = _par_scan_state->queue_num(); + + _evacuation_failed = true; + _evacuation_failed_info_array[queue_num].register_copy_failure(old->size()); if (_evac_failure_closure != cl) { MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); assert(!_drain_in_progress, @@ -4318,8 +4416,6 @@ } void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { - set_evacuation_failed(true); - preserve_mark_if_necessary(old, m); HeapRegion* r = heap_region_containing(old); @@ -4403,7 +4499,7 @@ PADDING_ELEM_NUM; _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC); if (_surviving_young_words_base == NULL) - vm_exit_out_of_memory(array_length * sizeof(size_t), + vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR, "Not enough space for young surv histo."); _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t)); @@ -4569,8 +4665,7 @@ if (obj_ptr == NULL) { // This will either forward-to-self, or detect that someone else has // installed a forwarding pointer. 
- OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); - return _g1->handle_evacuation_failure_par(cl, old); + return _g1->handle_evacuation_failure_par(_par_scan_state, old); } oop obj = oop(obj_ptr); @@ -5085,10 +5180,9 @@ } void -G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, - OopClosure* non_root_closure) { +G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) { CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); - SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); + SharedHeap::process_weak_roots(root_closure, &roots_in_blobs); } // Weak Reference Processing support @@ -5101,7 +5195,6 @@ G1CollectedHeap* _g1; public: G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} - void do_object(oop p) { assert(false, "Do not call."); } bool do_object_b(oop p) { if (p != NULL) { return true; @@ -5176,7 +5269,7 @@ // will be copied, the reference field set to point to the // new location, and the RSet updated. Otherwise we need to // use the the non-heap or metadata closures directly to copy - // the refernt object and update the pointer, while avoiding + // the referent object and update the pointer, while avoiding // updating the RSet. if (_g1h->is_in_g1_reserved(p)) { @@ -5344,7 +5437,7 @@ } }; -// Driver routine for parallel reference enqueing. +// Driver routine for parallel reference enqueueing. // Creates an instance of the ref enqueueing gang // task and has the worker threads execute it. @@ -5473,7 +5566,7 @@ // processor would have seen that the reference object had already // been 'discovered' and would have skipped discovering the reference, // but would not have treated the reference object as a regular oop. - // As a reult the copy closure would not have been applied to the + // As a result the copy closure would not have been applied to the // referent object. // // We need to explicitly copy these referent objects - the references @@ -5549,21 +5642,28 @@ // Setup the soft refs policy... rp->setup_policy(false); + ReferenceProcessorStats stats; if (!rp->processing_is_mt()) { // Serial reference processing... - rp->process_discovered_references(&is_alive, - &keep_alive, - &drain_queue, - NULL); + stats = rp->process_discovered_references(&is_alive, + &keep_alive, + &drain_queue, + NULL, + _gc_timer_stw); } else { // Parallel reference processing assert(rp->num_q() == no_of_gc_workers, "sanity"); assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers); - rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor); - } - + stats = rp->process_discovered_references(&is_alive, + &keep_alive, + &drain_queue, + &par_task_executor, + _gc_timer_stw); + } + + _gc_tracer_stw->report_gc_reference_stats(stats); // We have completed copying any necessary live referent objects // (that were not copied during the actual pause) so we can // retire any active alloc buffers @@ -5587,7 +5687,7 @@ // Serial reference processing... rp->enqueue_discovered_references(); } else { - // Parallel reference enqueuing + // Parallel reference enqueueing assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active workers"); @@ -5604,22 +5704,25 @@ // FIXME // CM's reference processing also cleans up the string and symbol tables. // Should we do that here also? We could, but it is a serial operation - // and could signicantly increase the pause time. 
+ // and could significantly increase the pause time. double ref_enq_time = os::elapsedTime() - ref_enq_start; g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0); } -void G1CollectedHeap::evacuate_collection_set() { +void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) { _expand_heap_after_alloc_failure = true; - set_evacuation_failed(false); + _evacuation_failed = false; // Should G1EvacuationFailureALot be in effect for this GC? NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();) g1_rem_set()->prepare_for_oops_into_collection_set_do(); - concurrent_g1_refine()->set_use_cache(false); - concurrent_g1_refine()->clear_hot_cache_claimed_index(); + + // Disable the hot card cache. + G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); + hot_card_cache->reset_hot_cache_claimed_index(); + hot_card_cache->set_use_cache(false); uint n_workers; if (G1CollectedHeap::use_parallel_gc_threads()) { @@ -5698,11 +5801,14 @@ JNIHandles::weak_oops_do(&is_alive, &keep_alive); } - release_gc_alloc_regions(n_workers); + release_gc_alloc_regions(n_workers, evacuation_info); g1_rem_set()->cleanup_after_oops_into_collection_set_do(); - concurrent_g1_refine()->clear_hot_cache(); - concurrent_g1_refine()->set_use_cache(true); + // Reset and re-enable the hot card cache. + // Note the counts for the cards in the regions in the + // collection set are reset when the collection set is freed. + hot_card_cache->reset_hot_cache(); + hot_card_cache->set_use_cache(true); finalize_for_evac_failure(); @@ -5718,7 +5824,7 @@ // Enqueue any remaining references remaining on the STW // reference processor's discovered lists. We need to do // this after the card table is cleaned (and verified) as - // the act of enqueuing entries on to the pending list + // the act of enqueueing entries on to the pending list // will log these updates (and dirty their associated // cards). We need these updates logged to update any // RSets. @@ -5764,6 +5870,12 @@ assert(!hr->is_empty(), "the region should not be empty"); assert(free_list != NULL, "pre-condition"); + // Clear the card counts for this region. + // Note: we only need to do this if the region is not young + // (since we don't refine cards in young regions). + if (!hr->is_young()) { + _cg1r->hot_card_cache()->reset_card_counts(hr); + } *pre_used += hr->used(); hr->hr_clear(par, true /* clear_space */); free_list->add_as_head(hr); @@ -5940,7 +6052,7 @@ g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0); } -void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { +void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) { size_t pre_used = 0; FreeRegionList local_free_list("Local List for CSet Freeing"); @@ -6026,10 +6138,12 @@ cur->set_evacuation_failed(false); // The region is now considered to be old. _old_set.add(cur); + evacuation_info.increment_collectionset_used_after(cur->used()); } cur = next; } + evacuation_info.set_regions_freed(local_free_list.length()); policy->record_max_rs_lengths(rs_lengths); policy->cset_regions_freed(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,10 +26,12 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP #include "gc_implementation/g1/concurrentMark.hpp" +#include "gc_implementation/g1/evacuationInfo.hpp" #include "gc_implementation/g1/g1AllocRegion.hpp" #include "gc_implementation/g1/g1HRPrinter.hpp" +#include "gc_implementation/g1/g1MonitoringSupport.hpp" #include "gc_implementation/g1/g1RemSet.hpp" -#include "gc_implementation/g1/g1MonitoringSupport.hpp" +#include "gc_implementation/g1/g1YCTypes.hpp" #include "gc_implementation/g1/heapRegionSeq.hpp" #include "gc_implementation/g1/heapRegionSets.hpp" #include "gc_implementation/shared/hSpaceCounters.hpp" @@ -61,7 +63,12 @@ class ConcurrentMark; class ConcurrentMarkThread; class ConcurrentG1Refine; +class ConcurrentGCTimer; class GenerationCounters; +class STWGCTimer; +class G1NewTracer; +class G1OldTracer; +class EvacuationFailedInfo; typedef OverflowTaskQueue RefToScanQueue; typedef GenericTaskQueueSet RefToScanQueueSet; @@ -160,12 +167,11 @@ // An instance is embedded into the G1CH and used as the // (optional) _is_alive_non_header closure in the STW // reference processor. It is also extensively used during -// refence processing during STW evacuation pauses. +// reference processing during STW evacuation pauses. class G1STWIsAliveClosure: public BoolObjectClosure { G1CollectedHeap* _g1; public: G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} - void do_object(oop p) { assert(false, "Do not call."); } bool do_object_b(oop p); }; @@ -324,10 +330,10 @@ void release_mutator_alloc_region(); // It initializes the GC alloc regions at the start of a GC. - void init_gc_alloc_regions(); + void init_gc_alloc_regions(EvacuationInfo& evacuation_info); // It releases the GC alloc regions at the end of a GC. - void release_gc_alloc_regions(uint no_of_gc_workers); + void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info); // It does any cleanup that needs to be done on the GC alloc regions // before a Full GC. @@ -390,6 +396,8 @@ // concurrent cycles) we have completed. volatile unsigned int _old_marking_cycles_completed; + bool _concurrent_cycle_started; + // This is a non-product method that is helpful for testing. It is // called at the end of a GC and artificially expands the heap by // allocating a number of dead regions. This way we can induce very @@ -594,11 +602,6 @@ // may not be a humongous - it must fit into a single heap region. HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); - HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, - HeapRegion* alloc_region, - bool par, - size_t word_size); - // Ensure that no further allocations can happen in "r", bearing in mind // that parallel threads might be attempting allocations. void par_allocate_remaining_space(HeapRegion* r); @@ -746,6 +749,12 @@ return _old_marking_cycles_completed; } + void register_concurrent_cycle_start(jlong start_time); + void register_concurrent_cycle_end(); + void trace_heap_after_concurrent_cycle(); + + G1YCType yc_type(); + G1HRPrinter* hr_printer() { return &_hr_printer; } protected: @@ -781,7 +790,7 @@ bool do_collection_pause_at_safepoint(double target_pause_time_ms); // Actually do the work of evacuating the collection set. - void evacuate_collection_set(); + void evacuate_collection_set(EvacuationInfo& evacuation_info); // The g1 remembered set of the heap. 
G1RemSet* _g1_rem_set; @@ -792,9 +801,6 @@ // concurrently after the collection. DirtyCardQueueSet _dirty_card_queue_set; - // The Heap Region Rem Set Iterator. - HeapRegionRemSetIterator** _rem_set_iterator; - // The closure used to refine a single card. RefineCardTableEntryClosure* _refine_cte_cl; @@ -809,7 +815,7 @@ // After a collection pause, make the regions in the CS into free // regions. - void free_collection_set(HeapRegion* cs_head); + void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info); // Abandon the current collection set without recording policy // statistics or updating free lists. @@ -833,8 +839,7 @@ // Apply "blk" to all the weak roots of the system. These include // JNI weak roots, the code cache, system dictionary, symbol table, // string table, and referents of reachable weak refs. - void g1_process_weak_roots(OopClosure* root_closure, - OopClosure* non_root_closure); + void g1_process_weak_roots(OopClosure* root_closure); // Frees a non-humongous region by initializing its contents and // adding it to the free list that's passed as a parameter (this is @@ -879,9 +884,7 @@ // True iff a evacuation has failed in the current collection. bool _evacuation_failed; - // Set the attribute indicating whether evacuation has failed in the - // current collection. - void set_evacuation_failed(bool b) { _evacuation_failed = b; } + EvacuationFailedInfo* _evacuation_failed_info_array; // Failed evacuations cause some logical from-space objects to have // forwarding pointers to themselves. Reset them. @@ -923,7 +926,7 @@ void finalize_for_evac_failure(); // An attempt to evacuate "obj" has failed; take necessary steps. - oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); + oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj); void handle_evacuation_failure_common(oop obj, markOop m); #ifndef PRODUCT @@ -955,13 +958,13 @@ inline bool evacuation_should_fail(); // Reset the G1EvacuationFailureALot counters. Should be called at - // the end of an evacuation pause in which an evacuation failure ocurred. + // the end of an evacuation pause in which an evacuation failure occurred. inline void reset_evacuation_should_fail(); #endif // !PRODUCT // ("Weak") Reference processing support. // - // G1 has 2 instances of the referece processor class. One + // G1 has 2 instances of the reference processor class. One // (_ref_processor_cm) handles reference object discovery // and subsequent processing during concurrent marking cycles. // @@ -1011,6 +1014,12 @@ // The (stw) reference processor... ReferenceProcessor* _ref_processor_stw; + STWGCTimer* _gc_timer_stw; + ConcurrentGCTimer* _gc_timer_cm; + + G1OldTracer* _gc_tracer_cm; + G1NewTracer* _gc_tracer_stw; + // During reference object discovery, the _is_alive_non_header // closure (if non-null) is applied to the referent object to // determine whether the referent is live. If so then the @@ -1120,15 +1129,6 @@ G1RemSet* g1_rem_set() const { return _g1_rem_set; } ModRefBarrierSet* mr_bs() const { return _mr_bs; } - // The rem set iterator. - HeapRegionRemSetIterator* rem_set_iterator(int i) { - return _rem_set_iterator[i]; - } - - HeapRegionRemSetIterator* rem_set_iterator() { - return _rem_set_iterator[0]; - } - unsigned get_gc_time_stamp() { return _gc_time_stamp; } @@ -1165,9 +1165,12 @@ // The STW reference processor.... ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; } - // The Concurent Marking reference processor... 
+ // The Concurrent Marking reference processor... ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; } + ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; } + G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; } + virtual size_t capacity() const; virtual size_t used() const; // This should be called when we're not holding the heap lock. The @@ -1225,7 +1228,7 @@ // verify_region_sets_optional() is planted in the code for // list verification in non-product builds (and it can be enabled in - // product builds by definning HEAP_REGION_SET_FORCE_VERIFY to be 1). + // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1). #if HEAP_REGION_SET_FORCE_VERIFY void verify_region_sets_optional() { verify_region_sets(); @@ -1291,7 +1294,7 @@ // The same as above but assume that the caller holds the Heap_lock. void collect_locked(GCCause::Cause cause); - // True iff a evacuation has failed in the most-recent collection. + // True iff an evacuation has failed in the most-recent collection. bool evacuation_failed() { return _evacuation_failed; } // It will free a region if it has allocated objects in it that are @@ -1579,6 +1582,7 @@ // Override; it uses the "prev" marking information virtual void verify(bool silent); + virtual void print_on(outputStream* st) const; virtual void print_extended_on(outputStream* st) const; virtual void print_on_error(outputStream* st) const; @@ -1753,6 +1757,95 @@ ParGCAllocBuffer::retire(end_of_gc, retain); _retired = true; } + + bool is_retired() { + return _retired; + } +}; + +class G1ParGCAllocBufferContainer { +protected: + static int const _priority_max = 2; + G1ParGCAllocBuffer* _priority_buffer[_priority_max]; + +public: + G1ParGCAllocBufferContainer(size_t gclab_word_size) { + for (int pr = 0; pr < _priority_max; ++pr) { + _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size); + } + } + + ~G1ParGCAllocBufferContainer() { + for (int pr = 0; pr < _priority_max; ++pr) { + assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point."); + delete _priority_buffer[pr]; + } + } + + HeapWord* allocate(size_t word_sz) { + HeapWord* obj; + for (int pr = 0; pr < _priority_max; ++pr) { + obj = _priority_buffer[pr]->allocate(word_sz); + if (obj != NULL) return obj; + } + return obj; + } + + bool contains(void* addr) { + for (int pr = 0; pr < _priority_max; ++pr) { + if (_priority_buffer[pr]->contains(addr)) return true; + } + return false; + } + + void undo_allocation(HeapWord* obj, size_t word_sz) { + bool finish_undo; + for (int pr = 0; pr < _priority_max; ++pr) { + if (_priority_buffer[pr]->contains(obj)) { + _priority_buffer[pr]->undo_allocation(obj, word_sz); + finish_undo = true; + } + } + if (!finish_undo) ShouldNotReachHere(); + } + + size_t words_remaining() { + size_t result = 0; + for (int pr = 0; pr < _priority_max; ++pr) { + result += _priority_buffer[pr]->words_remaining(); + } + return result; + } + + size_t words_remaining_in_retired_buffer() { + G1ParGCAllocBuffer* retired = _priority_buffer[0]; + return retired->words_remaining(); + } + + void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) { + for (int pr = 0; pr < _priority_max; ++pr) { + _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain); + } + } + + void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) { + G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0]; + retired_and_set->retire(end_of_gc, retain); + retired_and_set->set_buf(buf); + 
retired_and_set->set_word_size(word_sz); + adjust_priority_order(); + } + +private: + void adjust_priority_order() { + G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0]; + + int last = _priority_max - 1; + for (int pr = 0; pr < last; ++pr) { + _priority_buffer[pr] = _priority_buffer[pr + 1]; + } + _priority_buffer[last] = retired_and_set; + } }; class G1ParScanThreadState : public StackObj { @@ -1763,9 +1856,9 @@ CardTableModRefBS* _ct_bs; G1RemSet* _g1_rem; - G1ParGCAllocBuffer _surviving_alloc_buffer; - G1ParGCAllocBuffer _tenured_alloc_buffer; - G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount]; + G1ParGCAllocBufferContainer _surviving_alloc_buffer; + G1ParGCAllocBufferContainer _tenured_alloc_buffer; + G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount]; ageTable _age_table; size_t _alloc_buffer_waste; @@ -1775,7 +1868,7 @@ G1ParScanHeapEvacClosure* _evac_cl; G1ParScanPartialArrayClosure* _partial_scan_cl; - int _hash_seed; + int _hash_seed; uint _queue_num; size_t _term_attempts; @@ -1829,7 +1922,7 @@ RefToScanQueue* refs() { return _refs; } ageTable* age_table() { return &_age_table; } - G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { + G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) { return _alloc_buffers[purpose]; } @@ -1859,15 +1952,13 @@ HeapWord* obj = NULL; size_t gclab_word_size = _g1h->desired_plab_sz(purpose); if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { - G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); - add_to_alloc_buffer_waste(alloc_buf->words_remaining()); - alloc_buf->retire(false /* end_of_gc */, false /* retain */); + G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose); HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size); if (buf == NULL) return NULL; // Let caller handle allocation failure. - // Otherwise. - alloc_buf->set_word_size(gclab_word_size); - alloc_buf->set_buf(buf); + + add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer()); + alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size); obj = alloc_buf->allocate(word_sz); assert(obj != NULL, "buffer was definitely big enough..."); @@ -1979,7 +2070,6 @@ } } -public: void trim_queue(); }; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -124,9 +124,12 @@ _last_young_gc(false), _last_gc_was_young(false), - _eden_bytes_before_gc(0), - _survivor_bytes_before_gc(0), - _capacity_before_gc(0), + _eden_used_bytes_before_gc(0), + _survivor_used_bytes_before_gc(0), + _heap_used_bytes_before_gc(0), + _metaspace_used_bytes_before_gc(0), + _eden_capacity_bytes_before_gc(0), + _heap_capacity_bytes_before_gc(0), _eden_cset_region_length(0), _survivor_cset_region_length(0), @@ -309,7 +312,8 @@ void G1CollectorPolicy::initialize_flags() { set_min_alignment(HeapRegion::GrainBytes); - set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name())); + size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name()); + set_max_alignment(MAX2(card_table_alignment, min_alignment())); if (SurvivorRatio < 1) { vm_exit_during_initialization("Invalid survivor ratio specified"); } @@ -406,7 +410,6 @@ } _free_regions_at_end_of_collection = _g1->free_regions(); update_young_list_target_length(); - _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes; // We may immediately start allocating regions and placing them on the // collection set list. Initialize the per-collection set info @@ -746,6 +749,7 @@ void G1CollectorPolicy::record_full_collection_start() { _full_collection_start_sec = os::elapsedTime(); + record_heap_size_info_at_start(true /* full */); // Release the future to-space so that it is available for compaction into. _g1->set_full_collection(); } @@ -788,8 +792,7 @@ _stop_world_start = os::elapsedTime(); } -void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, - size_t start_used) { +void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { // We only need to do this here as the policy will only be applied // to the GC we're about to start. so, no point is calculating this // every time we calculate / recalculate the target young length. 
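// [Editor's sketch -- not part of the changeset] The hunks in this file
// replace per-call-site size bookkeeping with a single
// record_heap_size_info_at_start() snapshot taken before every collection,
// full or young. A minimal stand-alone version of that idea follows; the
// type and parameter names are hypothetical, but the eden-capacity formula
// (young target size minus the bytes survivors already occupy) mirrors the
// snapshot code added later in this file.
#include <cstdio>
#include <cstddef>

struct HeapSnapshot {
  size_t eden_used;
  size_t survivor_used;
  size_t heap_used;
  size_t heap_capacity;
  size_t eden_capacity;
};

// grain_bytes plays the role of HeapRegion::GrainBytes (the region size).
HeapSnapshot snapshot_before_gc(size_t eden_used, size_t survivor_used,
                                size_t heap_used, size_t heap_capacity,
                                size_t young_target_regions,
                                size_t grain_bytes) {
  HeapSnapshot s = {eden_used, survivor_used, heap_used, heap_capacity, 0};
  // Eden capacity = target young-gen size minus what survivors already use.
  s.eden_capacity = young_target_regions * grain_bytes - survivor_used;
  return s;
}

int main() {
  const size_t MB = 1024 * 1024;
  HeapSnapshot s = snapshot_before_gc(6 * MB,   // eden used
                                      1 * MB,   // survivor used
                                      40 * MB,  // heap used
                                      128 * MB, // heap capacity
                                      8,        // young target length (regions)
                                      1 * MB);  // region grain size
  std::printf("eden capacity before GC: %zu bytes\n", s.eden_capacity);
  return 0;
}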
@@ -803,19 +806,14 @@ _trace_gen0_time_data.record_start_collection(s_w_t_ms); _stop_world_start = 0.0; + record_heap_size_info_at_start(false /* full */); + phase_times()->record_cur_collection_start_sec(start_time_sec); - _cur_collection_pause_used_at_start_bytes = start_used; - _cur_collection_pause_used_regions_at_start = _g1->used_regions(); _pending_cards = _g1->pending_card_num(); _collection_set_bytes_used_before = 0; _bytes_copied_during_gc = 0; - YoungList* young_list = _g1->young_list(); - _eden_bytes_before_gc = young_list->eden_used_bytes(); - _survivor_bytes_before_gc = young_list->survivor_used_bytes(); - _capacity_before_gc = _g1->capacity(); - _last_gc_was_young = false; // do that for any other surv rate groups @@ -911,7 +909,7 @@ // Anything below that is considered to be zero #define MIN_TIMER_GRANULARITY 0.0000001 -void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) { +void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) { double end_time_sec = os::elapsedTime(); assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(), "otherwise, the subtraction below does not make sense"); @@ -943,13 +941,8 @@ _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec, false); - size_t freed_bytes = - _cur_collection_pause_used_at_start_bytes - cur_used_bytes; - size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes; - - double survival_fraction = - (double)surviving_bytes/ - (double)_collection_set_bytes_used_before; + evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before); + evacuation_info.set_bytes_copied(_bytes_copied_during_gc); if (update_stats) { _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times()); @@ -1003,6 +996,7 @@ } } } + bool new_in_marking_window = _in_marking_window; bool new_in_marking_window_im = false; if (during_initial_mark_pause()) { @@ -1088,8 +1082,10 @@ } _rs_length_diff_seq->add((double) rs_length_diff); - size_t copied_bytes = surviving_bytes; + size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes; + size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes; double cost_per_byte_ms = 0.0; + if (copied_bytes > 0) { cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes; if (_in_marking_window) { @@ -1153,38 +1149,61 @@ byte_size_in_proper_unit((double)(bytes)), \ proper_unit_for_byte_size((bytes)) +void G1CollectorPolicy::record_heap_size_info_at_start(bool full) { + YoungList* young_list = _g1->young_list(); + _eden_used_bytes_before_gc = young_list->eden_used_bytes(); + _survivor_used_bytes_before_gc = young_list->survivor_used_bytes(); + _heap_capacity_bytes_before_gc = _g1->capacity(); + _heap_used_bytes_before_gc = _g1->used(); + _cur_collection_pause_used_regions_at_start = _g1->used_regions(); + + _eden_capacity_bytes_before_gc = + (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc; + + if (full) { + _metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes(); + } +} + void G1CollectorPolicy::print_heap_transition() { _g1->print_size_transition(gclog_or_tty, - _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity()); + _heap_used_bytes_before_gc, + _g1->used(), + _g1->capacity()); } -void G1CollectorPolicy::print_detailed_heap_transition() { - YoungList* young_list = _g1->young_list(); - size_t eden_bytes = young_list->eden_used_bytes(); - size_t survivor_bytes = 
young_list->survivor_used_bytes(); - size_t used_before_gc = _cur_collection_pause_used_at_start_bytes; - size_t used = _g1->used(); - size_t capacity = _g1->capacity(); - size_t eden_capacity = - (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes; +void G1CollectorPolicy::print_detailed_heap_transition(bool full) { + YoungList* young_list = _g1->young_list(); + + size_t eden_used_bytes_after_gc = young_list->eden_used_bytes(); + size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes(); + size_t heap_used_bytes_after_gc = _g1->used(); + + size_t heap_capacity_bytes_after_gc = _g1->capacity(); + size_t eden_capacity_bytes_after_gc = + (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc; - gclog_or_tty->print_cr( - " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") " - "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" " - "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->" - EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]", - EXT_SIZE_PARAMS(_eden_bytes_before_gc), - EXT_SIZE_PARAMS(_prev_eden_capacity), - EXT_SIZE_PARAMS(eden_bytes), - EXT_SIZE_PARAMS(eden_capacity), - EXT_SIZE_PARAMS(_survivor_bytes_before_gc), - EXT_SIZE_PARAMS(survivor_bytes), - EXT_SIZE_PARAMS(used_before_gc), - EXT_SIZE_PARAMS(_capacity_before_gc), - EXT_SIZE_PARAMS(used), - EXT_SIZE_PARAMS(capacity)); + gclog_or_tty->print( + " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") " + "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" " + "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->" + EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]", + EXT_SIZE_PARAMS(_eden_used_bytes_before_gc), + EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc), + EXT_SIZE_PARAMS(eden_used_bytes_after_gc), + EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc), + EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc), + EXT_SIZE_PARAMS(survivor_used_bytes_after_gc), + EXT_SIZE_PARAMS(_heap_used_bytes_before_gc), + EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc), + EXT_SIZE_PARAMS(heap_used_bytes_after_gc), + EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc)); - _prev_eden_capacity = eden_capacity; + if (full) { + MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc); + } + + gclog_or_tty->cr(); } void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, @@ -1880,7 +1899,7 @@ } -void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) { +void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) { double young_start_time_sec = os::elapsedTime(); YoungList* young_list = _g1->young_list(); @@ -2086,6 +2105,7 @@ double non_young_end_time_sec = os::elapsedTime(); phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); + evacuation_info.set_collectionset_regions(cset_region_length()); } void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
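The eden capacity printed in the detailed heap transition is derived rather than measured: the young target length covers eden plus survivors, so survivor occupancy is subtracted out. A worked standalone example with illustrative numbers (1 MB regions standing in for HeapRegion::GrainBytes):

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t region_bytes = 1024 * 1024;  // stand-in for HeapRegion::GrainBytes
      size_t young_target_regions = 10;         // stand-in for _young_list_target_length
      size_t survivor_used = 2 * region_bytes;  // survivor occupancy
      size_t eden_capacity = young_target_regions * region_bytes - survivor_used;
      printf("eden capacity = %zu bytes (8 regions)\n", eden_capacity);
      return 0;
    }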
* * This code is free software; you can redistribute it and/or modify it @@ -175,7 +175,6 @@ CollectionSetChooser* _collectionSetChooser; double _full_collection_start_sec; - size_t _cur_collection_pause_used_at_start_bytes; uint _cur_collection_pause_used_regions_at_start; // These exclude marking times. @@ -194,7 +193,6 @@ uint _young_list_target_length; uint _young_list_fixed_length; - size_t _prev_eden_capacity; // used for logging // The max number of regions we can extend the eden by while the GC // locker is active. This should be >= _young_list_target_length; @@ -671,34 +669,36 @@ bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); - // Update the heuristic info to record a collection pause of the given - // start time, where the given number of bytes were used at the start. - // This may involve changing the desired size of a collection set. + // Record the start and end of an evacuation pause. + void record_collection_pause_start(double start_time_sec); + void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info); - void record_stop_world_start(); - - void record_collection_pause_start(double start_time_sec, size_t start_used); + // Record the start and end of a full collection. + void record_full_collection_start(); + void record_full_collection_end(); // Must currently be called while the world is stopped. - void record_concurrent_mark_init_end(double - mark_init_elapsed_time_ms); + void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms); + // Record start and end of remark. void record_concurrent_mark_remark_start(); void record_concurrent_mark_remark_end(); + // Record start, end, and completion of cleanup. void record_concurrent_mark_cleanup_start(); void record_concurrent_mark_cleanup_end(int no_of_gc_threads); void record_concurrent_mark_cleanup_completed(); - void record_concurrent_pause(); + // Records the information about the heap size for reporting in + // print_detailed_heap_transition + void record_heap_size_info_at_start(bool full); - void record_collection_pause_end(double pause_time); + // Print heap sizing transition (with less and more detail). void print_heap_transition(); - void print_detailed_heap_transition(); + void print_detailed_heap_transition(bool full = false); - // Record the fact that a full collection occurred. - void record_full_collection_start(); - void record_full_collection_end(); + void record_stop_world_start(); + void record_concurrent_pause(); // Record how much space we copied during a GC. This is typically // called when a GC alloc region is being retired. @@ -720,7 +720,7 @@ // Choose a new collection set. Marks the chosen regions as being // "in_collection_set", and links them together. The head and number of // the collection set are available via access methods. - void finalize_cset(double target_pause_time_ms); + void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info); // The head of the list (via "next_in_collection_set()") representing the // current collection set. @@ -859,9 +859,16 @@ uint _max_survivor_regions; // For reporting purposes. - size_t _eden_bytes_before_gc; - size_t _survivor_bytes_before_gc; - size_t _capacity_before_gc; + // The value of _heap_bytes_before_gc is also used to calculate + // the cost of copying. 
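EvacuationInfo itself is defined outside this file; from the call sites in this changeset (set_collectionset_used_before(), set_bytes_copied(), set_collectionset_regions()) it behaves as a passive carrier that the policy fills in for the GC tracer. A rough sketch of that subset, as a guess at its shape rather than the actual class:

    #include <cstddef>

    // Hypothetical reconstruction of just the fields the policy sets above.
    class EvacuationInfoSketch {
      size_t _collectionset_regions;
      size_t _collectionset_used_before;
      size_t _bytes_copied;
    public:
      EvacuationInfoSketch()
        : _collectionset_regions(0), _collectionset_used_before(0), _bytes_copied(0) {}
      void set_collectionset_regions(size_t v)     { _collectionset_regions = v; }
      void set_collectionset_used_before(size_t v) { _collectionset_used_before = v; }
      void set_bytes_copied(size_t v)              { _bytes_copied = v; }
    };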
+ + size_t _eden_used_bytes_before_gc; // Eden occupancy before GC + size_t _survivor_used_bytes_before_gc; // Survivor occupancy before GC + size_t _heap_used_bytes_before_gc; // Heap occupancy before GC + size_t _metaspace_used_bytes_before_gc; // Metaspace occupancy before GC + + size_t _eden_capacity_bytes_before_gc; // Eden capacity before GC + size_t _heap_capacity_bytes_before_gc; // Heap capacity before GC // The amount of survivor regions after a collection. uint _recorded_survivor_regions; @@ -872,6 +879,7 @@ ageTable _survivors_age_table; public: + uint tenuring_threshold() const { return _tenuring_threshold; } inline GCAllocPurpose evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -155,11 +155,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) : _max_gc_threads(max_gc_threads), - _min_clear_cc_time_ms(-1.0), - _max_clear_cc_time_ms(-1.0), - _cur_clear_cc_time_ms(0.0), - _cum_clear_cc_time_ms(0.0), - _num_cc_clears(0L), _last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false), _last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"), _last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"), @@ -212,11 +207,11 @@ _last_gc_worker_times_ms.set(i, worker_time); double worker_known_time = _last_ext_root_scan_times_ms.get(i) + - _last_satb_filtering_times_ms.get(i) + - _last_update_rs_times_ms.get(i) + - _last_scan_rs_times_ms.get(i) + - _last_obj_copy_times_ms.get(i) + - _last_termination_times_ms.get(i); + _last_satb_filtering_times_ms.get(i) + + _last_update_rs_times_ms.get(i) + + _last_scan_rs_times_ms.get(i) + + _last_obj_copy_times_ms.get(i) + + _last_termination_times_ms.get(i); double worker_other_time = worker_time - worker_known_time; _last_gc_worker_other_times_ms.set(i, worker_other_time); @@ -285,15 +280,6 @@ } print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms); print_stats(1, "Clear CT", _cur_clear_ct_time_ms); - if (Verbose && G1Log::finest()) { - print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms); - print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms); - print_stats(1, "Min Clear CC", _min_clear_cc_time_ms); - print_stats(1, "Max Clear CC", _max_clear_cc_time_ms); - if (_num_cc_clears > 0) { - print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears)); - } - } double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms(); print_stats(1, "Other", misc_time_ms); if (_cur_verify_before_time_ms > 0.0) { @@ -311,19 +297,3 @@ print_stats(2, "Verify After", _cur_verify_after_time_ms); } } - -void G1GCPhaseTimes::record_cc_clear_time_ms(double ms) { - if (!(Verbose && G1Log::finest())) { - return; - } - - if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms) { - _min_clear_cc_time_ms = ms; - } - if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms) { - _max_clear_cc_time_ms = ms; - } - _cur_clear_cc_time_ms = ms; - _cum_clear_cc_time_ms += ms; - _num_cc_clears++; -} diff -r e0fb8a213650 -r 
836a62f43af9 src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ NOT_PRODUCT(static const T _uninitialized;) // We are caching the sum and average to only have to calculate them once. - // This is not done in an MT-safe way. It is intetened to allow single + // This is not done in an MT-safe way. It is intended to allow single // threaded code to call sum() and average() multiple times in any order // without having to worry about the cost. bool _has_new_data; @@ -133,13 +133,6 @@ double _cur_ref_proc_time_ms; double _cur_ref_enq_time_ms; - // Card Table Count Cache stats - double _min_clear_cc_time_ms; // min - double _max_clear_cc_time_ms; // max - double _cur_clear_cc_time_ms; // clearing time during current pause - double _cum_clear_cc_time_ms; // cummulative clearing time - jlong _num_cc_clears; // number of times the card count cache has been cleared - double _cur_collection_start_sec; double _root_region_scan_wait_time_ms; @@ -227,8 +220,6 @@ _root_region_scan_wait_time_ms = time_ms; } - void record_cc_clear_time_ms(double ms); - void record_young_free_cset_time_ms(double time_ms) { _recorded_young_free_cset_time_ms = time_ms; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1HotCardCache.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
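The WorkerDataArray comment above (whose "intetened" typo this change corrects to "intended") describes a deliberately non-MT-safe memoization: sum() and average() cache their result, and writes invalidate the cache. A minimal standalone sketch of the same idea (hypothetical class, not the VM's):

    #include <cstddef>
    #include <vector>

    template <class T>
    class CachedSumArray {
      std::vector<T> _data;
      mutable T _sum = T();
      mutable bool _has_sum = false;  // cache validity flag; no locking on purpose
    public:
      explicit CachedSumArray(size_t n) : _data(n, T()) {}
      void set(size_t i, T v) { _data[i] = v; _has_sum = false; }  // invalidate
      T sum() const {
        if (!_has_sum) {
          _sum = T();
          for (const T& v : _data) _sum += v;
          _has_sum = true;            // subsequent calls are free
        }
        return _sum;
      }
    };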
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/dirtyCardQueue.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/g1HotCardCache.hpp" +#include "gc_implementation/g1/g1RemSet.hpp" +#include "gc_implementation/g1/heapRegion.hpp" +#include "runtime/atomic.hpp" + +G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h): + _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {} + +void G1HotCardCache::initialize() { + if (default_use_cache()) { + _use_cache = true; + + _hot_cache_size = (1 << G1ConcRSLogCacheSize); + _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC); + + _n_hot = 0; + _hot_cache_idx = 0; + + // For refining the cards in the hot cache in parallel + int n_workers = (ParallelGCThreads > 0 ? + _g1h->workers()->total_workers() : 1); + _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers); + _hot_cache_par_claimed_idx = 0; + + _card_counts.initialize(); + } +} + +G1HotCardCache::~G1HotCardCache() { + if (default_use_cache()) { + assert(_hot_cache != NULL, "Logic"); + FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC); + } +} + +jbyte* G1HotCardCache::insert(jbyte* card_ptr) { + uint count = _card_counts.add_card_count(card_ptr); + if (!_card_counts.is_hot(count)) { + // The card is not hot so do not store it in the cache; + // return it for immediate refining. + return card_ptr; + } + + // Otherwise, the card is hot. + jbyte* res = NULL; + MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag); + if (_n_hot == _hot_cache_size) { + res = _hot_cache[_hot_cache_idx]; + _n_hot--; + } + + // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx. + _hot_cache[_hot_cache_idx] = card_ptr; + _hot_cache_idx++; + + if (_hot_cache_idx == _hot_cache_size) { + // Wrap around + _hot_cache_idx = 0; + } + _n_hot++; + + return res; +} + +void G1HotCardCache::drain(int worker_i, + G1RemSet* g1rs, + DirtyCardQueue* into_cset_dcq) { + if (!default_use_cache()) { + assert(_hot_cache == NULL, "Logic"); + return; + } + + assert(_hot_cache != NULL, "Logic"); + assert(!use_cache(), "cache should be disabled"); + int start_idx; + + while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once + int end_idx = start_idx + _hot_cache_par_chunk_size; + + if (start_idx == + Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) { + // The current worker has successfully claimed the chunk [start_idx..end_idx) + end_idx = MIN2(end_idx, _n_hot); + for (int i = start_idx; i < end_idx; i++) { + jbyte* card_ptr = _hot_cache[i]; + if (card_ptr != NULL) { + if (g1rs->refine_card(card_ptr, worker_i, true)) { + // The part of the heap spanned by the card contains references + // that point into the current collection set. + // We need to record the card pointer in the DirtyCardQueueSet + // that we use for such cards. + // + // The only time we care about recording cards that contain + // references that point into the collection set is during + // RSet updating while within an evacuation pause. + // In this case worker_i should be the id of a GC worker thread + assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint"); + assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), + err_msg("incorrect worker id: "INT32_FORMAT, worker_i)); + + into_cset_dcq->enqueue(card_ptr); + } + } + } + } + } + // The existing entries in the hot card cache, which were just refined + // above, are discarded prior to re-enabling the cache near the end of the GC. 
+} + +void G1HotCardCache::resize_card_counts(size_t heap_capacity) { + _card_counts.resize(heap_capacity); +} + +void G1HotCardCache::reset_card_counts(HeapRegion* hr) { + _card_counts.clear_region(hr); +} + +void G1HotCardCache::reset_card_counts() { + _card_counts.clear_all(); +} diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1HotCardCache.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP + +#include "gc_implementation/g1/g1_globals.hpp" +#include "gc_implementation/g1/g1CardCounts.hpp" +#include "memory/allocation.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/thread.inline.hpp" +#include "utilities/globalDefinitions.hpp" + +class DirtyCardQueue; +class G1CollectedHeap; +class G1RemSet; +class HeapRegion; + +// An evicting cache of cards that have been logged by the G1 post +// write barrier. Placing a card in the cache delays the refinement +// of the card until the card is evicted, or the cache is drained +// during the next evacuation pause. +// +// The first thing the G1 post write barrier does is to check whether +// the card containing the updated pointer is already dirty and, if +// so, skips the remaining code in the barrier. +// +// Delaying the refinement of a card will make the card fail the +// first is_dirty check in the write barrier, skipping the remainder +// of the write barrier. +// +// This can significantly reduce the overhead of the write barrier +// code, increasing throughput. + +class G1HotCardCache: public CHeapObj<mtGC> { + G1CollectedHeap* _g1h; + + // The card cache table + jbyte** _hot_cache; + + int _hot_cache_size; + int _n_hot; + int _hot_cache_idx; + + int _hot_cache_par_chunk_size; + volatile int _hot_cache_par_claimed_idx; + + bool _use_cache; + + G1CardCounts _card_counts; + + bool default_use_cache() const { + return (G1ConcRSLogCacheSize > 0); + } + + public: + G1HotCardCache(G1CollectedHeap* g1h); + ~G1HotCardCache(); + + void initialize(); + + bool use_cache() { return _use_cache; } + + void set_use_cache(bool b) { + _use_cache = (b ? default_use_cache() : false); + } + + // Returns the card to be refined or NULL. + // + // Increments the count for the given card. If the card is not 'hot', + // it is returned for immediate refining.
Otherwise the card is + added to the hot card cache. + // If there is enough room in the hot card cache for the card we're + // adding, NULL is returned and no further action is needed. + // If we evict a card from the cache to make room for the new card, + // the evicted card is then returned for refinement. + jbyte* insert(jbyte* card_ptr); + + // Refine the cards that have been delayed as a result of + // being in the cache. + void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq); + + // Set up for parallel processing of the cards in the hot cache + void reset_hot_cache_claimed_index() { + _hot_cache_par_claimed_idx = 0; + } + + // Resets the hot card cache and discards the entries. + void reset_hot_cache() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint"); + assert(Thread::current()->is_VM_thread(), "Current thread should be the VM thread"); + _hot_cache_idx = 0; _n_hot = 0; + } + + bool hot_cache_is_empty() { return _n_hot == 0; } + + // Resizes the card counts table to match the given capacity + void resize_card_counts(size_t heap_capacity); + + // Zeros the values in the card counts table for the entire committed heap + void reset_card_counts(); + + // Zeros the values in the card counts table for the given region + void reset_card_counts(HeapRegion* hr); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1MarkSweep.cpp --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
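To make the insert() contract above concrete, here is a single-threaded sketch of an evicting hot-card cache using standard-library types (a map stands in for G1CardCounts and plain pointers for jbyte* cards; the real code also serializes insertions with HotCardCache_lock):

    #include <cstddef>
    #include <unordered_map>
    #include <vector>

    class HotCardCacheSketch {
      std::unordered_map<const void*, unsigned> _counts; // stand-in for G1CardCounts
      std::vector<const void*> _ring;                    // fixed-size "hot" ring
      size_t _idx;
      unsigned _hot_threshold;
    public:
      HotCardCacheSketch(size_t size, unsigned hot_threshold)
        : _ring(size, nullptr), _idx(0), _hot_threshold(hot_threshold) {}

      // Returns the card to refine now: the card itself while it is still
      // "cold", an older hot card evicted to make room, or nullptr when the
      // card was absorbed and its refinement deferred.
      const void* insert(const void* card) {
        if (++_counts[card] < _hot_threshold) {
          return card;                      // not hot yet: refine immediately
        }
        const void* evicted = _ring[_idx];  // nullptr while the slot is free
        _ring[_idx] = card;                 // cache the hot card
        _idx = (_idx + 1) % _ring.size();
        return evicted;
      }
    };

During the pause, drain() in g1HotCardCache.cpp above then flushes the ring, with refinement workers claiming fixed-size chunks through Atomic::cmpxchg on _hot_cache_par_claimed_idx.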
* * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,10 @@ #include "code/icBuffer.hpp" #include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1MarkSweep.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "memory/gcLocker.hpp" #include "memory/genCollectedHeap.hpp" #include "memory/modRefBarrierSet.hpp" @@ -119,7 +123,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer()); GenMarkSweep::trace(" 1"); SharedHeap* sh = SharedHeap::heap(); @@ -139,38 +143,36 @@ assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity"); rp->setup_policy(clear_all_softrefs); - rp->process_discovered_references(&GenMarkSweep::is_alive, - &GenMarkSweep::keep_alive, - &GenMarkSweep::follow_stack_closure, - NULL); + const ReferenceProcessorStats& stats = + rp->process_discovered_references(&GenMarkSweep::is_alive, + &GenMarkSweep::keep_alive, + &GenMarkSweep::follow_stack_closure, + NULL, + gc_timer()); + gc_tracer()->report_gc_reference_stats(stats); - // Follow system dictionary roots and unload classes - bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive); - assert(GenMarkSweep::_marking_stack.is_empty(), - "stack should be empty by now"); + + // This is the point where the entire marking should have completed. + assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed"); - // Follow code cache roots (has to be done after system dictionary, - // assumes all live klasses are marked) + // Unload classes and purge the SystemDictionary. + bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive); + + // Unload nmethods. CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class); - GenMarkSweep::follow_stack(); - // Update subklass/sibling/implementor links of live klasses + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(&GenMarkSweep::is_alive); - assert(GenMarkSweep::_marking_stack.is_empty(), - "stack should be empty by now"); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(&GenMarkSweep::is_alive); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - assert(GenMarkSweep::_marking_stack.is_empty(), - "stack should be empty by now"); - if (VerifyDuringGC) { HandleMark hm; // handle scope COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact); - gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying "); Universe::heap()->prepare_for_verify(); // Note: we can verify only the heap here. When an object is // marked, the previous value of the mark word (including @@ -182,12 +184,16 @@ // fail. At the end of the GC, the orginal mark word values // (including hash values) are restored to the appropriate // objects. 
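The switch from TraceTime to GCTraceTime binds each mark-sweep phase to the shared STW GC timer so the new tracing framework can emit phase events. A standalone RAII sketch of the mechanism (hypothetical class; the real GCTraceTime also honors the G1Log::fine() && Verbose gate and reports to the timer and tracer rather than stdout):

    #include <chrono>
    #include <cstdio>

    class PhaseTimerSketch {
      const char* _name;
      std::chrono::steady_clock::time_point _start;
    public:
      explicit PhaseTimerSketch(const char* name)
        : _name(name), _start(std::chrono::steady_clock::now()) {}
      ~PhaseTimerSketch() {  // fires when the phase's scope ends
        double ms = std::chrono::duration<double, std::milli>(
            std::chrono::steady_clock::now() - _start).count();
        std::printf("[%s, %.3f ms]\n", _name, ms);
      }
    };

    static void phase1_sketch() {
      PhaseTimerSketch tm("phase 1");  // scoped, like GCTraceTime tm(...) above
      // ... marking work ...
    }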
- Universe::heap()->verify(/* silent */ false, - /* option */ VerifyOption_G1UseMarkWord); + if (!VerifySilently) { + gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying "); + } + Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord); + if (!VerifySilently) { + gclog_or_tty->print_cr("]"); + } + } - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - gclog_or_tty->print_cr("]"); - } + gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive); } class G1PrepareCompactClosure: public HeapRegionClosure { @@ -260,7 +266,7 @@ G1CollectedHeap* g1h = G1CollectedHeap::heap(); - TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer()); GenMarkSweep::trace("2"); // find the first region @@ -297,7 +303,7 @@ G1CollectedHeap* g1h = G1CollectedHeap::heap(); // Adjust the pointers to reflect the new locations - TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer()); GenMarkSweep::trace("3"); SharedHeap* sh = SharedHeap::heap(); @@ -308,17 +314,16 @@ sh->process_strong_roots(true, // activate StrongRootsScope false, // not scavenging. SharedHeap::SO_AllClasses, - &GenMarkSweep::adjust_root_pointer_closure, + &GenMarkSweep::adjust_pointer_closure, NULL, // do not touch code cache here &GenMarkSweep::adjust_klass_closure); assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity"); - g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure); + g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) - g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure, - &GenMarkSweep::adjust_pointer_closure); + g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure); GenMarkSweep::adjust_marks(); @@ -357,7 +362,7 @@ // to use a higher index (saved from phase2) when verifying perm_gen. G1CollectedHeap* g1h = G1CollectedHeap::heap(); - TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer()); GenMarkSweep::trace("4"); G1SpaceCompactClosure blk; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1MarkSweep.hpp --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -54,6 +54,9 @@ static void invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs); + static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; } + static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; } + private: // Mark live objects diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp --- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -224,6 +224,7 @@ // Monitoring support used by // MemoryService // jstat counters + // Tracing size_t overall_reserved() { return _overall_reserved; } size_t overall_committed() { return _overall_committed; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1RemSet.cpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,10 +29,12 @@ #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" +#include "gc_implementation/g1/g1HotCardCache.hpp" #include "gc_implementation/g1/g1GCPhaseTimes.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionRemSet.hpp" #include "memory/iterator.hpp" #include "oops/oop.inline.hpp" #include "utilities/intHisto.hpp" @@ -72,7 +74,8 @@ _ct_bs(ct_bs), _g1p(_g1->g1_policy()), _cg1r(g1->concurrent_g1_refine()), _cset_rs_update_cl(NULL), - _cards_scanned(NULL), _total_cards_scanned(0) + _cards_scanned(NULL), _total_cards_scanned(0), + _prev_period_summary() { _seq_task = new SubTasksDone(NumSeqTasks); guarantee(n_workers() > 0, "There should be some workers"); @@ -80,6 +83,7 @@ for (uint i = 0; i < n_workers(); i++) { _cset_rs_update_cl[i] = NULL; } + _prev_period_summary.initialize(this, n_workers()); } G1RemSet::~G1RemSet() { @@ -169,14 +173,13 @@ // _try_claimed || r->claim_iter() // is true: either we're supposed to work on claimed-but-not-complete // regions, or we successfully claimed the region. - HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i); - hrrs->init_iterator(iter); + HeapRegionRemSetIterator iter(hrrs); size_t card_index; // We claim cards in block so as to recude the contention. The block size is determined by // the G1RSetScanBlockSize parameter. 
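The block-claiming scheme that the comment above describes amortizes contention: each worker pays one atomic update per G1RSetScanBlockSize cards instead of one per card. A standalone sketch of the idea (the VM's shared counter is the remembered set's claim index, advanced via hrrs->iter_claimed_next()):

    #include <atomic>
    #include <cstddef>

    struct ClaimedIter {
      std::atomic<size_t> claimed{0};
      size_t claim_next(size_t block) { return claimed.fetch_add(block); }
    };

    // Every worker walks all cards but only processes those inside the
    // blocks it has claimed; unclaimed ranges are skipped cheaply.
    static void scan_cards(ClaimedIter& it, size_t num_cards, size_t block_size) {
      size_t jump_to = it.claim_next(block_size);
      for (size_t cur = 0; cur < num_cards; ++cur) {
        if (cur >= jump_to + block_size) {
          jump_to = it.claim_next(block_size);  // take the next block
        }
        if (cur >= jump_to && cur < jump_to + block_size) {
          // process card 'cur' ...
        }
      }
    }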
size_t jump_to_card = hrrs->iter_claimed_next(_block_size); - for (size_t current_card = 0; iter->has_next(card_index); current_card++) { + for (size_t current_card = 0; iter.has_next(card_index); current_card++) { if (current_card >= jump_to_card + _block_size) { jump_to_card = hrrs->iter_claimed_next(_block_size); } @@ -248,7 +251,7 @@ assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker"); - if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) { + if (_g1rs->refine_card(card_ptr, worker_i, true)) { // 'card_ptr' contains references that point into the collection // set. We need to record the card in the DCQS // (G1CollectedHeap::into_cset_dirty_card_queue_set()) @@ -289,9 +292,6 @@ #if CARD_REPEAT_HISTO ct_freq_update_histo_and_reset(); #endif - if (worker_i == 0) { - _cg1r->clear_and_record_card_counts(); - } // We cache the value of 'oc' closure into the appropriate slot in the // _cset_rs_update_cl for this worker @@ -397,7 +397,7 @@ // RSet updating, // * the post-write barrier shouldn't be logging updates to young // regions (but there is a situation where this can happen - see - // the comment in G1RemSet::concurrentRefineOneCard below - + // the comment in G1RemSet::refine_card() below - // that should not be applicable here), and // * during actual RSet updating, the filtering of cards in young // regions in HeapRegion::oops_on_card_seq_iterate_careful is @@ -503,8 +503,6 @@ claim_val); } - - G1TriggerClosure::G1TriggerClosure() : _triggered(false) { } @@ -525,13 +523,91 @@ _record_refs_into_cset(record_refs_into_cset), _push_ref_cl(push_ref_cl), _worker_i(worker_i) { } -bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i, - bool check_for_refs_into_cset) { +// Returns true if the given card contains references that point +// into the collection set, if we're checking for such references; +// false otherwise. + +bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i, + bool check_for_refs_into_cset) { + + // If the card is no longer dirty, nothing to do. + if (*card_ptr != CardTableModRefBS::dirty_card_val()) { + // No need to return that this card contains refs that point + // into the collection set. + return false; + } + // Construct the region representing the card. HeapWord* start = _ct_bs->addr_for(card_ptr); // And find the region containing it. HeapRegion* r = _g1->heap_region_containing(start); - assert(r != NULL, "unexpected null"); + if (r == NULL) { + // Again no need to return that this card contains refs that + // point into the collection set. + return false; // Not in the G1 heap (might be in perm, for example.) + } + + // Why do we have to check here whether a card is on a young region, + // given that we dirty young regions and, as a result, the + // post-barrier is supposed to filter them out and never to enqueue + // them? When we allocate a new region as the "allocation region" we + // actually dirty its cards after we release the lock, since card + // dirtying while holding the lock was a performance bottleneck. So, + // as a result, it is possible for other threads to actually + // allocate objects in the region (after they acquire the lock) + // before all the cards on the region are dirtied. This is unlikely, + // and it doesn't happen often, but it can happen. So, the extra + // check below filters out those cards.
+ if (r->is_young()) { + return false; + } + + // While we are processing RSet buffers during the collection, we + // actually don't want to scan any cards on the collection set, + // since we don't want to update remembered sets with entries that + // point into the collection set, given that live objects from the + // collection set are about to move and such entries will be stale + // very soon. This change also deals with a reliability issue which + // involves scanning a card in the collection set and coming across + // an array that was being chunked and looking malformed. Note, + // however, that if evacuation fails, we have to scan any objects + // that were not moved and create any missing entries. + if (r->in_collection_set()) { + return false; + } + + // The result from the hot card cache insert call is either: + // * pointer to the current card + // (implying that the current card is not 'hot'), + // * null + // (meaning we had inserted the card ptr into the "hot" card cache, + // which had some headroom), + // * a pointer to a "hot" card that was evicted from the "hot" cache. + // + + G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); + if (hot_card_cache->use_cache()) { + assert(!check_for_refs_into_cset, "sanity"); + assert(!SafepointSynchronize::is_at_safepoint(), "sanity"); + + card_ptr = hot_card_cache->insert(card_ptr); + if (card_ptr == NULL) { + // There was no eviction. Nothing to do. + return false; + } + + start = _ct_bs->addr_for(card_ptr); + r = _g1->heap_region_containing(start); + if (r == NULL) { + // Not in the G1 heap + return false; + } + + // Checking whether the region we got back from the cache + // is young here is inappropriate. The region could have been + // freed, reallocated and tagged as young while in the cache. + // Hence we could see its young type change at any time. + } // Don't use addr_for(card_ptr + 1) which can ask for // a card beyond the heap. This is not safe without a perm @@ -611,183 +687,42 @@ _conc_refine_cards++; } - return trigger_cl.triggered(); + // This gets set to true if the card being refined has + // references that point into the collection set. + bool has_refs_into_cset = trigger_cl.triggered(); + + // We should only be detecting that the card contains references + // that point into the collection set if the current thread is + // a GC worker thread. + assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(), + "invalid result at non safepoint"); + + return has_refs_into_cset; } -bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i, - bool check_for_refs_into_cset) { - // If the card is no longer dirty, nothing to do. - if (*card_ptr != CardTableModRefBS::dirty_card_val()) { - // No need to return that this card contains refs that point - // into the collection set. - return false; - } - - // Construct the region representing the card. - HeapWord* start = _ct_bs->addr_for(card_ptr); - // And find the region containing it. - HeapRegion* r = _g1->heap_region_containing(start); - if (r == NULL) { - // Again no need to return that this card contains refs that - // point into the collection set. - return false; // Not in the G1 heap (might be in perm, for example.) - } - // Why do we have to check here whether a card is on a young region, - // given that we dirty young regions and, as a result, the - // post-barrier is supposed to filter them out and never to enqueue - // them?
When we allocate a new region as the "allocation region" we - // actually dirty its cards after we release the lock, since card - // dirtying while holding the lock was a performance bottleneck. So, - // as a result, it is possible for other threads to actually - // allocate objects in the region (after the acquire the lock) - // before all the cards on the region are dirtied. This is unlikely, - // and it doesn't happen often, but it can happen. So, the extra - // check below filters out those cards. - if (r->is_young()) { - return false; - } - // While we are processing RSet buffers during the collection, we - // actually don't want to scan any cards on the collection set, - // since we don't want to update remebered sets with entries that - // point into the collection set, given that live objects from the - // collection set are about to move and such entries will be stale - // very soon. This change also deals with a reliability issue which - // involves scanning a card in the collection set and coming across - // an array that was being chunked and looking malformed. Note, - // however, that if evacuation fails, we have to scan any objects - // that were not moved and create any missing entries. - if (r->in_collection_set()) { - return false; - } +void G1RemSet::print_periodic_summary_info() { + G1RemSetSummary current; + current.initialize(this, n_workers()); - // Should we defer processing the card? - // - // Previously the result from the insert_cache call would be - // either card_ptr (implying that card_ptr was currently "cold"), - // null (meaning we had inserted the card ptr into the "hot" - // cache, which had some headroom), or a "hot" card ptr - // extracted from the "hot" cache. - // - // Now that the _card_counts cache in the ConcurrentG1Refine - // instance is an evicting hash table, the result we get back - // could be from evicting the card ptr in an already occupied - // bucket (in which case we have replaced the card ptr in the - // bucket with card_ptr and "defer" is set to false). To avoid - // having a data structure (updates to which would need a lock) - // to hold these unprocessed dirty cards, we need to immediately - // process card_ptr. The actions needed to be taken on return - // from cache_insert are summarized in the following table: - // - // res defer action - // -------------------------------------------------------------- - // null false card evicted from _card_counts & replaced with - // card_ptr; evicted ptr added to hot cache. - // No need to process res; immediately process card_ptr - // - // null true card not evicted from _card_counts; card_ptr added - // to hot cache. - // Nothing to do. - // - // non-null false card evicted from _card_counts & replaced with - // card_ptr; evicted ptr is currently "cold" or - // caused an eviction from the hot cache. - // Immediately process res; process card_ptr. - // - // non-null true card not evicted from _card_counts; card_ptr is - // currently cold, or caused an eviction from hot - // cache. - // Immediately process res; no need to process card_ptr. - - - jbyte* res = card_ptr; - bool defer = false; + _prev_period_summary.subtract_from(¤t); + print_summary_info(&_prev_period_summary); - // This gets set to true if the card being refined has references - // that point into the collection set. 
- bool oops_into_cset = false; - - if (_cg1r->use_cache()) { - jbyte* res = _cg1r->cache_insert(card_ptr, &defer); - if (res != NULL && (res != card_ptr || defer)) { - start = _ct_bs->addr_for(res); - r = _g1->heap_region_containing(start); - if (r != NULL) { - // Checking whether the region we got back from the cache - // is young here is inappropriate. The region could have been - // freed, reallocated and tagged as young while in the cache. - // Hence we could see its young type change at any time. - // - // Process card pointer we get back from the hot card cache. This - // will check whether the region containing the card is young - // _after_ checking that the region has been allocated from. - oops_into_cset = concurrentRefineOneCard_impl(res, worker_i, - false /* check_for_refs_into_cset */); - // The above call to concurrentRefineOneCard_impl is only - // performed if the hot card cache is enabled. This cache is - // disabled during an evacuation pause - which is the only - // time when we need know if the card contains references - // that point into the collection set. Also when the hot card - // cache is enabled, this code is executed by the concurrent - // refine threads - rather than the GC worker threads - and - // concurrentRefineOneCard_impl will return false. - assert(!oops_into_cset, "should not see true here"); - } - } - } - - if (!defer) { - oops_into_cset = - concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset); - // We should only be detecting that the card contains references - // that point into the collection set if the current thread is - // a GC worker thread. - assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(), - "invalid result at non safepoint"); - } - return oops_into_cset; + _prev_period_summary.set(¤t); } -class HRRSStatsIter: public HeapRegionClosure { - size_t _occupied; - size_t _total_mem_sz; - size_t _max_mem_sz; - HeapRegion* _max_mem_sz_region; -public: - HRRSStatsIter() : - _occupied(0), - _total_mem_sz(0), - _max_mem_sz(0), - _max_mem_sz_region(NULL) - {} +void G1RemSet::print_summary_info() { + G1RemSetSummary current; + current.initialize(this, n_workers()); + + print_summary_info(¤t, " Cumulative RS summary"); +} - bool doHeapRegion(HeapRegion* r) { - if (r->continuesHumongous()) return false; - size_t mem_sz = r->rem_set()->mem_size(); - if (mem_sz > _max_mem_sz) { - _max_mem_sz = mem_sz; - _max_mem_sz_region = r; - } - _total_mem_sz += mem_sz; - size_t occ = r->rem_set()->occupied(); - _occupied += occ; - return false; +void G1RemSet::print_summary_info(G1RemSetSummary * summary, const char * header) { + assert(summary != NULL, "just checking"); + + if (header != NULL) { + gclog_or_tty->print_cr("%s", header); } - size_t total_mem_sz() { return _total_mem_sz; } - size_t max_mem_sz() { return _max_mem_sz; } - size_t occupied() { return _occupied; } - HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; } -}; - -class PrintRSThreadVTimeClosure : public ThreadClosure { -public: - virtual void do_thread(Thread *t) { - ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t; - gclog_or_tty->print(" %5.2f", crt->vtime_accum()); - } -}; - -void G1RemSet::print_summary_info() { - G1CollectedHeap* g1 = G1CollectedHeap::heap(); #if CARD_REPEAT_HISTO gclog_or_tty->print_cr("\nG1 card_repeat count histogram: "); @@ -795,65 +730,29 @@ card_repeat_count.print_on(gclog_or_tty); #endif - gclog_or_tty->print_cr("\n Concurrent RS processed %d cards", - _conc_refine_cards); - DirtyCardQueueSet& dcqs = 
JavaThread::dirty_card_queue_set(); - jint tot_processed_buffers = - dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread(); - gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers); - gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.", - dcqs.processed_buffers_rs_thread(), - 100.0*(float)dcqs.processed_buffers_rs_thread()/ - (float)tot_processed_buffers); - gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.", - dcqs.processed_buffers_mut(), - 100.0*(float)dcqs.processed_buffers_mut()/ - (float)tot_processed_buffers); - gclog_or_tty->print_cr(" Conc RS threads times(s)"); - PrintRSThreadVTimeClosure p; - gclog_or_tty->print(" "); - g1->concurrent_g1_refine()->threads_do(&p); - gclog_or_tty->print_cr(""); - - HRRSStatsIter blk; - g1->heap_region_iterate(&blk); - gclog_or_tty->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K." - " Max = "SIZE_FORMAT"K.", - blk.total_mem_sz()/K, blk.max_mem_sz()/K); - gclog_or_tty->print_cr(" Static structures = "SIZE_FORMAT"K," - " free_lists = "SIZE_FORMAT"K.", - HeapRegionRemSet::static_mem_size() / K, - HeapRegionRemSet::fl_mem_size() / K); - gclog_or_tty->print_cr(" "SIZE_FORMAT" occupied cards represented.", - blk.occupied()); - HeapRegion* max_mem_sz_region = blk.max_mem_sz_region(); - HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set(); - gclog_or_tty->print_cr(" Max size region = "HR_FORMAT", " - "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.", - HR_FORMAT_PARAMS(max_mem_sz_region), - (rem_set->mem_size() + K - 1)/K, - (rem_set->occupied() + K - 1)/K); - gclog_or_tty->print_cr(" Did %d coarsenings.", - HeapRegionRemSet::n_coarsenings()); + summary->print_on(gclog_or_tty); } void G1RemSet::prepare_for_verify() { if (G1HRRSFlushLogBuffersOnVerify && (VerifyBeforeGC || VerifyAfterGC) - && !_g1->full_collection()) { + && (!_g1->full_collection() || G1VerifyRSetsDuringFullGC)) { cleanupHRRS(); _g1->set_refine_cte_cl_concurrency(false); if (SafepointSynchronize::is_at_safepoint()) { DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); dcqs.concatenate_logs(); } - bool cg1r_use_cache = _cg1r->use_cache(); - _cg1r->set_use_cache(false); + + G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); + bool use_hot_card_cache = hot_card_cache->use_cache(); + hot_card_cache->set_use_cache(false); + DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set()); updateRS(&into_cset_dcq, 0); _g1->into_cset_dirty_card_queue_set().clear(); - _cg1r->set_use_cache(cg1r_use_cache); + hot_card_cache->set_use_cache(use_hot_card_cache); assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1RemSet.hpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,8 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP +#include "gc_implementation/g1/g1RemSetSummary.hpp" + // A G1RemSet provides ways of iterating over pointers into a selected // collection set. 
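print_periodic_summary_info() above uses a snapshot-and-subtract idiom: take a fresh snapshot, turn the saved one into the delta for the period, print it, then roll the saved snapshot forward. A single-counter sketch of the same idiom (illustrative, not the VM class):

    // prev.subtract_from(&current) leaves prev holding current - prev,
    // exactly as G1RemSetSummary::subtract_from() does for each counter.
    struct SummarySketch {
      long refined_cards = 0;
      void subtract_from(const SummarySketch* other) {
        refined_cards = other->refined_cards - refined_cards;
      }
      void set(const SummarySketch* other) { refined_cards = other->refined_cards; }
    };

    // Usage, mirroring print_periodic_summary_info() (take_snapshot() is a
    // hypothetical helper):
    //   SummarySketch current = take_snapshot();
    //   prev.subtract_from(&current);   // prev now holds this period's delta
    //   report(prev);                   // print the delta
    //   prev.set(&current);             // roll forward for the next period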
@@ -37,9 +39,11 @@ // so that they can be used to update the individual region remsets. class G1RemSet: public CHeapObj { +private: + G1RemSetSummary _prev_period_summary; protected: G1CollectedHeap* _g1; - unsigned _conc_refine_cards; + size_t _conc_refine_cards; uint n_workers(); protected: @@ -53,27 +57,21 @@ NumSeqTasks = 1 }; - CardTableModRefBS* _ct_bs; - SubTasksDone* _seq_task; - G1CollectorPolicy* _g1p; + CardTableModRefBS* _ct_bs; + SubTasksDone* _seq_task; + G1CollectorPolicy* _g1p; - ConcurrentG1Refine* _cg1r; + ConcurrentG1Refine* _cg1r; - size_t* _cards_scanned; - size_t _total_cards_scanned; + size_t* _cards_scanned; + size_t _total_cards_scanned; // Used for caching the closure that is responsible for scanning // references into the collection set. OopsInHeapRegionClosure** _cset_rs_update_cl; - // The routine that performs the actual work of refining a dirty - // card. - // If check_for_refs_into_refs is true then a true result is returned - // if the card contains oops that have references into the current - // collection set. - bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i, - bool check_for_refs_into_cset); - + // Print the given summary info + virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL); public: // This is called to reset dual hash tables after the gc pause // is finished and the initial hash table is no longer being @@ -90,8 +88,7 @@ // function can be helpful in partitioning the work to be done. It // should be the same as the "i" passed to the calling thread's // work(i) function. In the sequential case this param will be ingored. - void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, - int worker_i); + void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i); // Prepare for and cleanup after an oops_into_collection_set_do // call. Must call each of these once before and after (in sequential @@ -124,20 +121,26 @@ void scrub_par(BitMap* region_bm, BitMap* card_bm, uint worker_num, int claim_val); - // Refine the card corresponding to "card_ptr". If "sts" is non-NULL, - // join and leave around parts that must be atomic wrt GC. (NULL means - // being done at a safepoint.) + // Refine the card corresponding to "card_ptr". // If check_for_refs_into_cset is true, a true result is returned // if the given card contains oops that have references into the // current collection set. - virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i, - bool check_for_refs_into_cset); + virtual bool refine_card(jbyte* card_ptr, + int worker_i, + bool check_for_refs_into_cset); - // Print any relevant summary info. + // Print accumulated summary info from the start of the VM. virtual void print_summary_info(); + // Print accumulated summary info from the last time called. + virtual void print_periodic_summary_info(); + // Prepare remembered set for verification. virtual void prepare_for_verify(); + + size_t conc_refine_cards() const { + return _conc_refine_cards; + } }; class CountNonCleanMemRegionClosure: public MemRegionClosure { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
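Seen from a caller, the renamed refine_card() keeps the contract of the old concurrentRefineOneCard(): the boolean result only matters inside a pause, where true means the card still references the collection set and must be queued for post-evacuation fix-up. A caller-side sketch with stub types and a stub body (purely illustrative):

    #include <cstdio>

    // Stub standing in for G1RemSet::refine_card(); the real version scans
    // the card and reports whether it references the collection set.
    static bool refine_card(void* card_ptr, int worker_i, bool check_for_refs_into_cset) {
      (void)card_ptr; (void)worker_i; (void)check_for_refs_into_cset;
      return false;  // placeholder result
    }

    static void refine_during_pause(void* card_ptr, int worker_i) {
      if (refine_card(card_ptr, worker_i, true /* check_for_refs_into_cset */)) {
        // Mirrors the into-cset closure in g1RemSet.cpp above: record the
        // card so its entries can be fixed up after evacuation.
        std::printf("enqueue %p into the into-cset dirty card queue\n", card_ptr);
      }
    }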
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/concurrentG1Refine.hpp" +#include "gc_implementation/g1/concurrentG1RefineThread.hpp" +#include "gc_implementation/g1/heapRegion.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/g1RemSet.inline.hpp" +#include "gc_implementation/g1/g1RemSetSummary.hpp" +#include "gc_implementation/g1/heapRegionRemSet.hpp" +#include "runtime/thread.inline.hpp" + +class GetRSThreadVTimeClosure : public ThreadClosure { +private: + G1RemSetSummary* _summary; + uint _counter; + +public: + GetRSThreadVTimeClosure(G1RemSetSummary * summary) : ThreadClosure(), _summary(summary), _counter(0) { + assert(_summary != NULL, "just checking"); + } + + virtual void do_thread(Thread* t) { + ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t; + _summary->set_rs_thread_vtime(_counter, crt->vtime_accum()); + _counter++; + } +}; + +void G1RemSetSummary::update() { + _num_refined_cards = remset()->conc_refine_cards(); + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + _num_processed_buf_mutator = dcqs.processed_buffers_mut(); + _num_processed_buf_rs_threads = dcqs.processed_buffers_rs_thread(); + + _num_coarsenings = HeapRegionRemSet::n_coarsenings(); + + ConcurrentG1Refine * cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); + if (_rs_threads_vtimes != NULL) { + GetRSThreadVTimeClosure p(this); + cg1r->worker_threads_do(&p); + } + set_sampling_thread_vtime(cg1r->sampling_thread()->vtime_accum()); +} + +void G1RemSetSummary::set_rs_thread_vtime(uint thread, double value) { + assert(_rs_threads_vtimes != NULL, "just checking"); + assert(thread < _num_vtimes, "just checking"); + _rs_threads_vtimes[thread] = value; +} + +double G1RemSetSummary::rs_thread_vtime(uint thread) const { + assert(_rs_threads_vtimes != NULL, "just checking"); + assert(thread < _num_vtimes, "just checking"); + return _rs_threads_vtimes[thread]; +} + +void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) { + assert(_rs_threads_vtimes == NULL, "just checking"); + assert(remset != NULL, "just checking"); + + _remset = remset; + _num_vtimes = num_workers; + _rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC); + memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes); + + update(); +} + +void G1RemSetSummary::set(G1RemSetSummary* other) { + assert(other != NULL, "just checking"); + assert(remset() == other->remset(), "just checking"); + assert(_num_vtimes == other->_num_vtimes, "just checking"); + + _num_refined_cards = other->num_concurrent_refined_cards(); + + 
_num_processed_buf_mutator = other->num_processed_buf_mutator(); + _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads(); + + _num_coarsenings = other->_num_coarsenings; + + memcpy(_rs_threads_vtimes, other->_rs_threads_vtimes, sizeof(double) * _num_vtimes); + + set_sampling_thread_vtime(other->sampling_thread_vtime()); +} + +void G1RemSetSummary::subtract_from(G1RemSetSummary* other) { + assert(other != NULL, "just checking"); + assert(remset() == other->remset(), "just checking"); + assert(_num_vtimes == other->_num_vtimes, "just checking"); + + _num_refined_cards = other->num_concurrent_refined_cards() - _num_refined_cards; + + _num_processed_buf_mutator = other->num_processed_buf_mutator() - _num_processed_buf_mutator; + _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads() - _num_processed_buf_rs_threads; + + _num_coarsenings = other->num_coarsenings() - _num_coarsenings; + + for (uint i = 0; i < _num_vtimes; i++) { + set_rs_thread_vtime(i, other->rs_thread_vtime(i) - rs_thread_vtime(i)); + } + + _sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime; +} + +class HRRSStatsIter: public HeapRegionClosure { + size_t _occupied; + size_t _total_mem_sz; + size_t _max_mem_sz; + HeapRegion* _max_mem_sz_region; +public: + HRRSStatsIter() : + _occupied(0), + _total_mem_sz(0), + _max_mem_sz(0), + _max_mem_sz_region(NULL) + {} + + bool doHeapRegion(HeapRegion* r) { + size_t mem_sz = r->rem_set()->mem_size(); + if (mem_sz > _max_mem_sz) { + _max_mem_sz = mem_sz; + _max_mem_sz_region = r; + } + _total_mem_sz += mem_sz; + size_t occ = r->rem_set()->occupied(); + _occupied += occ; + return false; + } + size_t total_mem_sz() { return _total_mem_sz; } + size_t max_mem_sz() { return _max_mem_sz; } + size_t occupied() { return _occupied; } + HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; } +}; + +double calc_percentage(size_t numerator, size_t denominator) { + if (denominator != 0) { + return (double)numerator / denominator * 100.0; + } else { + return 0.0f; + } +} + +void G1RemSetSummary::print_on(outputStream* out) { + out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards", + num_concurrent_refined_cards()); + out->print_cr(" Of %d completed buffers:", num_processed_buf_total()); + out->print_cr(" %8d (%5.1f%%) by concurrent RS threads.", + num_processed_buf_total(), + calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total())); + out->print_cr(" %8d (%5.1f%%) by mutator threads.", + num_processed_buf_mutator(), + calc_percentage(num_processed_buf_mutator(), num_processed_buf_total())); + out->print_cr(" Concurrent RS threads times (s)"); + out->print(" "); + for (uint i = 0; i < _num_vtimes; i++) { + out->print(" %5.2f", rs_thread_vtime(i)); + } + out->cr(); + out->print_cr(" Concurrent sampling threads times (s)"); + out->print_cr(" %5.2f", sampling_thread_vtime()); + + HRRSStatsIter blk; + G1CollectedHeap::heap()->heap_region_iterate(&blk); + out->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K." 
+ " Max = "SIZE_FORMAT"K.", + blk.total_mem_sz()/K, blk.max_mem_sz()/K); + out->print_cr(" Static structures = "SIZE_FORMAT"K," + " free_lists = "SIZE_FORMAT"K.", + HeapRegionRemSet::static_mem_size() / K, + HeapRegionRemSet::fl_mem_size() / K); + out->print_cr(" "SIZE_FORMAT" occupied cards represented.", + blk.occupied()); + HeapRegion* max_mem_sz_region = blk.max_mem_sz_region(); + HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set(); + out->print_cr(" Max size region = "HR_FORMAT", " + "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.", + HR_FORMAT_PARAMS(max_mem_sz_region), + (rem_set->mem_size() + K - 1)/K, + (rem_set->occupied() + K - 1)/K); + + out->print_cr(" Did %d coarsenings.", num_coarsenings()); +} diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP + +#include "utilities/ostream.hpp" + +class G1RemSet; + +// A G1RemSetSummary manages statistical information about the G1RemSet + +class G1RemSetSummary VALUE_OBJ_CLASS_SPEC { +private: + friend class GetRSThreadVTimeClosure; + + G1RemSet* _remset; + + G1RemSet* remset() const { + return _remset; + } + + size_t _num_refined_cards; + size_t _num_processed_buf_mutator; + size_t _num_processed_buf_rs_threads; + + size_t _num_coarsenings; + + double* _rs_threads_vtimes; + size_t _num_vtimes; + + double _sampling_thread_vtime; + + void set_rs_thread_vtime(uint thread, double value); + void set_sampling_thread_vtime(double value) { + _sampling_thread_vtime = value; + } + + void free_and_null() { + if (_rs_threads_vtimes) { + FREE_C_HEAP_ARRAY(double, _rs_threads_vtimes, mtGC); + _rs_threads_vtimes = NULL; + _num_vtimes = 0; + } + } + + // update this summary with current data from various places + void update(); + +public: + G1RemSetSummary() : _remset(NULL), _num_refined_cards(0), + _num_processed_buf_mutator(0), _num_processed_buf_rs_threads(0), _num_coarsenings(0), + _rs_threads_vtimes(NULL), _num_vtimes(0), _sampling_thread_vtime(0.0f) { + } + + ~G1RemSetSummary() { + free_and_null(); + } + + // set the counters in this summary to the values of the others + void set(G1RemSetSummary* other); + // subtract all counters from the other summary, and set them in the current + void subtract_from(G1RemSetSummary* other); + + // initialize and get the first sampling + void initialize(G1RemSet* remset, uint num_workers); + + void print_on(outputStream* out); + + double rs_thread_vtime(uint thread) const; + + double sampling_thread_vtime() const { + return _sampling_thread_vtime; + } + + size_t num_concurrent_refined_cards() const { + return _num_refined_cards; + } + + size_t num_processed_buf_mutator() const { + return _num_processed_buf_mutator; + } + + size_t num_processed_buf_rs_threads() const { + return _num_processed_buf_rs_threads; + } + + size_t num_processed_buf_total() const { + return num_processed_buf_mutator() + num_processed_buf_rs_threads(); + } + + size_t num_coarsenings() const { + return _num_coarsenings; + } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1YCTypes.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1YCTypes.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
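// A minimal sketch of how the summary bookkeeping defined just above is meant
// to fit together (hypothetical caller inside G1RemSet; _prev_period_summary
// and the two-argument print_summary_info() are the members introduced
// earlier in this patch, everything else comes from g1RemSetSummary.hpp):
//
//   G1RemSetSummary current;
//   current.initialize(this, n_workers());        // snapshot the live counters
//   _prev_period_summary.subtract_from(&current); // turn it into a per-period delta
//   print_summary_info(&_prev_period_summary, "Periodic Summary");
//   _prev_period_summary.set(&current);           // baseline for the next period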
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP + +#include "utilities/debug.hpp" + +enum G1YCType { + Normal, + InitialMark, + DuringMark, + Mixed, + G1YCTypeEndSentinel +}; + +class G1YCTypeHelper { + public: + static const char* to_string(G1YCType type) { + switch(type) { + case Normal: return "Normal"; + case InitialMark: return "Initial Mark"; + case DuringMark: return "During Mark"; + case Mixed: return "Mixed"; + default: ShouldNotReachHere(); return NULL; + } + } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/g1_globals.hpp --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -163,16 +163,12 @@ "Select green, yellow and red zones adaptively to meet the " \ "the pause requirements.") \ \ - develop(intx, G1ConcRSLogCacheSize, 10, \ + product(uintx, G1ConcRSLogCacheSize, 10, \ "Log base 2 of the length of conc RS hot-card cache.") \ \ - develop(intx, G1ConcRSHotCardLimit, 4, \ + product(uintx, G1ConcRSHotCardLimit, 4, \ "The threshold that defines (>=) a hot card.") \ \ - develop(intx, G1MaxHotCardCountSizePercent, 25, \ - "The maximum size of the hot card count cache as a " \ - "percentage of the number of cards for the maximum heap.") \ - \ develop(bool, G1PrintOopAppls, false, \ "When true, print applications of closures to external locs.") \ \ @@ -247,10 +243,6 @@ "If non-0 is the number of parallel rem set update threads, " \ "otherwise the value is determined ergonomically.") \ \ - develop(intx, G1CardCountCacheExpandThreshold, 16, \ - "Expand the card count cache if the number of collisions for " \ - "a particular entry exceeds this value.") \ - \ develop(bool, G1VerifyCTCleanup, false, \ "Verify card table cleanup.") \ \ @@ -337,7 +329,11 @@ \ develop(bool, G1EvacuationFailureALotDuringMixedGC, true, \ "Force use of evacuation failure handling during mixed " \ - "evacuation pauses") + "evacuation pauses") \ + \ + diagnostic(bool, G1VerifyRSetsDuringFullGC, false, \ + "If true, perform verification of each heap region's " \ + "remembered set when verifying the heap during a full GC.") G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/heapRegion.cpp --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -139,7 +139,7 @@ _n_failures++; } - if (!_g1h->full_collection()) { + if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) { HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); HeapRegion* to = _g1h->heap_region_containing(obj); if (from != NULL && to != NULL && diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * 
Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -242,11 +242,13 @@ PerRegionTable* cur = _free_list; size_t res = 0; while (cur != NULL) { - res += sizeof(PerRegionTable); + res += cur->mem_size(); cur = cur->next(); } return res; } + + static void test_fl_mem_size(); }; PerRegionTable* PerRegionTable::_free_list = NULL; @@ -282,10 +284,11 @@ _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size; } - _fine_grain_regions = new PerRegionTablePtr[_max_fine_entries]; + _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries, + mtGC, 0, AllocFailStrategy::RETURN_NULL); if (_fine_grain_regions == NULL) { - vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, + vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR, "Failed to allocate _fine_grain_entries."); } @@ -706,10 +709,11 @@ // Cast away const in this case. MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); size_t sum = 0; - PerRegionTable * cur = _first_all_fine_prts; - while (cur != NULL) { - sum += cur->mem_size(); - cur = cur->next(); + // all PRTs are of the same size so it is sufficient to query only one of them. + if (_first_all_fine_prts != NULL) { + assert(_last_all_fine_prts != NULL && + _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant"); + sum += _first_all_fine_prts->mem_size() * _n_fine_entries; } sum += (sizeof(PerRegionTable*) * _max_fine_entries); sum += (_coarse_map.size_in_words() * HeapWordSize); @@ -877,14 +881,9 @@ return _iter_state == Complete; } -void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const { - iter->initialize(this); -} - #ifndef PRODUCT void HeapRegionRemSet::print() const { - HeapRegionRemSetIterator iter; - init_iterator(&iter); + HeapRegionRemSetIterator iter(this); size_t card_index; while (iter.has_next(card_index)) { HeapWord* card_start = @@ -928,35 +927,23 @@ //-------------------- Iteration -------------------- -HeapRegionRemSetIterator:: -HeapRegionRemSetIterator() : - _hrrs(NULL), +HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) : + _hrrs(hrrs), _g1h(G1CollectedHeap::heap()), - _bosa(NULL), - _sparse_iter() { } - -void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) { - _hrrs = hrrs; - _coarse_map = &_hrrs->_other_regions._coarse_map; - _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions; - _bosa = _hrrs->bosa(); - - _is = Sparse; + _coarse_map(&hrrs->_other_regions._coarse_map), + _fine_grain_regions(hrrs->_other_regions._fine_grain_regions), + _bosa(hrrs->bosa()), + _is(Sparse), // Set these values so that we increment to the first region. 
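// (E.g. _coarse_cur_region_index starts at -1 and _coarse_cur_region_cur_card
// at CardsPerRegion-1, so the first advance wraps around to region 0, card 0.)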
- _coarse_cur_region_index = -1; - _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1); - - _cur_region_cur_card = 0; - - _fine_array_index = -1; - _fine_cur_prt = NULL; - - _n_yielded_coarse = 0; - _n_yielded_fine = 0; - _n_yielded_sparse = 0; - - _sparse_iter.init(&hrrs->_other_regions._sparse_table); -} + _coarse_cur_region_index(-1), + _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1), + _cur_region_cur_card(0), + _fine_array_index(-1), + _fine_cur_prt(NULL), + _n_yielded_coarse(0), + _n_yielded_fine(0), + _n_yielded_sparse(0), + _sparse_iter(&hrrs->_other_regions._sparse_table) {} bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) { if (_hrrs->_other_regions._n_coarse_entries == 0) return false; @@ -1164,6 +1151,19 @@ } #ifndef PRODUCT +void PerRegionTable::test_fl_mem_size() { + PerRegionTable* dummy = alloc(NULL); + free(dummy); + guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size"); + // try to reset the state + _free_list = NULL; + delete dummy; +} + +void HeapRegionRemSet::test_prt() { + PerRegionTable::test_fl_mem_size(); +} + void HeapRegionRemSet::test() { os::sleep(Thread::current(), (jlong)5000, false); G1CollectedHeap* g1h = G1CollectedHeap::heap(); @@ -1209,8 +1209,7 @@ hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom()); // Now, does iteration yield these three? - HeapRegionRemSetIterator iter; - hrrs->init_iterator(&iter); + HeapRegionRemSetIterator iter(hrrs); size_t sum = 0; size_t card_index; while (iter.has_next(card_index)) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -281,9 +281,6 @@ return (_iter_state == Unclaimed) && (_iter_claimed == 0); } - // Initialize the given iterator to iterate over this rem set. - void init_iterator(HeapRegionRemSetIterator* iter) const; - // The actual # of bytes this hr_remset takes up. size_t mem_size() { return _other_regions.mem_size() @@ -341,13 +338,14 @@ // Run unit tests. #ifndef PRODUCT + static void test_prt(); static void test(); #endif }; -class HeapRegionRemSetIterator : public CHeapObj { +class HeapRegionRemSetIterator : public StackObj { - // The region over which we're iterating. + // The region RSet over which we're iterating. const HeapRegionRemSet* _hrrs; // Local caching of HRRS fields. @@ -362,8 +360,10 @@ size_t _n_yielded_coarse; size_t _n_yielded_sparse; - // If true we're iterating over the coarse table; if false the fine - // table. + // Indicates what granularity of table that we're currently iterating over. + // We start iterating over the sparse table, progress to the fine grain + // table, and then finish with the coarse table. + // See HeapRegionRemSetIterator::has_next(). enum IterState { Sparse, Fine, @@ -403,9 +403,7 @@ public: // We require an iterator to be initialized before use, so the // constructor does little. 
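// (After this change the constructor performs the full initialization, so the
// comment above is historical.) A minimal usage sketch, mirroring the call
// sites updated elsewhere in this patch:
//
//   HeapRegionRemSetIterator iter(hrrs);  // hrrs is a const HeapRegionRemSet*
//   size_t card_index;
//   while (iter.has_next(card_index)) {
//     // ... consume card_index ...
//   }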
- HeapRegionRemSetIterator(); - - void initialize(const HeapRegionRemSet* hrrs); + HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs); // If there remains one or more cards to be yielded, returns true and // sets "card_index" to one of those cards (which is then considered diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/heapRegionSeq.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -124,11 +124,11 @@ } assert(_regions[index] == NULL, "invariant"); _regions[index] = new_hr; - increment_length(&_allocated_length); + increment_allocated_length(); } // Have to increment the length first, otherwise we will get an // assert failure at(index) below. - increment_length(&_length); + increment_length(); HeapRegion* hr = at(index); list->add_as_tail(hr); @@ -201,45 +201,29 @@ } } -MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes, - uint* num_regions_deleted) { +uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) { // Reset this in case it's currently pointing into the regions that // we just removed. _next_search_index = 0; - assert(shrink_bytes % os::vm_page_size() == 0, "unaligned"); - assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned"); assert(length() > 0, "the region sequence should not be empty"); assert(length() <= _allocated_length, "invariant"); assert(_allocated_length > 0, "we should have at least one region committed"); + assert(num_regions_to_remove < length(), "We should never remove all regions"); - // around the loop, i will be the next region to be removed - uint i = length() - 1; - assert(i > 0, "we should never remove all regions"); - // [last_start, end) is the MemRegion that covers the regions we will remove. - HeapWord* end = at(i)->end(); - HeapWord* last_start = end; - *num_regions_deleted = 0; - while (shrink_bytes > 0) { - HeapRegion* cur = at(i); - // We should leave the humongous regions where they are. - if (cur->isHumongous()) break; - // We should stop shrinking if we come across a non-empty region. - if (!cur->is_empty()) break; + uint i = 0; + for (; i < num_regions_to_remove; i++) { + HeapRegion* cur = at(length() - 1); - i -= 1; - *num_regions_deleted += 1; - shrink_bytes -= cur->capacity(); - last_start = cur->bottom(); - decrement_length(&_length); - // We will reclaim the HeapRegion. _allocated_length should be - // covering this index. So, even though we removed the region from - // the active set by decreasing _length, we still have it - // available in the future if we need to re-use it. - assert(i > 0, "we should never remove all regions"); - assert(length() > 0, "we should never remove all regions"); + if (!cur->is_empty()) { + // We have to give up if the region can not be moved + break; } - return MemRegion(last_start, end); + assert(!cur->isHumongous(), "Humongous regions should not be empty"); + + decrement_length(); + } + return i; } #ifndef PRODUCT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/heapRegionSeq.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -92,14 +92,19 @@ // address is valid. 
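// A sketch of the new region-count-based shrink contract (hypothetical
// caller; shrink_by() and HeapRegion::GrainBytes are the only names taken
// from this patch):
//
//   uint regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
//   uint removed = _hrs.shrink_by(regions_to_remove);   // may remove fewer
//   size_t actually_shrunk = (size_t)removed * HeapRegion::GrainBytes;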
inline uintx addr_to_index_biased(HeapWord* addr) const; - void increment_length(uint* length) { - assert(*length < _max_length, "pre-condition"); - *length += 1; + void increment_allocated_length() { + assert(_allocated_length < _max_length, "pre-condition"); + _allocated_length++; } - void decrement_length(uint* length) { - assert(*length > 0, "pre-condition"); - *length -= 1; + void increment_length() { + assert(_length < _max_length, "pre-condition"); + _length++; + } + + void decrement_length() { + assert(_length > 0, "pre-condition"); + _length--; } public: @@ -153,11 +158,9 @@ void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const; // Tag as uncommitted as many regions that are completely free as - // possible, up to shrink_bytes, from the suffix of the committed - // sequence. Return a MemRegion that corresponds to the address - // range of the uncommitted regions. Assume shrink_bytes is page and - // heap region aligned. - MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted); + // possible, up to num_regions_to_remove, from the suffix of the committed + // sequence. Return the actual number of removed regions. + uint shrink_by(uint num_regions_to_remove); // Do some sanity checking. void verify_optional() PRODUCT_RETURN; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/sparsePRT.cpp --- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -35,10 +35,6 @@ #define UNROLL_CARD_LOOPS 1 -void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) { - sprt_iter->init(this); -} - void SparsePRTEntry::init(RegionIdx_t region_ind) { _region_ind = region_ind; _next_index = NullEntry; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/sparsePRT.hpp --- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -192,18 +192,11 @@ size_t compute_card_ind(CardIdx_t ci); public: - RSHashTableIter() : - _tbl_ind(RSHashTable::NullEntry), + RSHashTableIter(RSHashTable* rsht) : + _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0. _bl_ind(RSHashTable::NullEntry), _card_ind((SparsePRTEntry::cards_num() - 1)), - _rsht(NULL) {} - - void init(RSHashTable* rsht) { - _rsht = rsht; - _tbl_ind = -1; // So that first increment gets to 0. - _bl_ind = RSHashTable::NullEntry; - _card_ind = (SparsePRTEntry::cards_num() - 1); - } + _rsht(rsht) {} bool has_next(size_t& card_index); }; @@ -284,8 +277,6 @@ static void cleanup_all(); RSHashTable* cur() const { return _cur; } - void init_iterator(SparsePRTIter* sprt_iter); - static void add_to_expanded_list(SparsePRT* sprt); static SparsePRT* get_from_expanded_list(); @@ -321,9 +312,9 @@ class SparsePRTIter: public RSHashTableIter { public: - void init(const SparsePRT* sprt) { - RSHashTableIter::init(sprt->cur()); - } + SparsePRTIter(const SparsePRT* sprt) : + RSHashTableIter(sprt->cur()) {} + bool has_next(size_t& card_index) { return RSHashTableIter::has_next(card_index); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/g1/vm_operations_g1.cpp --- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,8 @@ #include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" #include "runtime/interfaceSupport.hpp" @@ -227,7 +229,7 @@ void VM_CGC_Operation::doit() { gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); - TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty); + GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm()); SharedHeap* sh = SharedHeap::heap(); // This could go away if CollectedHeap gave access to _gc_is_active... if (sh != NULL) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -585,8 +585,7 @@ size_policy->avg_young_live()->sample(used()); size_policy->avg_eden_live()->sample(eden()->used()); - size_policy->compute_young_generation_free_space(eden()->capacity(), - max_gen_size()); + size_policy->compute_eden_space_size(eden()->capacity(), max_gen_size()); resize(size_policy->calculated_eden_size_in_bytes(), size_policy->calculated_survivor_size_in_bytes()); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parNew/parNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,11 @@ #include "gc_implementation/shared/adaptiveSizePolicy.hpp" #include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/parGCAllocBuffer.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/defNewGeneration.inline.hpp" #include "memory/genCollectedHeap.hpp" @@ -75,7 +80,6 @@ work_queue_set_, &term_), _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this), _keep_alive_closure(&_scan_weak_ref_closure), - _promotion_failure_size(0), _strong_roots_time(0.0), _term_time(0.0) { #if TASKQUEUE_STATS @@ -279,13 +283,10 @@ } } -void ParScanThreadState::print_and_clear_promotion_failure_size() { - if (_promotion_failure_size != 0) { - if (PrintPromotionFailure) { - gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ", - _thread_num, _promotion_failure_size); - } - _promotion_failure_size = 0; +void ParScanThreadState::print_promotion_failure_size() { + if (_promotion_failed_info.has_failed() && PrintPromotionFailure) { + gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ", + _thread_num, _promotion_failed_info.first_size()); } } @@ -305,6 +306,7 @@ inline ParScanThreadState& thread_state(int i); + void trace_promotion_failed(YoungGCTracer& gc_tracer); void reset(int active_workers, bool promotion_failed); void flush(); @@ -353,13 +355,21 @@ return ((ParScanThreadState*)_data)[i]; } +void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) { + for (int i = 0; i < length(); ++i) { + if (thread_state(i).promotion_failed()) { + gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info()); + thread_state(i).promotion_failed_info().reset(); + } + } +} void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed) { _term.reset_for_reuse(active_threads); if (promotion_failed) { for (int i = 0; i < length(); ++i) { - thread_state(i).print_and_clear_promotion_failure_size(); + thread_state(i).print_promotion_failure_size(); } } } @@ -583,14 +593,6 @@ gch->set_n_termination(active_workers); } -// The "i" passed to this method is the part of the work for -// this thread. It is not the worker ID. The "i" is derived -// from _started_workers which is incremented in internal_note_start() -// called in GangWorker loop() and which is called under the -// which is called under the protection of the gang monitor and is -// called after a task is started. So "i" is based on -// first-come-first-served. - void ParNewGenTask::work(uint worker_id) { GenCollectedHeap* gch = GenCollectedHeap::heap(); // Since this is being done in a separate thread, need new resource @@ -876,16 +878,45 @@ } +// A Generation that does parallel young-gen collection. + bool ParNewGeneration::_avoid_promotion_undo = false; -// A Generation that does parallel young-gen collection. +void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) { + assert(_promo_failure_scan_stack.is_empty(), "post condition"); + _promo_failure_scan_stack.clear(true); // Clear cached segments. 
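// The per-thread failure accounting that feeds this path, as a minimal
// sketch (all names from the ParScanThreadState changes in this patch):
//
//   par_scan_state->register_promotion_failure(sz);   // on each failed copy
//   ...
//   if (par_scan_state->promotion_failed()) {
//     gc_tracer.report_promotion_failed(par_scan_state->promotion_failed_info());
//   }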
+ + remove_forwarding_pointers(); + if (PrintGCDetails) { + gclog_or_tty->print(" (promotion failed)"); + } + // All the spaces are in play for mark-sweep. + swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. + from()->set_next_compaction_space(to()); + gch->set_incremental_collection_failed(); + // Inform the next generation that a promotion failure occurred. + _next_gen->promotion_failure_occurred(); + + // Trace promotion failure in the parallel GC threads + thread_state_set.trace_promotion_failed(gc_tracer); + // Single threaded code may have reported promotion failure to the global state + if (_promotion_failed_info.has_failed()) { + gc_tracer.report_promotion_failed(_promotion_failed_info); + } + // Reset the PromotionFailureALot counters. + NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) +} void ParNewGeneration::collect(bool full, bool clear_all_soft_refs, size_t size, bool is_tlab) { assert(full || size > 0, "otherwise we don't want to collect"); + GenCollectedHeap* gch = GenCollectedHeap::heap(); + + _gc_timer->register_gc_start(os::elapsed_counter()); + assert(gch->kind() == CollectedHeap::GenCollectedHeap, "not a CMS generational heap"); AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); @@ -906,7 +937,7 @@ set_avoid_promotion_undo(true); } - // If the next generation is too full to accomodate worst-case promotion + // If the next generation is too full to accommodate worst-case promotion // from this generation, pass on collection; let the next generation // do it. if (!collection_attempt_is_safe()) { @@ -915,6 +946,10 @@ } assert(to()->is_empty(), "Else not collection_attempt_is_safe"); + ParNewTracer gc_tracer; + gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); + gch->trace_heap_before_gc(&gc_tracer); + init_assuming_no_promotion_failure(); if (UseAdaptiveSizePolicy) { @@ -922,7 +957,7 @@ size_policy->minor_collection_begin(); } - TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty); + GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); // Capture heap used before collection (for printing). size_t gch_prev_used = gch->used(); @@ -975,17 +1010,21 @@ rp->setup_policy(clear_all_soft_refs); // Can the mt_degree be set later (at run_task() time would be best)? rp->set_active_mt_degree(active_workers); + ReferenceProcessorStats stats; if (rp->processing_is_mt()) { ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); - rp->process_discovered_references(&is_alive, &keep_alive, - &evacuate_followers, &task_executor); + stats = rp->process_discovered_references(&is_alive, &keep_alive, + &evacuate_followers, &task_executor, + _gc_timer); } else { thread_state_set.flush(); gch->set_par_threads(0); // 0 ==> non-parallel. gch->save_marks(); - rp->process_discovered_references(&is_alive, &keep_alive, - &evacuate_followers, NULL); + stats = rp->process_discovered_references(&is_alive, &keep_alive, + &evacuate_followers, NULL, + _gc_timer); } + gc_tracer.report_gc_reference_stats(stats); if (!promotion_failed()) { // Swap the survivor spaces. eden()->clear(SpaceDecorator::Mangle); @@ -1010,22 +1049,7 @@ adjust_desired_tenuring_threshold(); } else { - assert(_promo_failure_scan_stack.is_empty(), "post condition"); - _promo_failure_scan_stack.clear(true); // Clear cached segments. 
- - remove_forwarding_pointers(); - if (PrintGCDetails) { - gclog_or_tty->print(" (promotion failed)"); - } - // All the spaces are in play for mark-sweep. - swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. - from()->set_next_compaction_space(to()); - gch->set_incremental_collection_failed(); - // Inform the next generation that a promotion failure occurred. - _next_gen->promotion_failure_occurred(); - - // Reset the PromotionFailureALot counters. - NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) + handle_promotion_failed(gch, thread_state_set, gc_tracer); } // set new iteration safe limit for the survivor spaces from()->set_concurrent_iteration_safe_limit(from()->top()); @@ -1065,6 +1089,13 @@ rp->enqueue_discovered_references(NULL); } rp->verify_no_references_recorded(); + + gch->trace_heap_after_gc(&gc_tracer); + gc_tracer.report_tenuring_threshold(tenuring_threshold()); + + _gc_timer->register_gc_end(os::elapsed_counter()); + + gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); } static int sum; @@ -1174,8 +1205,7 @@ new_obj = old; preserve_mark_if_necessary(old, m); - // Log the size of the maiden promotion failure - par_scan_state->log_promotion_failure(sz); + par_scan_state->register_promotion_failure(sz); } old->forward_to(new_obj); @@ -1300,8 +1330,7 @@ failed_to_promote = true; preserve_mark_if_necessary(old, m); - // Log the size of the maiden promotion failure - par_scan_state->log_promotion_failure(sz); + par_scan_state->register_promotion_failure(sz); } } else { // Is in to-space; do copying ourselves. @@ -1599,8 +1628,7 @@ } #undef BUSY -void ParNewGeneration::ref_processor_init() -{ +void ParNewGeneration::ref_processor_init() { if (_ref_processor == NULL) { // Allocate and initialize a reference processor _ref_processor = diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parNew/parNewGeneration.hpp --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -25,7 +25,9 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/parGCAllocBuffer.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" #include "memory/defNewGeneration.hpp" #include "utilities/taskqueue.hpp" @@ -105,7 +107,7 @@ #endif // TASKQUEUE_STATS // Stats for promotion failure - size_t _promotion_failure_size; + PromotionFailedInfo _promotion_failed_info; // Timing numbers. double _start; @@ -180,13 +182,16 @@ void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz); // Promotion failure stats - size_t promotion_failure_size() { return promotion_failure_size(); } - void log_promotion_failure(size_t sz) { - if (_promotion_failure_size == 0) { - _promotion_failure_size = sz; - } + void register_promotion_failure(size_t sz) { + _promotion_failed_info.register_copy_failure(sz); } - void print_and_clear_promotion_failure_size(); + PromotionFailedInfo& promotion_failed_info() { + return _promotion_failed_info; + } + bool promotion_failed() { + return _promotion_failed_info.has_failed(); + } + void print_promotion_failure_size(); #if TASKQUEUE_STATS TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; } @@ -337,6 +342,8 @@ // word being overwritten with a self-forwarding-pointer. 
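// The tracer/timer lifecycle added to ParNewGeneration::collect(), condensed
// from the hunks above:
//
//   _gc_timer->register_gc_start(os::elapsed_counter());
//   ParNewTracer gc_tracer;
//   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
//   ... collection, reference processing, heap tracing ...
//   _gc_timer->register_gc_end(os::elapsed_counter());
//   gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());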
void preserve_mark_if_necessary(oop obj, markOop m); + void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer); + protected: bool _survivor_overflow; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -567,7 +567,7 @@ MemRegion(new_start_aligned, new_end_for_commit); if (!os::commit_memory((char*)new_committed.start(), new_committed.byte_size())) { - vm_exit_out_of_memory(new_committed.byte_size(), + vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR, "card table expansion"); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -43,7 +43,7 @@ _time_stamp_index(0) { if (!os::create_thread(this, os::pgc_thread)) - vm_exit_out_of_memory(0, "Cannot create GC thread. Out of system resources."); + vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GC thread. Out of system resources."); if (PrintGCTaskTimeStamps) { _time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -99,7 +99,7 @@ // Expand size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes; if (!_virtual_space.expand_by(expand_by)) { - vm_exit_out_of_memory(expand_by, "object start array expansion"); + vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion"); } // Clear *only* the newly allocated region memset(_blocks_region.end(), clean_block, expand_by); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp" -#include "gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp" #include "gc_implementation/parallelScavenge/psParallelCompact.hpp" #include "oops/oop.inline.hpp" #include "runtime/os.hpp" @@ -55,18 +54,18 @@ const size_t raw_bytes = words * sizeof(idx_t); const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10); const size_t granularity = os::vm_allocation_granularity(); - const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity)); + _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity)); const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 : MAX2(page_sz, granularity); - ReservedSpace rs(bytes, rs_align, rs_align > 0); + ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0); os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz, rs.base(), rs.size()); MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); _virtual_space = new PSVirtualSpace(rs, page_sz); - if (_virtual_space != NULL && _virtual_space->expand_by(bytes)) { + if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) { _region_start = covered_region.start(); _region_size = covered_region.word_size(); idx_t* map = (idx_t*)_virtual_space->reserved_low_addr(); @@ -108,31 +107,6 @@ return false; } -size_t -ParMarkBitMap::live_words_in_range(HeapWord* beg_addr, HeapWord* end_addr) const -{ - assert(beg_addr <= end_addr, "bad range"); - - idx_t live_bits = 0; - - // The bitmap routines require the right boundary to be word-aligned. - const idx_t end_bit = addr_to_bit(end_addr); - const idx_t range_end = BitMap::word_align_up(end_bit); - - idx_t beg_bit = find_obj_beg(addr_to_bit(beg_addr), range_end); - while (beg_bit < end_bit) { - idx_t tmp_end = find_obj_end(beg_bit, range_end); - if (tmp_end < end_bit) { - live_bits += tmp_end - beg_bit + 1; - beg_bit = find_obj_beg(tmp_end + 1, range_end); - } else { - live_bits += end_bit - beg_bit; // No + 1 here; end_bit is not counted. - return bits_to_words(live_bits); - } - } - return bits_to_words(live_bits); -} - size_t ParMarkBitMap::live_words_in_range(HeapWord* beg_addr, oop end_obj) const { assert(beg_addr <= (HeapWord*)end_obj, "bad range"); @@ -244,13 +218,6 @@ return complete; } -#ifndef PRODUCT -void ParMarkBitMap::reset_counters() -{ - _cas_tries = _cas_retries = _cas_by_another = 0; -} -#endif // #ifndef PRODUCT - #ifdef ASSERT void ParMarkBitMap::verify_clear() const { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,11 +26,11 @@ #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARMARKBITMAP_HPP #include "memory/memRegion.hpp" -#include "gc_implementation/parallelScavenge/psVirtualspace.hpp" -#include "utilities/bitMap.inline.hpp" +#include "oops/oop.hpp" +#include "utilities/bitMap.hpp" -class oopDesc; class ParMarkBitMapClosure; +class PSVirtualSpace; class ParMarkBitMap: public CHeapObj { @@ -41,13 +41,11 @@ enum IterationStatus { incomplete, complete, full, would_overflow }; inline ParMarkBitMap(); - inline ParMarkBitMap(MemRegion covered_region); bool initialize(MemRegion covered_region); // Atomically mark an object as live. bool mark_obj(HeapWord* addr, size_t size); inline bool mark_obj(oop obj, int size); - inline bool mark_obj(oop obj); // Return whether the specified begin or end bit is set. inline bool is_obj_beg(idx_t bit) const; @@ -77,11 +75,6 @@ // Return the size in words of the object (a search is done for the end bit). inline size_t obj_size(idx_t beg_bit) const; inline size_t obj_size(HeapWord* addr) const; - inline size_t obj_size(oop obj) const; - - // Synonyms for the above. - size_t obj_size_in_words(oop obj) const { return obj_size((HeapWord*)obj); } - size_t obj_size_in_words(HeapWord* addr) const { return obj_size(addr); } // Apply live_closure to each live object that lies completely within the // range [live_range_beg, live_range_end). This is used to iterate over the @@ -124,15 +117,12 @@ HeapWord* range_end, HeapWord* dead_range_end) const; - // Return the number of live words in the range [beg_addr, end_addr) due to + // Return the number of live words in the range [beg_addr, end_obj) due to // objects that start in the range. If a live object extends onto the range, // the caller must detect and account for any live words due to that object. // If a live object extends beyond the end of the range, only the words within - // the range are included in the result. - size_t live_words_in_range(HeapWord* beg_addr, HeapWord* end_addr) const; - - // Same as the above, except the end of the range must be a live object, which - // is the case when updating pointers. This allows a branch to be removed + // the range are included in the result. The end of the range must be a live object, + // which is the case when updating pointers. This allows a branch to be removed // from inside the loop. size_t live_words_in_range(HeapWord* beg_addr, oop end_obj) const; @@ -141,6 +131,8 @@ inline size_t region_size() const; inline size_t size() const; + size_t reserved_byte_size() const { return _reserved_byte_size; } + // Convert a heap address to/from a bit index. inline idx_t addr_to_bit(HeapWord* addr) const; inline HeapWord* bit_to_addr(idx_t bit) const; @@ -156,22 +148,11 @@ // Clear a range of bits or the entire bitmap (both begin and end bits are // cleared). inline void clear_range(idx_t beg, idx_t end); - inline void clear() { clear_range(0, size()); } // Return the number of bits required to represent the specified number of // HeapWords, or the specified region. static inline idx_t bits_required(size_t words); static inline idx_t bits_required(MemRegion covered_region); - static inline idx_t words_required(MemRegion covered_region); - -#ifndef PRODUCT - // CAS statistics. 
- size_t cas_tries() { return _cas_tries; } - size_t cas_retries() { return _cas_retries; } - size_t cas_by_another() { return _cas_by_another; } - - void reset_counters(); -#endif // #ifndef PRODUCT void print_on_error(outputStream* st) const { st->print_cr("Marking Bits: (ParMarkBitMap*) " PTR_FORMAT, this); @@ -197,28 +178,12 @@ BitMap _beg_bits; BitMap _end_bits; PSVirtualSpace* _virtual_space; - -#ifndef PRODUCT - size_t _cas_tries; - size_t _cas_retries; - size_t _cas_by_another; -#endif // #ifndef PRODUCT + size_t _reserved_byte_size; }; inline ParMarkBitMap::ParMarkBitMap(): - _beg_bits(), - _end_bits() -{ - _region_start = 0; - _virtual_space = 0; -} - -inline ParMarkBitMap::ParMarkBitMap(MemRegion covered_region): - _beg_bits(), - _end_bits() -{ - initialize(covered_region); -} + _beg_bits(), _end_bits(), _region_start(NULL), _region_size(0), _virtual_space(NULL), _reserved_byte_size(0) +{ } inline void ParMarkBitMap::clear_range(idx_t beg, idx_t end) { @@ -240,12 +205,6 @@ return bits_required(covered_region.word_size()); } -inline ParMarkBitMap::idx_t -ParMarkBitMap::words_required(MemRegion covered_region) -{ - return bits_required(covered_region) / BitsPerWord; -} - inline HeapWord* ParMarkBitMap::region_start() const { @@ -350,11 +309,6 @@ return obj_size(addr_to_bit(addr)); } -inline size_t ParMarkBitMap::obj_size(oop obj) const -{ - return obj_size((HeapWord*)obj); -} - inline ParMarkBitMap::IterationStatus ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure, HeapWord* range_beg, @@ -435,8 +389,10 @@ inline void ParMarkBitMap::verify_addr(HeapWord* addr) const { // Allow one past the last valid address; useful for loop bounds. - assert(addr >= region_start(), "addr too small"); - assert(addr <= region_start() + region_size(), "addr too big"); + assert(addr >= region_start(), + err_msg("addr too small, addr: " PTR_FORMAT " region start: " PTR_FORMAT, addr, region_start())); + assert(addr <= region_end(), + err_msg("addr too big, addr: " PTR_FORMAT " region end: " PTR_FORMAT, addr, region_end())); } #endif // #ifdef ASSERT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARMARKBITMAP_INLINE_HPP -#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARMARKBITMAP_INLINE_HPP - -#include "oops/oop.hpp" - -inline bool -ParMarkBitMap::mark_obj(oop obj) -{ - return mark_obj(obj, obj->size()); -} - -#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARMARKBITMAP_INLINE_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,8 @@ #include "gc_implementation/parallelScavenge/psPromotionManager.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" #include "gc_implementation/parallelScavenge/vmPSOperations.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcWhen.hpp" #include "memory/gcLocker.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" @@ -642,6 +644,29 @@ ensure_parsability(false); // no need to retire TLABs for verification } +PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() { + PSOldGen* old = old_gen(); + HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr(); + VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end()); + SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes()); + + PSYoungGen* young = young_gen(); + VirtualSpaceSummary young_summary(young->reserved().start(), + (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end()); + + MutableSpace* eden = young_gen()->eden_space(); + SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes()); + + MutableSpace* from = young_gen()->from_space(); + SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes()); + + MutableSpace* to = young_gen()->to_space(); + SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes()); + + VirtualSpaceSummary heap_summary = create_heap_space_summary(); + return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space); +} + void ParallelScavengeHeap::print_on(outputStream* st) const { young_gen()->print_on(st); old_gen()->print_on(st); @@ -706,6 +731,12 @@ } } +void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) { + const PSHeapSummary& heap_summary = create_ps_heap_summary(); + const MetaspaceSummary& metaspace_summary = create_metaspace_summary(); + gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary); +} + ParallelScavengeHeap* ParallelScavengeHeap::heap() { assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()"); assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Tue Jun 18 14:23:29 2013 -0700 +++ 
b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,14 +30,18 @@ #include "gc_implementation/parallelScavenge/psOldGen.hpp" #include "gc_implementation/parallelScavenge/psYoungGen.hpp" #include "gc_implementation/shared/gcPolicyCounters.hpp" +#include "gc_implementation/shared/gcWhen.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "utilities/ostream.hpp" class AdjoiningGenerations; +class CollectorPolicy; +class GCHeapSummary; class GCTaskManager; -class PSAdaptiveSizePolicy; class GenerationSizer; class CollectorPolicy; +class PSAdaptiveSizePolicy; +class PSHeapSummary; class ParallelScavengeHeap : public CollectedHeap { friend class VMStructs; @@ -65,6 +69,8 @@ static GCTaskManager* _gc_task_manager; // The task manager. + void trace_heap(GCWhen::Type when, GCTracer* tracer); + protected: static inline size_t total_invocations(); HeapWord* allocate_new_tlab(size_t size); @@ -116,7 +122,7 @@ // The alignment used for eden and survivors within the young gen // and for boundary between young gen and old gen. - size_t intra_heap_alignment() const { return 64 * K; } + size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; } size_t capacity() const; size_t used() const; @@ -219,6 +225,7 @@ jlong millis_since_last_gc(); void prepare_for_verify(); + PSHeapSummary create_ps_heap_summary(); virtual void print_on(outputStream* st) const; virtual void print_on_error(outputStream* st) const; virtual void print_gc_threads_on(outputStream* st) const; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,8 @@ #include "code/codeCache.hpp" #include "gc_implementation/parallelScavenge/pcTasks.hpp" #include "gc_implementation/parallelScavenge/psParallelCompact.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_interface/collectedHeap.hpp" #include "memory/universe.hpp" #include "oops/objArrayKlass.inline.hpp" @@ -48,8 +50,8 @@ ResourceMark rm; - NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask", - PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); + NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask", + PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -77,8 +79,8 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) { assert(Universe::heap()->is_gc_active(), "called outside gc"); - NOT_PRODUCT(TraceTime tm("MarkFromRootsTask", - PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); + NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask", + PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); @@ -120,6 +122,9 @@ case system_dictionary: SystemDictionary::always_strong_oops_do(&mark_and_push_closure); + break; + + case class_loader_data: ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true); break; @@ -145,8 +150,8 @@ { assert(Universe::heap()->is_gc_active(), "called outside gc"); - NOT_PRODUCT(TraceTime tm("RefProcTask", - PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); + NOT_PRODUCT(GCTraceTime tm("RefProcTask", + PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); @@ -201,8 +206,8 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) { assert(Universe::heap()->is_gc_active(), "called outside gc"); - NOT_PRODUCT(TraceTime tm("StealMarkingTask", - PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); + NOT_PRODUCT(GCTraceTime tm("StealMarkingTask", + PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -213,7 +218,7 @@ int random_seed = 17; do { while (ParCompactionManager::steal_objarray(which, &random_seed, task)) { - ObjArrayKlass* const k = (ObjArrayKlass*)task.obj()->klass(); + ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass(); k->oop_follow_contents(cm, task.obj(), task.index()); cm->follow_marking_stacks(); } @@ -234,8 +239,8 @@ void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) { assert(Universe::heap()->is_gc_active(), "called outside gc"); - NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask", - PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); + NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask", + PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -301,8 +306,8 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) { - NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask", - PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); + NOT_PRODUCT(GCTraceTime 
tm("UpdateDensePrefixTask", + PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -316,8 +321,8 @@ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) { assert(Universe::heap()->is_gc_active(), "called outside gc"); - NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask", - PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); + NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask", + PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -98,7 +98,8 @@ management = 6, jvmti = 7, system_dictionary = 8, - code_cache = 9 + class_loader_data = 9, + code_cache = 10 }; private: RootType _root_type; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -194,22 +194,239 @@ // If this is not a full GC, only test and modify the young generation. -void PSAdaptiveSizePolicy::compute_generation_free_space( +void PSAdaptiveSizePolicy::compute_generations_free_space( size_t young_live, size_t eden_live, size_t old_live, size_t cur_eden, size_t max_old_gen_size, size_t max_eden_size, - bool is_full_gc, - GCCause::Cause gc_cause, - CollectorPolicy* collector_policy) { + bool is_full_gc) { + compute_eden_space_size(young_live, + eden_live, + cur_eden, + max_eden_size, + is_full_gc); + + compute_old_gen_free_space(old_live, + cur_eden, + max_old_gen_size, + is_full_gc); +} + +void PSAdaptiveSizePolicy::compute_eden_space_size( + size_t young_live, + size_t eden_live, + size_t cur_eden, + size_t max_eden_size, + bool is_full_gc) { // Update statistics // Time statistics are updated as we go, update footprint stats here _avg_base_footprint->sample(BaseFootPrintEstimate); avg_young_live()->sample(young_live); avg_eden_live()->sample(eden_live); + + // This code used to return if the policy was not ready , i.e., + // policy_is_ready() returning false. The intent was that + // decisions below needed major collection times and so could + // not be made before two major collections. A consequence was + // adjustments to the young generation were not done until after + // two major collections even if the minor collections times + // exceeded the requested goals. Now let the young generation + // adjust for the minor collection times. Major collection times + // will be zero for the first collection and will naturally be + // ignored. Tenured generation adjustments are only made at the + // full collections so until the second major collection has + // been reached, no tenured generation adjustments will be made. 
+ + // Until we know better, desired promotion size uses the last calculation + size_t desired_promo_size = _promo_size; + + // Start eden at the current value. The desired value that is stored + // in _eden_size is not bounded by constraints of the heap and can + // run away. + // + // As expected setting desired_eden_size to the current + // value of desired_eden_size as a starting point + // caused desired_eden_size to grow way too large and caused + // an overflow down stream. It may have improved performance in + // some cases but is dangerous. + size_t desired_eden_size = cur_eden; + + // Cache some values. There's a bit of work getting these, so + // we might save a little time. + const double major_cost = major_gc_cost(); + const double minor_cost = minor_gc_cost(); + + // This method sets the desired eden size. That plus the + // desired survivor space sizes sets the desired young generation + // size. This method does not know what the desired survivor + // size is but expects that other policy will attempt to make + // the survivor sizes compatible with the live data in the + // young generation. This limit is an estimate of the space left + // in the young generation after the survivor spaces have been + // subtracted out. + size_t eden_limit = max_eden_size; + + const double gc_cost_limit = GCTimeLimit/100.0; + + // Which way should we go? + // if pause requirement is not met + // adjust size of any generation with average pause exceeding + // the pause limit. Adjust one pause at a time (the larger) + // and only make adjustments for the major pause at full collections. + // else if throughput requirement not met + // adjust the size of the generation with larger gc time. Only + // adjust one generation at a time. + // else + // adjust down the total heap size. Adjust down the larger of the + // generations. + + // Add some checks for a threshold for a change. For example, + // a change less than the necessary alignment is probably not worth + // attempting. + + + if ((_avg_minor_pause->padded_average() > gc_pause_goal_sec()) || + (_avg_major_pause->padded_average() > gc_pause_goal_sec())) { + // + // Check pauses + // + // Make changes only to affect one of the pauses (the larger) + // at a time. + adjust_eden_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size); + + } else if (_avg_minor_pause->padded_average() > gc_minor_pause_goal_sec()) { + // Adjust only for the minor pause time goal + adjust_eden_for_minor_pause_time(is_full_gc, &desired_eden_size); + + } else if (adjusted_mutator_cost() < _throughput_goal) { + // This branch used to require that (mutator_cost() > 0.0) in 1.4.2. + // This sometimes resulted in skipping to the minimize footprint + // code. Change this to try and reduce GC time if mutator time is + // negative for whatever reason. Or for future consideration, + // bail out of the code if mutator time is negative. + // + // Throughput + // + assert(major_cost >= 0.0, "major cost is < 0.0"); + assert(minor_cost >= 0.0, "minor cost is < 0.0"); + // Try to reduce the GC times. + adjust_eden_for_throughput(is_full_gc, &desired_eden_size); + + } else { + + // Be conservative about reducing the footprint. + // Do a minimum number of major collections first. + // Have reasonable averages for major and minor collection costs.
+ if (UseAdaptiveSizePolicyFootprintGoal && + young_gen_policy_is_ready() && + avg_major_gc_cost()->average() >= 0.0 && + avg_minor_gc_cost()->average() >= 0.0) { + size_t desired_sum = desired_eden_size + desired_promo_size; + desired_eden_size = adjust_eden_for_footprint(desired_eden_size, desired_sum); + } + } + + // Note we make the same tests as in the code block below; the code + // seems a little easier to read with the printing in another block. + if (PrintAdaptiveSizePolicy) { + if (desired_eden_size > eden_limit) { + gclog_or_tty->print_cr( + "PSAdaptiveSizePolicy::compute_eden_space_size limits:" + " desired_eden_size: " SIZE_FORMAT + " old_eden_size: " SIZE_FORMAT + " eden_limit: " SIZE_FORMAT + " cur_eden: " SIZE_FORMAT + " max_eden_size: " SIZE_FORMAT + " avg_young_live: " SIZE_FORMAT, + desired_eden_size, _eden_size, eden_limit, cur_eden, + max_eden_size, (size_t)avg_young_live()->average()); + } + if (gc_cost() > gc_cost_limit) { + gclog_or_tty->print_cr( + "PSAdaptiveSizePolicy::compute_eden_space_size: gc time limit" + " gc_cost: %f " + " GCTimeLimit: %d", + gc_cost(), GCTimeLimit); + } + } + + // Align everything and make a final limit check + const size_t alignment = _intra_generation_alignment; + desired_eden_size = align_size_up(desired_eden_size, alignment); + desired_eden_size = MAX2(desired_eden_size, alignment); + + eden_limit = align_size_down(eden_limit, alignment); + + // And one last limit check, now that we've aligned things. + if (desired_eden_size > eden_limit) { + // If the policy says to get a larger eden but + // is hitting the limit, don't decrease eden. + // This can lead to a general drifting down of the + // eden size. Let the tenuring calculation push more + // into the old gen. + desired_eden_size = MAX2(eden_limit, cur_eden); + } + + if (PrintAdaptiveSizePolicy) { + // Timing stats + gclog_or_tty->print( + "PSAdaptiveSizePolicy::compute_eden_space_size: costs" + " minor_time: %f" + " major_cost: %f" + " mutator_cost: %f" + " throughput_goal: %f", + minor_gc_cost(), major_gc_cost(), mutator_cost(), + _throughput_goal); + + // We give more details if Verbose is set + if (Verbose) { + gclog_or_tty->print( " minor_pause: %f" + " major_pause: %f" + " minor_interval: %f" + " major_interval: %f" + " pause_goal: %f", + _avg_minor_pause->padded_average(), + _avg_major_pause->padded_average(), + _avg_minor_interval->average(), + _avg_major_interval->average(), + gc_pause_goal_sec()); + } + + // Footprint stats + gclog_or_tty->print( " live_space: " SIZE_FORMAT + " free_space: " SIZE_FORMAT, + live_space(), free_space()); + // More detail + if (Verbose) { + gclog_or_tty->print( " base_footprint: " SIZE_FORMAT + " avg_young_live: " SIZE_FORMAT + " avg_old_live: " SIZE_FORMAT, + (size_t)_avg_base_footprint->average(), + (size_t)avg_young_live()->average(), + (size_t)avg_old_live()->average()); + } + + // And finally, our old and new sizes. + gclog_or_tty->print(" old_eden_size: " SIZE_FORMAT + " desired_eden_size: " SIZE_FORMAT, + _eden_size, desired_eden_size); + gclog_or_tty->cr(); + } + + set_eden_size(desired_eden_size); +} + +void PSAdaptiveSizePolicy::compute_old_gen_free_space( + size_t old_live, + size_t cur_eden, + size_t max_old_gen_size, + bool is_full_gc) { + + // Update statistics + // Time statistics are updated as we go, update footprint stats here if (is_full_gc) { // old_live is only accurate after a full gc avg_old_live()->sample(old_live); @@ -242,32 +459,14 @@ // some case but is dangerous. 
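// For reference (illustration only, not part of this change): the
// align_size_up()/align_size_down() helpers used above assume a power-of-two
// alignment and reduce to plain bit arithmetic:
//
//   align_size_up(s, a)   == (s + a - 1) & ~(a - 1)   // never shrinks s
//   align_size_down(s, a) == s & ~(a - 1)             // never grows s
//
// Rounding the desired size up and the limit down can therefore only make
// the final "desired > limit" check more conservative.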
size_t desired_eden_size = cur_eden; -#ifdef ASSERT - size_t original_promo_size = desired_promo_size; - size_t original_eden_size = desired_eden_size; -#endif - // Cache some values. There's a bit of work getting these, so // we might save a little time. const double major_cost = major_gc_cost(); const double minor_cost = minor_gc_cost(); - // Used for diagnostics - clear_generation_free_space_flags(); - // Limits on our growth size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average()); - // This method sets the desired eden size. That plus the - // desired survivor space sizes sets the desired young generation - // size. This methods does not know what the desired survivor - // size is but expects that other policy will attempt to make - // the survivor sizes compatible with the live data in the - // young generation. This limit is an estimate of the space left - // in the young generation after the survivor spaces have been - // subtracted out. - size_t eden_limit = max_eden_size; - // But don't force a promo size below the current promo size. Otherwise, // the promo size will shrink for no good reason. promo_limit = MAX2(promo_limit, _promo_size); @@ -290,7 +489,6 @@ // a change less than the necessary alignment is probably not worth // attempting. - if ((_avg_minor_pause->padded_average() > gc_pause_goal_sec()) || (_avg_major_pause->padded_average() > gc_pause_goal_sec())) { // @@ -298,12 +496,13 @@ // // Make changes only to affect one of the pauses (the larger) // at a time. - adjust_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size); - + if (is_full_gc) { + set_decide_at_full_gc(decide_at_full_gc_true); + adjust_promo_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size); + } } else if (_avg_minor_pause->padded_average() > gc_minor_pause_goal_sec()) { // Adjust only for the minor pause time goal - adjust_for_minor_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size); - + adjust_promo_for_minor_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size); } else if(adjusted_mutator_cost() < _throughput_goal) { // This branch used to require that (mutator_cost() > 0.0 in 1.4.2. // This sometimes resulted in skipping to the minimize footprint @@ -316,8 +515,10 @@ assert(major_cost >= 0.0, "major cost is < 0.0"); assert(minor_cost >= 0.0, "minor cost is < 0.0"); // Try to reduce the GC times. - adjust_for_throughput(is_full_gc, &desired_promo_size, &desired_eden_size); - + if (is_full_gc) { + set_decide_at_full_gc(decide_at_full_gc_true); + adjust_promo_for_throughput(is_full_gc, &desired_promo_size); + } } else { // Be conservative about reducing the footprint. 
@@ -327,13 +528,10 @@ young_gen_policy_is_ready() && avg_major_gc_cost()->average() >= 0.0 && avg_minor_gc_cost()->average() >= 0.0) { - size_t desired_sum = desired_eden_size + desired_promo_size; - desired_eden_size = adjust_eden_for_footprint(desired_eden_size, - desired_sum); if (is_full_gc) { set_decide_at_full_gc(decide_at_full_gc_true); - desired_promo_size = adjust_promo_for_footprint(desired_promo_size, - desired_sum); + size_t desired_sum = desired_eden_size + desired_promo_size; + desired_promo_size = adjust_promo_for_footprint(desired_promo_size, desired_sum); } } } @@ -345,7 +543,7 @@ // "free_in_old_gen" was the original value for used for promo_limit size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average()); gclog_or_tty->print_cr( - "PSAdaptiveSizePolicy::compute_generation_free_space limits:" + "PSAdaptiveSizePolicy::compute_old_gen_free_space limits:" " desired_promo_size: " SIZE_FORMAT " promo_limit: " SIZE_FORMAT " free_in_old_gen: " SIZE_FORMAT @@ -354,21 +552,9 @@ desired_promo_size, promo_limit, free_in_old_gen, max_old_gen_size, (size_t) avg_old_live()->average()); } - if (desired_eden_size > eden_limit) { - gclog_or_tty->print_cr( - "AdaptiveSizePolicy::compute_generation_free_space limits:" - " desired_eden_size: " SIZE_FORMAT - " old_eden_size: " SIZE_FORMAT - " eden_limit: " SIZE_FORMAT - " cur_eden: " SIZE_FORMAT - " max_eden_size: " SIZE_FORMAT - " avg_young_live: " SIZE_FORMAT, - desired_eden_size, _eden_size, eden_limit, cur_eden, - max_eden_size, (size_t)avg_young_live()->average()); - } if (gc_cost() > gc_cost_limit) { gclog_or_tty->print_cr( - "AdaptiveSizePolicy::compute_generation_free_space: gc time limit" + "PSAdaptiveSizePolicy::compute_old_gen_free_space: gc time limit" " gc_cost: %f " " GCTimeLimit: %d", gc_cost(), GCTimeLimit); @@ -377,46 +563,18 @@ // Align everything and make a final limit check const size_t alignment = _intra_generation_alignment; - desired_eden_size = align_size_up(desired_eden_size, alignment); - desired_eden_size = MAX2(desired_eden_size, alignment); desired_promo_size = align_size_up(desired_promo_size, alignment); desired_promo_size = MAX2(desired_promo_size, alignment); - eden_limit = align_size_down(eden_limit, alignment); promo_limit = align_size_down(promo_limit, alignment); - // Is too much time being spent in GC? - // Is the heap trying to grow beyond it's limits? - - const size_t free_in_old_gen = - (size_t)(max_old_gen_size - avg_old_live()->average()); - if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) { - check_gc_overhead_limit(young_live, - eden_live, - max_old_gen_size, - max_eden_size, - is_full_gc, - gc_cause, - collector_policy); - } - - // And one last limit check, now that we've aligned things. - if (desired_eden_size > eden_limit) { - // If the policy says to get a larger eden but - // is hitting the limit, don't decrease eden. - // This can lead to a general drifting down of the - // eden size. Let the tenuring calculation push more - // into the old gen. - desired_eden_size = MAX2(eden_limit, cur_eden); - } desired_promo_size = MIN2(desired_promo_size, promo_limit); - if (PrintAdaptiveSizePolicy) { // Timing stats gclog_or_tty->print( - "PSAdaptiveSizePolicy::compute_generation_free_space: costs" + "PSAdaptiveSizePolicy::compute_old_gen_free_space: costs" " minor_time: %f" " major_cost: %f" " mutator_cost: %f" @@ -454,19 +612,13 @@ // And finally, our old and new sizes. 
gclog_or_tty->print(" old_promo_size: " SIZE_FORMAT - " old_eden_size: " SIZE_FORMAT - " desired_promo_size: " SIZE_FORMAT - " desired_eden_size: " SIZE_FORMAT, - _promo_size, _eden_size, - desired_promo_size, desired_eden_size); + " desired_promo_size: " SIZE_FORMAT, + _promo_size, desired_promo_size); gclog_or_tty->cr(); } - decay_supplemental_growth(is_full_gc); - set_promo_size(desired_promo_size); - set_eden_size(desired_eden_size); -}; +} void PSAdaptiveSizePolicy::decay_supplemental_growth(bool is_full_gc) { // Decay the supplemental increment? Decay the supplement growth @@ -490,9 +642,39 @@ } } -void PSAdaptiveSizePolicy::adjust_for_minor_pause_time(bool is_full_gc, +void PSAdaptiveSizePolicy::adjust_promo_for_minor_pause_time(bool is_full_gc, size_t* desired_promo_size_ptr, size_t* desired_eden_size_ptr) { + if (PSAdjustTenuredGenForMinorPause) { + if (is_full_gc) { + set_decide_at_full_gc(decide_at_full_gc_true); + } + // If the desired eden size is as small as it will get, + // try to adjust the old gen size. + if (*desired_eden_size_ptr <= _intra_generation_alignment) { + // Vary the old gen size to reduce the young gen pause. This + // may not be a good idea. This is just a test. + if (minor_pause_old_estimator()->decrement_will_decrease()) { + set_change_old_gen_for_min_pauses(decrease_old_gen_for_min_pauses_true); + *desired_promo_size_ptr = + _promo_size - promo_decrement_aligned_down(*desired_promo_size_ptr); + } else { + set_change_old_gen_for_min_pauses(increase_old_gen_for_min_pauses_true); + size_t promo_heap_delta = + promo_increment_with_supplement_aligned_up(*desired_promo_size_ptr); + if ((*desired_promo_size_ptr + promo_heap_delta) > + *desired_promo_size_ptr) { + *desired_promo_size_ptr = + _promo_size + promo_heap_delta; + } + } + } + } +} + +void PSAdaptiveSizePolicy::adjust_eden_for_minor_pause_time(bool is_full_gc, + size_t* desired_eden_size_ptr) { + // Adjust the young generation size to reduce pause time of // of collections. // @@ -512,49 +694,19 @@ set_change_young_gen_for_min_pauses( increase_young_gen_for_min_pauses_true); } - if (PSAdjustTenuredGenForMinorPause) { - // If the desired eden size is as small as it will get, - // try to adjust the old gen size. - if (*desired_eden_size_ptr <= _intra_generation_alignment) { - // Vary the old gen size to reduce the young gen pause. This - // may not be a good idea. This is just a test. - if (minor_pause_old_estimator()->decrement_will_decrease()) { - set_change_old_gen_for_min_pauses( - decrease_old_gen_for_min_pauses_true); - *desired_promo_size_ptr = - _promo_size - promo_decrement_aligned_down(*desired_promo_size_ptr); - } else { - set_change_old_gen_for_min_pauses( - increase_old_gen_for_min_pauses_true); - size_t promo_heap_delta = - promo_increment_with_supplement_aligned_up(*desired_promo_size_ptr); - if ((*desired_promo_size_ptr + promo_heap_delta) > - *desired_promo_size_ptr) { - *desired_promo_size_ptr = - _promo_size + promo_heap_delta; - } - } - } - } } -void PSAdaptiveSizePolicy::adjust_for_pause_time(bool is_full_gc, +void PSAdaptiveSizePolicy::adjust_promo_for_pause_time(bool is_full_gc, size_t* desired_promo_size_ptr, size_t* desired_eden_size_ptr) { size_t promo_heap_delta = 0; - size_t eden_heap_delta = 0; - // Add some checks for a threshhold for a change. For example, + // Add some checks for a threshold for a change. For example, // a change less than the required alignment is probably not worth // attempting. 
- if (is_full_gc) { - set_decide_at_full_gc(decide_at_full_gc_true); - } if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) { - adjust_for_minor_pause_time(is_full_gc, - desired_promo_size_ptr, - desired_eden_size_ptr); + adjust_promo_for_minor_pause_time(is_full_gc, desired_promo_size_ptr, desired_eden_size_ptr); // major pause adjustments } else if (is_full_gc) { // Adjust for the major pause time only at full gc's because the // affects of a change can only be seen at full gc's. @@ -573,6 +725,33 @@ // promo_increment_aligned_up(*desired_promo_size_ptr); set_change_old_gen_for_maj_pauses(increase_old_gen_for_maj_pauses_true); } + } + + if (PrintAdaptiveSizePolicy && Verbose) { + gclog_or_tty->print_cr( + "PSAdaptiveSizePolicy::adjust_promo_for_pause_time " + "adjusting gen sizes for major pause (avg %f goal %f). " + "desired_promo_size " SIZE_FORMAT " promo delta " SIZE_FORMAT, + _avg_major_pause->average(), gc_pause_goal_sec(), + *desired_promo_size_ptr, promo_heap_delta); + } +} + +void PSAdaptiveSizePolicy::adjust_eden_for_pause_time(bool is_full_gc, + size_t* desired_promo_size_ptr, + size_t* desired_eden_size_ptr) { + + size_t eden_heap_delta = 0; + // Add some checks for a threshold for a change. For example, + // a change less than the required alignment is probably not worth + // attempting. + if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) { + adjust_eden_for_minor_pause_time(is_full_gc, + desired_eden_size_ptr); + // major pause adjustments + } else if (is_full_gc) { + // Adjust for the major pause time only at full gc's because the + // effects of a change can only be seen at full gc's. if (PSAdjustYoungGenForMajorPause) { // If the promo size is at the minimum (i.e., the old gen // size will not actually decrease), consider changing the @@ -607,43 +786,35 @@ if (PrintAdaptiveSizePolicy && Verbose) { gclog_or_tty->print_cr( - "AdaptiveSizePolicy::compute_generation_free_space " + "PSAdaptiveSizePolicy::adjust_eden_for_pause_time " "adjusting gen sizes for major pause (avg %f goal %f). " - "desired_promo_size " SIZE_FORMAT "desired_eden_size " - SIZE_FORMAT - " promo delta " SIZE_FORMAT " eden delta " SIZE_FORMAT, + "desired_eden_size " SIZE_FORMAT " eden delta " SIZE_FORMAT, _avg_major_pause->average(), gc_pause_goal_sec(), - *desired_promo_size_ptr, *desired_eden_size_ptr, - promo_heap_delta, eden_heap_delta); + *desired_eden_size_ptr, eden_heap_delta); } } -void PSAdaptiveSizePolicy::adjust_for_throughput(bool is_full_gc, - size_t* desired_promo_size_ptr, - size_t* desired_eden_size_ptr) { +void PSAdaptiveSizePolicy::adjust_promo_for_throughput(bool is_full_gc, + size_t* desired_promo_size_ptr) { - // Add some checks for a threshhold for a change. For example, + // Add some checks for a threshold for a change. For example, // a change less than the required alignment is probably not worth // attempting.
- if (is_full_gc) { - set_decide_at_full_gc(decide_at_full_gc_true); - } if ((gc_cost() + mutator_cost()) == 0.0) { return; } if (PrintAdaptiveSizePolicy && Verbose) { - gclog_or_tty->print("\nPSAdaptiveSizePolicy::adjust_for_throughput(" - "is_full: %d, promo: " SIZE_FORMAT ", cur_eden: " SIZE_FORMAT "): ", - is_full_gc, *desired_promo_size_ptr, *desired_eden_size_ptr); + gclog_or_tty->print("\nPSAdaptiveSizePolicy::adjust_promo_for_throughput(" + "is_full: %d, promo: " SIZE_FORMAT "): ", + is_full_gc, *desired_promo_size_ptr); gclog_or_tty->print_cr("mutator_cost %f major_gc_cost %f " "minor_gc_cost %f", mutator_cost(), major_gc_cost(), minor_gc_cost()); } // Tenured generation if (is_full_gc) { - // Calculate the change to use for the tenured gen. size_t scaled_promo_heap_delta = 0; // Can the increment to the generation be scaled? @@ -720,6 +891,26 @@ *desired_promo_size_ptr, scaled_promo_heap_delta); } } +} + +void PSAdaptiveSizePolicy::adjust_eden_for_throughput(bool is_full_gc, + size_t* desired_eden_size_ptr) { + + // Add some checks for a threshold for a change. For example, + // a change less than the required alignment is probably not worth + // attempting. + + if ((gc_cost() + mutator_cost()) == 0.0) { + return; + } + + if (PrintAdaptiveSizePolicy && Verbose) { + gclog_or_tty->print("\nPSAdaptiveSizePolicy::adjust_eden_for_throughput(" + "is_full: %d, cur_eden: " SIZE_FORMAT "): ", + is_full_gc, *desired_eden_size_ptr); + gclog_or_tty->print_cr("mutator_cost %f major_gc_cost %f " + "minor_gc_cost %f", mutator_cost(), major_gc_cost(), minor_gc_cost()); + } // Young generation size_t scaled_eden_heap_delta = 0; @@ -810,7 +1001,7 @@ if (PrintAdaptiveSizePolicy && Verbose) { gclog_or_tty->print_cr( - "AdaptiveSizePolicy::compute_generation_free_space " + "AdaptiveSizePolicy::adjust_promo_for_footprint " "adjusting tenured gen for footprint. " "starting promo size " SIZE_FORMAT " reduced promo size " SIZE_FORMAT, @@ -834,7 +1025,7 @@ if (PrintAdaptiveSizePolicy && Verbose) { gclog_or_tty->print_cr( - "AdaptiveSizePolicy::compute_generation_free_space " + "AdaptiveSizePolicy::adjust_eden_for_footprint " "adjusting eden for footprint. " " starting eden size " SIZE_FORMAT " reduced eden size " SIZE_FORMAT @@ -1089,7 +1280,7 @@ if (PrintAdaptiveSizePolicy) { gclog_or_tty->print( - "AdaptiveSizePolicy::compute_survivor_space_size_and_thresh:" + "AdaptiveSizePolicy::update_averages:" " survived: " SIZE_FORMAT " promoted: " SIZE_FORMAT " overflow: %s", diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -136,18 +136,24 @@ double gc_minor_pause_goal_sec() const { return _gc_minor_pause_goal_sec; } // Change the young generation size to achieve a minor GC pause time goal - void adjust_for_minor_pause_time(bool is_full_gc, + void adjust_promo_for_minor_pause_time(bool is_full_gc, size_t* desired_promo_size_ptr, size_t* desired_eden_size_ptr); + void adjust_eden_for_minor_pause_time(bool is_full_gc, + size_t* desired_eden_size_ptr); // Change the generation sizes to achieve a GC pause time goal // Returned sizes are not necessarily aligned. - void adjust_for_pause_time(bool is_full_gc, + void adjust_promo_for_pause_time(bool is_full_gc, + size_t* desired_promo_size_ptr, + size_t* desired_eden_size_ptr); + void adjust_eden_for_pause_time(bool is_full_gc, size_t* desired_promo_size_ptr, size_t* desired_eden_size_ptr); // Change the generation sizes to achieve an application throughput goal // Returned sizes are not necessarily aligned. - void adjust_for_throughput(bool is_full_gc, - size_t* desired_promo_size_ptr, + void adjust_promo_for_throughput(bool is_full_gc, + size_t* desired_promo_size_ptr); + void adjust_eden_for_throughput(bool is_full_gc, size_t* desired_eden_size_ptr); // Change the generation sizes to achieve minimum footprint // Returned sizes are not aligned. @@ -168,9 +174,6 @@ size_t promo_decrement_aligned_down(size_t cur_promo); size_t promo_increment_with_supplement_aligned_up(size_t cur_promo); - // Decay the supplemental growth additive. - void decay_supplemental_growth(bool is_full_gc); - // Returns a change that has been scaled down. Result // is not aligned. (If useful, move to some shared // location.) @@ -336,20 +339,29 @@ // perform a Full GC? bool should_full_GC(size_t live_in_old_gen); - // Calculates optimial free space sizes for both the old and young + // Calculates optimal (free) space sizes for both the young and old // generations. Stores results in _eden_size and _promo_size. // Takes current used space in all generations as input, as well // as an indication if a full gc has just been performed, for use // in deciding if an OOM error should be thrown. - void compute_generation_free_space(size_t young_live, - size_t eden_live, - size_t old_live, - size_t cur_eden, // current eden in bytes - size_t max_old_gen_size, - size_t max_eden_size, - bool is_full_gc, - GCCause::Cause gc_cause, - CollectorPolicy* collector_policy); + void compute_generations_free_space(size_t young_live, + size_t eden_live, + size_t old_live, + size_t cur_eden, // current eden in bytes + size_t max_old_gen_size, + size_t max_eden_size, + bool is_full_gc); + + void compute_eden_space_size(size_t young_live, + size_t eden_live, + size_t cur_eden, // current eden in bytes + size_t max_eden_size, + bool is_full_gc); + + void compute_old_gen_free_space(size_t old_live, + size_t cur_eden, // current eden in bytes + size_t max_old_gen_size, + bool is_full_gc); // Calculates new survivor space size; returns a new tenuring threshold // value. Stores new survivor size in _survivor_size. @@ -390,6 +402,9 @@ // Printing support virtual bool print_adaptive_size_policy_on(outputStream* st) const; + + // Decay the supplemental growth additive. 
+ void decay_supplemental_growth(bool is_full_gc); }; #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSADAPTIVESIZEPOLICY_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -187,11 +187,8 @@ // Process ObjArrays one at a time to avoid marking stack bloat. ObjArrayTask task; - if (_objarray_stack.pop_overflow(task)) { - ObjArrayKlass* const k = (ObjArrayKlass*)task.obj()->klass(); - k->oop_follow_contents(this, task.obj(), task.index()); - } else if (_objarray_stack.pop_local(task)) { - ObjArrayKlass* const k = (ObjArrayKlass*)task.obj()->klass(); + if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) { + ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass(); k->oop_follow_contents(this, task.obj(), task.index()); } } while (!marking_stacks_empty()); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -119,7 +119,7 @@ ps_size_policy()->change_old_gen_for_min_pauses()); } - // compute_generation_free_space() statistics + // compute_generations_free_space() statistics inline void update_avg_major_pause() { _avg_major_pause->set_value( diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,10 @@ #include "gc_implementation/parallelScavenge/psOldGen.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" #include "gc_implementation/parallelScavenge/psYoungGen.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/shared/markSweep.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" @@ -92,8 +96,8 @@ const bool clear_all_soft_refs = heap->collector_policy()->should_clear_all_soft_refs(); - int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount; - IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count); + uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount; + UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count); PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction); } @@ -108,8 +112,12 @@ } ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); + assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); GCCause::Cause gc_cause = heap->gc_cause(); - assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); + + _gc_timer->register_gc_start(os::elapsed_counter()); + _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start()); + PSAdaptiveSizePolicy* size_policy = heap->size_policy(); // The scope of casr should end after code that can change @@ -131,6 +139,7 @@ AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); heap->print_heap_before_gc(); + heap->trace_heap_before_gc(_gc_tracer); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); @@ -138,8 +147,7 @@ if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } // Verify object start arrays @@ -148,7 +156,7 @@ old_gen->verify_object_start_array(); } - heap->pre_full_gc_dump(); + heap->pre_full_gc_dump(_gc_timer); // Filled in below to track the state of the young gen after the collection. bool eden_empty; @@ -160,7 +168,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty); + GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); @@ -177,7 +185,7 @@ size_t prev_used = heap->used(); // Capture metadata size before collection for sizing. 
- size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); // For PrintGCDetails size_t old_gen_prev_used = old_gen->used_in_bytes(); @@ -238,6 +246,7 @@ // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); BiasedLocking::restore_marks(); Threads::gc_epilogue(); @@ -277,18 +286,36 @@ young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds"); + + size_t young_live = young_gen->used_in_bytes(); + size_t eden_live = young_gen->eden_space()->used_in_bytes(); + size_t old_live = old_gen->used_in_bytes(); + size_t cur_eden = young_gen->eden_space()->capacity_in_bytes(); + size_t max_old_gen_size = old_gen->max_gen_size(); size_t max_eden_size = young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); - size_policy->compute_generation_free_space(young_gen->used_in_bytes(), - young_gen->eden_space()->used_in_bytes(), - old_gen->used_in_bytes(), - young_gen->eden_space()->capacity_in_bytes(), - old_gen->max_gen_size(), - max_eden_size, - true /* full gc*/, - gc_cause, - heap->collector_policy()); + + // Used for diagnostics + size_policy->clear_generation_free_space_flags(); + + size_policy->compute_generations_free_space(young_live, + eden_live, + old_live, + cur_eden, + max_old_gen_size, + max_eden_size, + true /* full gc*/); + + size_policy->check_gc_overhead_limit(young_live, + eden_live, + max_old_gen_size, + max_eden_size, + true /* full gc*/, + gc_cause, + heap->collector_policy()); + + size_policy->decay_supplemental_growth(true /* full gc*/); heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes()); @@ -340,8 +367,7 @@ if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } // Re-verify object start arrays @@ -357,13 +383,18 @@ NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); heap->print_heap_after_gc(); + heap->trace_heap_after_gc(_gc_tracer); - heap->post_full_gc_dump(); + heap->post_full_gc_dump(_gc_timer); #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif + _gc_timer->register_gc_end(os::elapsed_counter()); + + _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); + return true; } @@ -481,7 +512,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer); trace(" 1"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); @@ -514,32 +545,35 @@ // Process reference objects found during marking { ref_processor()->setup_policy(clear_all_softrefs); - ref_processor()->process_discovered_references( - is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL); + const ReferenceProcessorStats& stats = + ref_processor()->process_discovered_references( + is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer); + gc_tracer()->report_gc_reference_stats(stats); } - // Follow system dictionary roots and unload classes + // This is the point where the entire marking should 
have completed. + assert(_marking_stack.is_empty(), "Marking should have completed"); + + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); - // Follow code cache roots + // Unload nmethods. CodeCache::do_unloading(is_alive_closure(), purged_class); - follow_stack(); // Flush marking stack - // Update subklass/sibling/implementor links of live klasses - Klass::clean_weak_klass_links(&is_alive); - assert(_marking_stack.is_empty(), "just drained"); + // Prune dead klasses from subklass/sibling/implementor lists. + Klass::clean_weak_klass_links(is_alive_closure()); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(is_alive_closure()); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - - assert(_marking_stack.is_empty(), "stack should be empty by now"); + _gc_tracer->report_object_count_after_gc(is_alive_closure()); } void PSMarkSweep::mark_sweep_phase2() { - TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer); trace("2"); // Now all live objects are marked, compute the new object addresses. @@ -563,14 +597,13 @@ // This should be moved to the shared markSweep code! class PSAlwaysTrueClosure: public BoolObjectClosure { public: - void do_object(oop p) { ShouldNotReachHere(); } bool do_object_b(oop p) { return true; } }; static PSAlwaysTrueClosure always_true; void PSMarkSweep::mark_sweep_phase3() { // Adjust the pointers to reflect the new locations - TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer); trace("3"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); @@ -583,28 +616,27 @@ ClassLoaderDataGraph::clear_claimed_marks(); // General strong roots. - Universe::oops_do(adjust_root_pointer_closure()); - JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles - CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure()); - Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL); - ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); - FlatProfiler::oops_do(adjust_root_pointer_closure()); - Management::oops_do(adjust_root_pointer_closure()); - JvmtiExport::oops_do(adjust_root_pointer_closure()); + Universe::oops_do(adjust_pointer_closure()); + JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles + CLDToOopClosure adjust_from_cld(adjust_pointer_closure()); + Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); + ObjectSynchronizer::oops_do(adjust_pointer_closure()); + FlatProfiler::oops_do(adjust_pointer_closure()); + Management::oops_do(adjust_pointer_closure()); + JvmtiExport::oops_do(adjust_pointer_closure()); // SO_AllClasses - SystemDictionary::oops_do(adjust_root_pointer_closure()); - ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true); - //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure()); + SystemDictionary::oops_do(adjust_pointer_closure()); + ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) 
// Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); + JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); CodeCache::oops_do(adjust_pointer_closure()); - StringTable::oops_do(adjust_root_pointer_closure()); - ref_processor()->weak_oops_do(adjust_root_pointer_closure()); - PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure()); + StringTable::oops_do(adjust_pointer_closure()); + ref_processor()->weak_oops_do(adjust_pointer_closure()); + PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); adjust_marks(); @@ -614,7 +646,7 @@ void PSMarkSweep::mark_sweep_phase4() { EventMark m("4 compact heap"); - TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer); trace("4"); // All pointers are now adjusted, move objects accordingly diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -44,7 +44,6 @@ static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; } static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; } static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; } - static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_root_pointer_closure; } static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; } static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -88,8 +88,7 @@ * by the MarkSweepAlwaysCompactCount parameter. This is a significant * performance improvement! */ - bool skip_dead = (MarkSweepAlwaysCompactCount < 1) - || ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0); + bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0); size_t allowed_deadspace = 0; if (skip_dead) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,10 @@ #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" #include "gc_implementation/parallelScavenge/psYoungGen.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_interface/gcCause.hpp" #include "memory/gcLocker.inline.hpp" @@ -59,13 +63,25 @@ #include // All sizes are in HeapWords. -const size_t ParallelCompactData::Log2RegionSize = 9; // 512 words +const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize; const size_t ParallelCompactData::RegionSizeBytes = RegionSize << LogHeapWordSize; const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1; const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1; -const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask; +const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask; + +const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words +const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize; +const size_t ParallelCompactData::BlockSizeBytes = + BlockSize << LogHeapWordSize; +const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1; +const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1; +const size_t ParallelCompactData::BlockAddrMask = ~BlockAddrOffsetMask; + +const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize; +const size_t ParallelCompactData::Log2BlocksPerRegion = + Log2RegionSize - Log2BlockSize; const ParallelCompactData::RegionData::region_sz_t ParallelCompactData::RegionData::dc_shift = 27; @@ -356,8 +372,13 @@ _region_start = 0; _region_vspace = 0; + _reserved_byte_size = 0; _region_data = 0; _region_count = 0; + + _block_vspace = 0; + _block_data = 0; + _block_count = 0; } bool ParallelCompactData::initialize(MemRegion covered_region) @@ -371,8 +392,7 @@ assert((region_size & RegionSizeOffsetMask) == 0, "region size not a multiple of RegionSize"); - bool result = initialize_region_data(region_size); - + bool result = initialize_region_data(region_size) && initialize_block_data(); return result; } @@ -382,11 +402,11 @@ const size_t raw_bytes = count * element_size; const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10); const size_t granularity = os::vm_allocation_granularity(); - const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity)); + _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity)); const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 
0 : MAX2(page_sz, granularity); - ReservedSpace rs(bytes, rs_align, rs_align > 0); + ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0); os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(), rs.size()); @@ -394,7 +414,7 @@ PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz); if (vspace != 0) { - if (vspace->expand_by(bytes)) { + if (vspace->expand_by(_reserved_byte_size)) { return vspace; } delete vspace; @@ -417,17 +437,36 @@ return false; } +bool ParallelCompactData::initialize_block_data() +{ + assert(_region_count != 0, "region data must be initialized first"); + const size_t count = _region_count << Log2BlocksPerRegion; + _block_vspace = create_vspace(count, sizeof(BlockData)); + if (_block_vspace != 0) { + _block_data = (BlockData*)_block_vspace->reserved_low_addr(); + _block_count = count; + return true; + } + return false; +} + void ParallelCompactData::clear() { memset(_region_data, 0, _region_vspace->committed_size()); + memset(_block_data, 0, _block_vspace->committed_size()); } void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) { assert(beg_region <= _region_count, "beg_region out of range"); assert(end_region <= _region_count, "end_region out of range"); + assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize"); const size_t region_cnt = end_region - beg_region; memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData)); + + const size_t beg_block = beg_region * BlocksPerRegion; + const size_t block_cnt = region_cnt * BlocksPerRegion; + memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData)); } HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const @@ -706,49 +745,48 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) { assert(addr != NULL, "Should detect NULL oop earlier"); - assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap"); -#ifdef ASSERT - if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) { - gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr); - } -#endif - assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked"); + assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap"); + assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked"); // Region covering the object. - size_t region_index = addr_to_region_idx(addr); - const RegionData* const region_ptr = region(region_index); - HeapWord* const region_addr = region_align_down(addr); - - assert(addr < region_addr + RegionSize, "Region does not cover object"); - assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check"); - + RegionData* const region_ptr = addr_to_region_ptr(addr); HeapWord* result = region_ptr->destination(); - // If all the data in the region is live, then the new location of the object - // can be calculated from the destination of the region plus the offset of the - // object in the region. + // If the entire Region is live, the new location is region->destination + the + // offset of the object within the Region. + + // Run some performance tests to determine if this special case pays off. It + // is worth it for pointers into the dense prefix. If the optimization to + // avoid pointer updates in regions that only point to the dense prefix is + // ever implemented, this should be revisited.
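// Geometry introduced above (illustration only, not part of this change),
// in HeapWords:
//
//   RegionSize      = 1 << 16 = 65536 words   (512KB with 8-byte HeapWords)
//   BlockSize       = 1 << 7  = 128 words
//   BlocksPerRegion = 65536 / 128 = 512
//
// With the block table filled in, the lookup below computes
//
//   new_addr = region->destination() + block->offset()
//            + live words in [block_align_down(addr), addr)
//
// so the linear scan of the mark bitmap is bounded by one 128-word block
// rather than an entire 64K-word region.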
if (region_ptr->data_size() == RegionSize) { - result += pointer_delta(addr, region_addr); - DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);) + result += region_offset(addr); return result; } - // The new location of the object is - // region destination + - // size of the partial object extending onto the region + - // sizes of the live objects in the Region that are to the left of addr - const size_t partial_obj_size = region_ptr->partial_obj_size(); - HeapWord* const search_start = region_addr + partial_obj_size; + // Otherwise, the new location is region->destination + block offset + the + // number of live words in the Block that are (a) to the left of addr and (b) + // due to objects that start in the Block. + + // Fill in the block table if necessary. This is unsynchronized, so multiple + // threads may fill the block table for a region (harmless, since it is + // idempotent). + if (!region_ptr->blocks_filled()) { + PSParallelCompact::fill_blocks(addr_to_region_idx(addr)); + region_ptr->set_blocks_filled(); + } + + HeapWord* const search_start = block_align_down(addr); + const size_t block_offset = addr_to_block_ptr(addr)->offset(); const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap(); - size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr)); - - result += partial_obj_size + live_to_left; - DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);) + const size_t live = bitmap->live_words_in_range(search_start, oop(addr)); + result += block_offset + live; + DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result)); return result; } -#ifdef ASSERT +#ifdef ASSERT void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace) { const size_t* const beg = (const size_t*)vspace->committed_low_addr(); @@ -761,16 +799,12 @@ void ParallelCompactData::verify_clear() { verify_clear(_region_vspace); + verify_clear(_block_vspace); } #endif // #ifdef ASSERT -#ifdef NOT_PRODUCT -ParallelCompactData::RegionData* debug_region(size_t region_index) { - ParallelCompactData& sd = PSParallelCompact::summary_data(); - return sd.region(region_index); -} -#endif - +STWGCTimer PSParallelCompact::_gc_timer; +ParallelOldTracer PSParallelCompact::_gc_tracer; elapsedTimer PSParallelCompact::_accumulated_time; unsigned int PSParallelCompact::_total_invocations = 0; unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0; @@ -781,18 +815,16 @@ PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure; -void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); } bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); } void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } -PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true); -PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false); +PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure; PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure; -void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } -void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } +void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); } 
+void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); } void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); } @@ -805,7 +837,7 @@ klass->oops_do(_mark_and_push_closure); } void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) { - klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure); + klass->oops_do(&PSParallelCompact::_adjust_pointer_closure); } void PSParallelCompact::post_initialize() { @@ -842,14 +874,18 @@ initialize_dead_wood_limiter(); if (!_mark_bitmap.initialize(mr)) { - vm_shutdown_during_initialization("Unable to allocate bit map for " - "parallel garbage collection for the requested heap size."); + vm_shutdown_during_initialization( + err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel " + "garbage collection for the requested " SIZE_FORMAT "KB heap.", + _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K)); return false; } if (!_summary_data.initialize(mr)) { - vm_shutdown_during_initialization("Unable to allocate tables for " - "parallel garbage collection for the requested heap size."); + vm_shutdown_during_initialization( + err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel " + "garbage collection for the requested " SIZE_FORMAT "KB heap.", + _summary_data.reserved_byte_size()/K, mr.byte_size()/K)); return false; } @@ -892,7 +928,7 @@ _heap_used = heap->used(); _young_gen_used = heap->young_gen()->used_in_bytes(); _old_gen_used = heap->old_gen()->used_in_bytes(); - _metadata_used = MetaspaceAux::used_in_bytes(); + _metadata_used = MetaspaceAux::allocated_used_bytes(); }; size_t heap_used() const { return _heap_used; } @@ -942,14 +978,13 @@ // at each young gen gc. Do the update unconditionally (even though a // promotion failure does not swap spaces) because an unknown number of minor // collections will have swapped the spaces an unknown number of times. - TraceTime tm("pre compact", print_phases(), true, gclog_or_tty); + GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer); ParallelScavengeHeap* heap = gc_heap(); _space_info[from_space_id].set_space(heap->young_gen()->from_space()); _space_info[to_space_id].set_space(heap->young_gen()->to_space()); pre_gc_values->fill(heap); - NOT_PRODUCT(_mark_bitmap.reset_counters()); DEBUG_ONLY(add_obj_count = add_obj_size = 0;) DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;) @@ -960,6 +995,7 @@ _total_invocations++; heap->print_heap_before_gc(); + heap->trace_heap_before_gc(&_gc_tracer); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); @@ -967,8 +1003,7 @@ if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } // Verify object start arrays @@ -986,7 +1021,7 @@ void PSParallelCompact::post_compact() { - TraceTime tm("post compact", print_phases(), true, gclog_or_tty); + GCTraceTime tm("post compact", print_phases(), true, &_gc_timer); for (unsigned int id = old_space_id; id < last_space_id; ++id) { // Clear the marking bitmap, summary data and split info. 
@@ -1027,6 +1062,7 @@ // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); Threads::gc_epilogue(); CodeCache::gc_epilogue(); @@ -1811,7 +1847,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm, bool maximum_compaction) { - TraceTime tm("summary phase", print_phases(), true, gclog_or_tty); + GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer); // trace("2"); #ifdef ASSERT @@ -1959,11 +1995,6 @@ maximum_heap_compaction); } -bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) { - size_t addr_region_index = addr_to_region_idx(addr); - return region_index == addr_region_index; -} - // This method contains no policy. You should probably // be calling invoke() instead. bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { @@ -1974,11 +2005,15 @@ return false; } + ParallelScavengeHeap* heap = gc_heap(); + + _gc_timer.register_gc_start(os::elapsed_counter()); + _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); + TimeStamp marking_start; TimeStamp compaction_start; TimeStamp collection_exit; - ParallelScavengeHeap* heap = gc_heap(); GCCause::Cause gc_cause = heap->gc_cause(); PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); @@ -1994,7 +2029,7 @@ heap->record_gen_tops_before_GC(); } - heap->pre_full_gc_dump(); + heap->pre_full_gc_dump(&_gc_timer); _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes; @@ -2021,7 +2056,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty); + GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); @@ -2041,16 +2076,7 @@ bool marked_for_unloading = false; marking_start.update(); - marking_phase(vmthread_cm, maximum_heap_compaction); - -#ifndef PRODUCT - if (TraceParallelOldGCMarkingPhase) { - gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d " - "cas_by_another %d", - mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(), - mark_bitmap()->cas_by_another()); - } -#endif // #ifndef PRODUCT + marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer); bool max_on_system_gc = UseMaximumCompactionOnSystemGC && gc_cause == GCCause::_java_lang_system_gc; @@ -2095,19 +2121,36 @@ young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds"); + + size_t young_live = young_gen->used_in_bytes(); + size_t eden_live = young_gen->eden_space()->used_in_bytes(); + size_t old_live = old_gen->used_in_bytes(); + size_t cur_eden = young_gen->eden_space()->capacity_in_bytes(); + size_t max_old_gen_size = old_gen->max_gen_size(); size_t max_eden_size = young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); - size_policy->compute_generation_free_space( - young_gen->used_in_bytes(), - young_gen->eden_space()->used_in_bytes(), - old_gen->used_in_bytes(), - young_gen->eden_space()->capacity_in_bytes(), - old_gen->max_gen_size(), - max_eden_size, - true /* full gc*/, - gc_cause, - heap->collector_policy()); + + // Used for diagnostics + size_policy->clear_generation_free_space_flags(); + + size_policy->compute_generations_free_space(young_live, 
+ eden_live, + old_live, + cur_eden, + max_old_gen_size, + max_eden_size, + true /* full gc*/); + + size_policy->check_gc_overhead_limit(young_live, + eden_live, + max_old_gen_size, + max_eden_size, + true /* full gc*/, + gc_cause, + heap->collector_policy()); + + size_policy->decay_supplemental_growth(true /* full gc*/); heap->resize_old_gen( size_policy->calculated_old_free_size_in_bytes()); @@ -2168,8 +2211,7 @@ if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } // Re-verify object start arrays @@ -2187,6 +2229,8 @@ collection_exit.update(); heap->print_heap_after_gc(); + heap->trace_heap_after_gc(&_gc_tracer); + if (PrintGCTaskTimeStamps) { gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT, @@ -2195,12 +2239,17 @@ gc_task_manager()->print_task_time_stamps(); } - heap->post_full_gc_dump(); + heap->post_full_gc_dump(&_gc_timer); #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif + _gc_timer.register_gc_end(os::elapsed_counter()); + + _gc_tracer.report_dense_prefix(dense_prefix(old_space_id)); + _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions()); + return true; } @@ -2299,9 +2348,10 @@ } void PSParallelCompact::marking_phase(ParCompactionManager* cm, - bool maximum_heap_compaction) { + bool maximum_heap_compaction, + ParallelOldTracer *gc_tracer) { // Recursively traverse all live objects and mark them - TraceTime tm("marking phase", print_phases(), true, gclog_or_tty); + GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer); ParallelScavengeHeap* heap = gc_heap(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); @@ -2316,7 +2366,8 @@ ClassLoaderDataGraph::clear_claimed_marks(); { - TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty); + GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer); + ParallelScavengeHeap::ParStrongRootsScope psrs; GCTaskQueue* q = GCTaskQueue::create(); @@ -2329,6 +2380,7 @@ q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler)); q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management)); q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary)); + q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data)); q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti)); q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache)); @@ -2343,35 +2395,43 @@ // Process reference objects found during marking { - TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty); + GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer); + + ReferenceProcessorStats stats; if (ref_processor()->processing_is_mt()) { RefProcTaskExecutor task_executor; - ref_processor()->process_discovered_references( + stats = ref_processor()->process_discovered_references( is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, - &task_executor); + &task_executor, &_gc_timer); } else { - ref_processor()->process_discovered_references( - is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL); + stats = ref_processor()->process_discovered_references( + is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL, + &_gc_timer); } + + gc_tracer->report_gc_reference_stats(stats); } - TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty); + 
GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer); + + // This is the point where the entire marking should have completed. + assert(cm->marking_stacks_empty(), "Marking should have completed"); + // Follow system dictionary roots and unload classes. bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); - // Follow code cache roots. + // Unload nmethods. CodeCache::do_unloading(is_alive_closure(), purged_class); - cm->follow_marking_stacks(); // Flush marking stack. - - // Update subklass/sibling/implementor links of live klasses + + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(is_alive_closure()); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(is_alive_closure()); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - - assert(cm->marking_stacks_empty(), "marking stacks should be empty"); + _gc_tracer.report_object_count_after_gc(is_alive_closure()); } void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) { @@ -2398,7 +2458,7 @@ void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm, ClassLoaderData* cld) { - cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(), + cld->oops_do(PSParallelCompact::adjust_pointer_closure(), PSParallelCompact::adjust_klass_closure(), true); } @@ -2406,51 +2466,49 @@ // This should be moved to the shared markSweep code! class PSAlwaysTrueClosure: public BoolObjectClosure { public: - void do_object(oop p) { ShouldNotReachHere(); } bool do_object_b(oop p) { return true; } }; static PSAlwaysTrueClosure always_true; void PSParallelCompact::adjust_roots() { // Adjust the pointers to reflect the new locations - TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty); + GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer); // Need new claim bits when tracing through and adjusting pointers. ClassLoaderDataGraph::clear_claimed_marks(); // General strong roots. - Universe::oops_do(adjust_root_pointer_closure()); - JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles - CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure()); - Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL); - ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); - FlatProfiler::oops_do(adjust_root_pointer_closure()); - Management::oops_do(adjust_root_pointer_closure()); - JvmtiExport::oops_do(adjust_root_pointer_closure()); + Universe::oops_do(adjust_pointer_closure()); + JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles + CLDToOopClosure adjust_from_cld(adjust_pointer_closure()); + Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); + ObjectSynchronizer::oops_do(adjust_pointer_closure()); + FlatProfiler::oops_do(adjust_pointer_closure()); + Management::oops_do(adjust_pointer_closure()); + JvmtiExport::oops_do(adjust_pointer_closure()); // SO_AllClasses - SystemDictionary::oops_do(adjust_root_pointer_closure()); - ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true); + SystemDictionary::oops_do(adjust_pointer_closure()); + ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) 
// Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); + JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); CodeCache::oops_do(adjust_pointer_closure()); - StringTable::oops_do(adjust_root_pointer_closure()); + StringTable::oops_do(adjust_pointer_closure()); - ref_processor()->weak_oops_do(adjust_root_pointer_closure()); + ref_processor()->weak_oops_do(adjust_pointer_closure()); // Roots were visited so references into the young gen in roots // may have been scanned. Process them also. // Should the reference processor have a span that excludes // young gen objects? - PSScavenge::reference_processor()->weak_oops_do( - adjust_root_pointer_closure()); + PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); } void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, uint parallel_gc_threads) { - TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty); + GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer); // Find the threads that are active unsigned int which = 0; @@ -2524,7 +2582,7 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q, uint parallel_gc_threads) { - TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty); + GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer); ParallelCompactData& sd = PSParallelCompact::summary_data(); @@ -2606,7 +2664,7 @@ GCTaskQueue* q, ParallelTaskTerminator* terminator_ptr, uint parallel_gc_threads) { - TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty); + GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer); // Once a thread has drained its stack, it should try to steal regions from // other threads. @@ -2617,9 +2675,44 @@ } } +#ifdef ASSERT +// Write a histogram of the number of times the block table was filled for a +// region.
+void PSParallelCompact::write_block_fill_histogram(outputStream* const out) +{ + if (!TraceParallelOldGCCompactionPhase) return; + + typedef ParallelCompactData::RegionData rd_t; + ParallelCompactData& sd = summary_data(); + + for (unsigned int id = old_space_id; id < last_space_id; ++id) { + MutableSpace* const spc = _space_info[id].space(); + if (spc->bottom() != spc->top()) { + const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom()); + HeapWord* const top_aligned_up = sd.region_align_up(spc->top()); + const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up); + + size_t histo[5] = { 0, 0, 0, 0, 0 }; + const size_t histo_len = sizeof(histo) / sizeof(size_t); + const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t)); + + for (const rd_t* cur = beg; cur < end; ++cur) { + ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)]; + } + out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt); + for (size_t i = 0; i < histo_len; ++i) { + out->print(" " SIZE_FORMAT_W(5) " %5.1f%%", + histo[i], 100.0 * histo[i] / region_cnt); + } + out->cr(); + } + } +} +#endif // #ifdef ASSERT + void PSParallelCompact::compact() { // trace("5"); - TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty); + GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); @@ -2636,7 +2729,7 @@ enqueue_region_stealing_tasks(q, &terminator, active_gc_threads); { - TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty); + GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer); gc_task_manager()->execute_and_wait(q); @@ -2650,12 +2743,14 @@ { // Update the deferred objects, if any. Any compaction manager can be used. - TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty); + GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer); ParCompactionManager* cm = ParCompactionManager::manager_array(0); for (unsigned int id = old_space_id; id < last_space_id; ++id) { update_deferred_objects(cm, SpaceId(id)); } } + + DEBUG_ONLY(write_block_fill_histogram(gclog_or_tty)); } #ifdef ASSERT @@ -3120,6 +3215,57 @@ } while (true); } +void PSParallelCompact::fill_blocks(size_t region_idx) +{ + // Fill in the block table elements for the specified region. Each block + // table element holds the number of live words in the region that are to the + // left of the first object that starts in the block. Thus only blocks in + // which an object starts need to be filled. + // + // The algorithm scans the section of the bitmap that corresponds to the + // region, keeping a running total of the live words. When an object start is + // found, if it's the first to start in the block that contains it, the + // current total is written to the block table element. + const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize; + const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize; + const size_t RegionSize = ParallelCompactData::RegionSize; + + ParallelCompactData& sd = summary_data(); + const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size(); + if (partial_obj_size >= RegionSize) { + return; // No objects start in this region. + } + + // Ensure the first loop iteration decides that the block has changed. 
+ size_t cur_block = sd.block_count(); + + const ParMarkBitMap* const bitmap = mark_bitmap(); + + const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment; + assert((size_t)1 << Log2BitsPerBlock == + bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity"); + + size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize); + const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize); + size_t live_bits = bitmap->words_to_bits(partial_obj_size); + beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end); + while (beg_bit < range_end) { + const size_t new_block = beg_bit >> Log2BitsPerBlock; + if (new_block != cur_block) { + cur_block = new_block; + sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits)); + } + + const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end); + if (end_bit < range_end - 1) { + live_bits += end_bit - beg_bit + 1; + beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end); + } else { + return; + } + } +} + void PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) { const MutableSpace* sp = space(space_id); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,8 @@ class PreGCValues; class MoveAndUpdateClosure; class RefProcTaskExecutor; +class ParallelOldTracer; +class STWGCTimer; // The SplitInfo class holds the information needed to 'split' a source region // so that the live data can be copied to two destination *spaces*. Normally, @@ -220,6 +222,17 @@ // Mask for the bits in a pointer to get the address of the start of a region. static const size_t RegionAddrMask; + static const size_t Log2BlockSize; + static const size_t BlockSize; + static const size_t BlockSizeBytes; + + static const size_t BlockSizeOffsetMask; + static const size_t BlockAddrOffsetMask; + static const size_t BlockAddrMask; + + static const size_t BlocksPerRegion; + static const size_t Log2BlocksPerRegion; + class RegionData { public: @@ -272,6 +285,12 @@ inline uint destination_count() const; inline uint destination_count_raw() const; + // Whether the block table for this region has been filled. + inline bool blocks_filled() const; + + // Number of times the block table was filled. + DEBUG_ONLY(inline size_t blocks_filled_count() const;) + // The location of the java heap data that corresponds to this region. inline HeapWord* data_location() const; @@ -296,6 +315,7 @@ void set_partial_obj_size(size_t words) { _partial_obj_size = (region_sz_t) words; } + inline void set_blocks_filled(); inline void set_destination_count(uint count); inline void set_live_obj_size(size_t words); @@ -328,7 +348,11 @@ HeapWord* _partial_obj_addr; region_sz_t _partial_obj_size; region_sz_t volatile _dc_and_los; + bool _blocks_filled; + #ifdef ASSERT + size_t _blocks_filled_count; // Number of block table fills. + // These enable optimizations that are only partially implemented. Use // debug builds to prevent the code fragments from breaking. 
HeapWord* _data_location; @@ -337,23 +361,40 @@ #ifdef ASSERT public: - uint _pushed; // 0 until region is pushed onto a worker's stack + uint _pushed; // 0 until region is pushed onto a stack private: #endif }; + // "Blocks" allow shorter sections of the bitmap to be searched. Each Block + // holds an offset, which is the amount of live data in the Region to the left + // of the first live object that starts in the Block. + class BlockData + { + public: + typedef unsigned short int blk_ofs_t; + + blk_ofs_t offset() const { return _offset; } + void set_offset(size_t val) { _offset = (blk_ofs_t)val; } + + private: + blk_ofs_t _offset; + }; + public: ParallelCompactData(); bool initialize(MemRegion covered_region); size_t region_count() const { return _region_count; } + size_t reserved_byte_size() const { return _reserved_byte_size; } // Convert region indices to/from RegionData pointers. inline RegionData* region(size_t region_idx) const; inline size_t region(const RegionData* const region_ptr) const; - // Returns true if the given address is contained within the region - bool region_contains(size_t region_index, HeapWord* addr); + size_t block_count() const { return _block_count; } + inline BlockData* block(size_t block_idx) const; + inline size_t block(const BlockData* block_ptr) const; void add_obj(HeapWord* addr, size_t len); void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); } @@ -393,11 +434,24 @@ inline HeapWord* region_align_up(HeapWord* addr) const; inline bool is_region_aligned(HeapWord* addr) const; + // Analogous to region_offset() for blocks. + size_t block_offset(const HeapWord* addr) const; + size_t addr_to_block_idx(const HeapWord* addr) const; + size_t addr_to_block_idx(const oop obj) const { + return addr_to_block_idx((HeapWord*) obj); + } + inline BlockData* addr_to_block_ptr(const HeapWord* addr) const; + inline HeapWord* block_to_addr(size_t block) const; + inline size_t region_to_block_idx(size_t region) const; + + inline HeapWord* block_align_down(HeapWord* addr) const; + inline HeapWord* block_align_up(HeapWord* addr) const; + inline bool is_block_aligned(HeapWord* addr) const; + // Return the address one past the end of the partial object. HeapWord* partial_obj_end(size_t region_idx) const; - // Return the new location of the object p after the - // the compaction. + // Return the location of the object after compaction. HeapWord* calc_new_pointer(HeapWord* addr); HeapWord* calc_new_pointer(oop p) { @@ -410,6 +464,7 @@ #endif // #ifdef ASSERT private: + bool initialize_block_data(); bool initialize_region_data(size_t region_size); PSVirtualSpace* create_vspace(size_t count, size_t element_size); @@ -420,8 +475,13 @@ #endif // #ifdef ASSERT PSVirtualSpace* _region_vspace; + size_t _reserved_byte_size; RegionData* _region_data; size_t _region_count; + + PSVirtualSpace* _block_vspace; + BlockData* _block_data; + size_t _block_count; }; inline uint @@ -436,6 +496,28 @@ return destination_count_raw() >> dc_shift; } +inline bool +ParallelCompactData::RegionData::blocks_filled() const +{ + return _blocks_filled; +} + +#ifdef ASSERT +inline size_t +ParallelCompactData::RegionData::blocks_filled_count() const +{ + return _blocks_filled_count; +} +#endif // #ifdef ASSERT + +inline void +ParallelCompactData::RegionData::set_blocks_filled() +{ + _blocks_filled = true; + // Debug builds count the number of times the table was filled. 
+ DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count)); +} + inline void ParallelCompactData::RegionData::set_destination_count(uint count) { @@ -530,6 +612,12 @@ return pointer_delta(region_ptr, _region_data, sizeof(RegionData)); } +inline ParallelCompactData::BlockData* +ParallelCompactData::block(size_t n) const { + assert(n < block_count(), "bad arg"); + return _block_data + n; +} + inline size_t ParallelCompactData::region_offset(const HeapWord* addr) const { @@ -596,6 +684,63 @@ return region_offset(addr) == 0; } +inline size_t +ParallelCompactData::block_offset(const HeapWord* addr) const +{ + assert(addr >= _region_start, "bad addr"); + assert(addr <= _region_end, "bad addr"); + return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize; +} + +inline size_t +ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const +{ + assert(addr >= _region_start, "bad addr"); + assert(addr <= _region_end, "bad addr"); + return pointer_delta(addr, _region_start) >> Log2BlockSize; +} + +inline ParallelCompactData::BlockData* +ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const +{ + return block(addr_to_block_idx(addr)); +} + +inline HeapWord* +ParallelCompactData::block_to_addr(size_t block) const +{ + assert(block < _block_count, "block out of range"); + return _region_start + (block << Log2BlockSize); +} + +inline size_t +ParallelCompactData::region_to_block_idx(size_t region) const +{ + return region << Log2BlocksPerRegion; +} + +inline HeapWord* +ParallelCompactData::block_align_down(HeapWord* addr) const +{ + assert(addr >= _region_start, "bad addr"); + assert(addr < _region_end + RegionSize, "bad addr"); + return (HeapWord*)(size_t(addr) & BlockAddrMask); +} + +inline HeapWord* +ParallelCompactData::block_align_up(HeapWord* addr) const +{ + assert(addr >= _region_start, "bad addr"); + assert(addr <= _region_end, "bad addr"); + return block_align_down(addr + BlockSizeOffsetMask); +} + +inline bool +ParallelCompactData::is_block_aligned(HeapWord* addr) const +{ + return block_offset(addr) == 0; +} + // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the // do_addr() method. // @@ -773,6 +918,7 @@ // Convenient access to type names. 
typedef ParMarkBitMap::idx_t idx_t; typedef ParallelCompactData::RegionData RegionData; + typedef ParallelCompactData::BlockData BlockData; typedef enum { old_space_id, eden_space_id, @@ -784,7 +930,6 @@ // class IsAliveClosure: public BoolObjectClosure { public: - virtual void do_object(oop p); virtual bool do_object_b(oop p); }; @@ -799,16 +944,6 @@ virtual void do_oop(narrowOop* p); }; - // Current unused - class FollowRootClosure: public OopsInGenClosure { - private: - ParCompactionManager* _compaction_manager; - public: - FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } - virtual void do_oop(oop* p); - virtual void do_oop(narrowOop* p); - }; - class FollowStackClosure: public VoidClosure { private: ParCompactionManager* _compaction_manager; @@ -818,10 +953,7 @@ }; class AdjustPointerClosure: public OopClosure { - private: - bool _is_root; public: - AdjustPointerClosure(bool is_root) : _is_root(is_root) { } virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p); // do not walk from thread stacks to the code cache on this phase @@ -838,11 +970,12 @@ friend class AdjustPointerClosure; friend class AdjustKlassClosure; friend class FollowKlassClosure; - friend class FollowRootClosure; friend class InstanceClassLoaderKlass; friend class RefProcTaskProxy; private: + static STWGCTimer _gc_timer; + static ParallelOldTracer _gc_tracer; static elapsedTimer _accumulated_time; static unsigned int _total_invocations; static unsigned int _maximum_compaction_gc_num; @@ -853,7 +986,6 @@ static IsAliveClosure _is_alive_closure; static SpaceInfo _space_info[last_space_id]; static bool _print_phases; - static AdjustPointerClosure _adjust_root_pointer_closure; static AdjustPointerClosure _adjust_pointer_closure; static AdjustKlassClosure _adjust_klass_closure; @@ -887,10 +1019,8 @@ // Mark live objects static void marking_phase(ParCompactionManager* cm, - bool maximum_heap_compaction); - - template <class T> static inline void adjust_pointer(T* p, bool is_root); - static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } + bool maximum_heap_compaction, + ParallelOldTracer *gc_tracer); template <class T> static inline void follow_root(ParCompactionManager* cm, T* p); @@ -979,6 +1109,8 @@ // Adjust addresses in roots. Does not adjust addresses in heap. static void adjust_roots(); + DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);) + // Move objects to new locations. static void compact_perm(ParCompactionManager* cm); static void compact(); @@ -1046,7 +1178,6 @@ // Closure accessors static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; } - static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; } static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; } static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } @@ -1067,6 +1198,7 @@ // Check mark and maybe push on marking stack template <class T> static inline void mark_and_push(ParCompactionManager* cm, T* p); + template <class T> static inline void adjust_pointer(T* p); static void follow_klass(ParCompactionManager* cm, Klass* klass); static void adjust_klass(ParCompactionManager* cm, Klass* klass); @@ -1145,18 +1277,20 @@ fill_region(cm, region); } + // Fill in the block table for the specified region. + static void fill_blocks(size_t region_idx); + // Update the deferred objects in the space.
static void update_deferred_objects(ParCompactionManager* cm, SpaceId id); static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; } static ParallelCompactData& summary_data() { return _summary_data; } - static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); } - static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } - // Reference Processing static ReferenceProcessor* const ref_processor() { return _ref_processor; } + static STWGCTimer* gc_timer() { return &_gc_timer; } + // Return the SpaceId for the given address. static SpaceId space_id(HeapWord* addr); @@ -1230,7 +1364,7 @@ } template <class T> -inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) { +inline void PSParallelCompact::adjust_pointer(T* p) { T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "gc_implementation/parallelScavenge/psOldGen.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/mutableSpace.hpp" #include "memory/memRegion.hpp" #include "oops/oop.inline.hpp" @@ -49,7 +50,7 @@ guarantee(_manager_array != NULL, "Could not initialize promotion manager"); _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads); - guarantee(_stack_array_depth != NULL, "Cound not initialize promotion manager"); + guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager"); // Create and register the PSPromotionManager(s) for the worker threads. for(uint i=0; i<ParallelGCThreads; i++) { @@ -84,10 +85,17 @@ } -void PSPromotionManager::post_scavenge() { +bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) { + bool promotion_failure_occurred = false; + TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats()); for (uint i = 0; i < ParallelGCThreads + 1; i++) { PSPromotionManager* manager = manager_array(i); assert(manager->claimed_stack_depth()->is_empty(), "should be empty"); + if (manager->_promotion_failed_info.has_failed()) { + gc_tracer.report_promotion_failed(manager->_promotion_failed_info); + promotion_failure_occurred = true; + } manager->flush_labs(); } + return promotion_failure_occurred; } #if TASKQUEUE_STATS @@ -187,6 +195,8 @@ _old_lab.initialize(MemRegion(lab_base, (size_t)0)); _old_gen_is_full = false; + _promotion_failed_info.reset(); + TASKQUEUE_STATS_ONLY(reset_stats()); } @@ -305,6 +315,8 @@ // We won any races, we "own" this object. assert(obj == obj->forwardee(), "Sanity"); + _promotion_failed_info.register_copy_failure(obj->size()); + obj->push_contents(this); // Save the mark if needed diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,8 @@ #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP #include "gc_implementation/parallelScavenge/psPromotionLAB.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" #include "memory/allocation.hpp" #include "utilities/taskqueue.hpp" @@ -33,7 +35,7 @@ // psPromotionManager is used by a single thread to manage object survival // during a scavenge. The promotion manager contains thread local data only. // -// NOTE! Be carefull when allocating the stacks on cheap. If you are going +// NOTE! Be careful when allocating the stacks on the C heap ("cheap"). If you are going // to use a promotion manager in more than one thread, the stacks MUST be // on the C heap. This can lead to memory leaks, though, as they are not auto // deallocated. @@ -85,6 +87,8 @@ uint _array_chunk_size; uint _min_array_size_for_chunking; + PromotionFailedInfo _promotion_failed_info; + // Accessors static PSOldGen* old_gen() { return _old_gen; } static MutableSpace* young_space() { return _young_space; } @@ -149,7 +153,7 @@ static void initialize(); static void pre_scavenge(); - static void post_scavenge(); + static bool post_scavenge(YoungGCTracer& gc_tracer); static PSPromotionManager* gc_thread_promotion_manager(int index); static PSPromotionManager* vm_thread_promotion_manager(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ if (o->is_forwarded()) { o = o->forwardee(); // Card mark - if (PSScavenge::is_obj_in_young((HeapWord*) o)) { + if (PSScavenge::is_obj_in_young(o)) { PSScavenge::card_table()->inline_write_ref_field_gc(p, o); } oopDesc::encode_store_heap_oop_not_null(p, o); @@ -152,7 +152,7 @@ // This is the promotion failed test, and code handling. // The code belongs here for two reasons. It is slightly - // different thatn the code below, and cannot share the + // different than the code below, and cannot share the // CAS testing code. Keeping the code here also minimizes // the impact on the common case fast path code.
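Taken together, the psPromotionManager hunks above replace the old global PSScavenge::promotion_failed() flag with thread-local accounting: each manager owns a PromotionFailedInfo (the class is added in copyFailedInfo.hpp further down in this patch), and post_scavenge() now folds the per-thread records into the tracer and a single boolean. The resulting flow, condensed from lines elsewhere in this patch:

  // During the scavenge, each worker records sizes it failed to promote:
  _promotion_failed_info.register_copy_failure(obj->size());

  // After the scavenge (cf. psScavenge.cpp below), the caller consumes the
  // aggregated result instead of reading a global flag:
  bool promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
  if (promotion_failure_occurred) {
    clean_up_failed_promotion();  // restores preserved marks and oops
  }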
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -34,6 +34,10 @@ #include "gc_implementation/parallelScavenge/psParallelCompact.hpp" #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" #include "gc_implementation/parallelScavenge/psTasks.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "gc_interface/gcCause.hpp" @@ -61,20 +65,19 @@ bool PSScavenge::_survivor_overflow = false; uint PSScavenge::_tenuring_threshold = 0; HeapWord* PSScavenge::_young_generation_boundary = NULL; +uintptr_t PSScavenge::_young_generation_boundary_compressed = 0; elapsedTimer PSScavenge::_accumulated_time; +STWGCTimer PSScavenge::_gc_timer; +ParallelScavengeTracer PSScavenge::_gc_tracer; Stack<markOop, mtGC> PSScavenge::_preserved_mark_stack; Stack<oop, mtGC> PSScavenge::_preserved_oop_stack; CollectorCounters* PSScavenge::_counters = NULL; -bool PSScavenge::_promotion_failed = false; // Define before use class PSIsAliveClosure: public BoolObjectClosure { public: - void do_object(oop p) { - assert(false, "Do not call."); - } bool do_object_b(oop p) { - return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded(); + return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded(); } }; @@ -261,6 +264,8 @@ assert(_preserved_mark_stack.is_empty(), "should be empty"); assert(_preserved_oop_stack.is_empty(), "should be empty"); + _gc_timer.register_gc_start(os::elapsed_counter()); + TimeStamp scavenge_entry; TimeStamp scavenge_midpoint; TimeStamp scavenge_exit; @@ -280,11 +285,14 @@ return false; } + _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); + bool promotion_failure_occurred = false; PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); + heap->increment_total_collections(); AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); @@ -301,12 +309,12 @@ } heap->print_heap_before_gc(); + heap->trace_heap_before_gc(&_gc_tracer); assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity"); assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity"); size_t prev_used = heap->used(); - assert(promotion_failed() == false, "Sanity"); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); @@ -314,8 +322,7 @@ if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } { @@ -324,7 +331,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty); + GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(false /* not full GC */,gc_cause); @@ -390,7 +397,7 @@ // We'll use the promotion manager again later.
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager(); { - // TraceTime("Roots"); + GCTraceTime tm("Scavenge", false, false, &_gc_timer); ParallelScavengeHeap::ParStrongRootsScope psrs; GCTaskQueue* q = GCTaskQueue::create(); @@ -412,6 +419,7 @@ q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary)); + q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache)); @@ -431,38 +439,41 @@ // Process reference objects discovered during scavenge { + GCTraceTime tm("References", false, false, &_gc_timer); + reference_processor()->setup_policy(false); // not always_clear reference_processor()->set_active_mt_degree(active_workers); PSKeepAliveClosure keep_alive(promotion_manager); PSEvacuateFollowersClosure evac_followers(promotion_manager); + ReferenceProcessorStats stats; if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; - reference_processor()->process_discovered_references( - &_is_alive_closure, &keep_alive, &evac_followers, &task_executor); + stats = reference_processor()->process_discovered_references( + &_is_alive_closure, &keep_alive, &evac_followers, &task_executor, + &_gc_timer); } else { - reference_processor()->process_discovered_references( - &_is_alive_closure, &keep_alive, &evac_followers, NULL); + stats = reference_processor()->process_discovered_references( + &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer); + } + + _gc_tracer.report_gc_reference_stats(stats); + + // Enqueue reference objects discovered during scavenge. + if (reference_processor()->processing_is_mt()) { + PSRefProcTaskExecutor task_executor; + reference_processor()->enqueue_discovered_references(&task_executor); + } else { + reference_processor()->enqueue_discovered_references(NULL); } } - // Enqueue reference objects discovered during scavenge. - if (reference_processor()->processing_is_mt()) { - PSRefProcTaskExecutor task_executor; - reference_processor()->enqueue_discovered_references(&task_executor); - } else { - reference_processor()->enqueue_discovered_references(NULL); - } - - // Unlink any dead interned Strings - StringTable::unlink(&_is_alive_closure); - // Process the remaining live ones - PSScavengeRootsClosure root_closure(promotion_manager); - StringTable::oops_do(&root_closure); + GCTraceTime tm("StringTable", false, false, &_gc_timer); + // Unlink any dead interned Strings and process the remaining live ones. + PSScavengeRootsClosure root_closure(promotion_manager); + StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure); // Finally, flush the promotion_manager's labs, and deallocate its stacks. - PSPromotionManager::post_scavenge(); - - promotion_failure_occurred = promotion_failed(); + promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer); if (promotion_failure_occurred) { clean_up_failed_promotion(); if (PrintGC) { @@ -477,8 +488,6 @@ if (!promotion_failure_occurred) { // Swap the survivor spaces. 
- - young_gen->eden_space()->clear(SpaceDecorator::Mangle); young_gen->from_space()->clear(SpaceDecorator::Mangle); young_gen->swap_spaces(); @@ -553,19 +562,33 @@ young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds"); + + size_t young_live = young_gen->used_in_bytes(); + size_t eden_live = young_gen->eden_space()->used_in_bytes(); + size_t cur_eden = young_gen->eden_space()->capacity_in_bytes(); + size_t max_old_gen_size = old_gen->max_gen_size(); size_t max_eden_size = young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); - size_policy->compute_generation_free_space(young_gen->used_in_bytes(), - young_gen->eden_space()->used_in_bytes(), - old_gen->used_in_bytes(), - young_gen->eden_space()->capacity_in_bytes(), - old_gen->max_gen_size(), - max_eden_size, - false /* full gc*/, - gc_cause, - heap->collector_policy()); + + // Used for diagnostics + size_policy->clear_generation_free_space_flags(); + + size_policy->compute_eden_space_size(young_live, + eden_live, + cur_eden, + max_eden_size, + false /* not full gc*/); + size_policy->check_gc_overhead_limit(young_live, + eden_live, + max_old_gen_size, + max_eden_size, + false /* not full gc*/, + gc_cause, + heap->collector_policy()); + + size_policy->decay_supplemental_growth(false /* not full gc*/); } // Resize the young generation at every collection // even if new sizes have not been calculated. This is @@ -602,7 +625,11 @@ NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); - CodeCache::prune_scavenge_root_nmethods(); + { + GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer); + + CodeCache::prune_scavenge_root_nmethods(); + } // Re-verify object start arrays if (VerifyObjectStartArray && @@ -638,11 +665,12 @@ if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } heap->print_heap_after_gc(); + heap->trace_heap_after_gc(&_gc_tracer); + _gc_tracer.report_tenuring_threshold(tenuring_threshold()); if (ZapUnusedHeapArea) { young_gen->eden_space()->check_mangled_unused_area_complete(); @@ -663,6 +691,11 @@ ParallelTaskTerminator::print_termination_counts(); #endif + + _gc_timer.register_gc_end(os::elapsed_counter()); + + _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions()); + return !promotion_failure_occurred; } @@ -672,7 +705,6 @@ void PSScavenge::clean_up_failed_promotion() { ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); - assert(promotion_failed(), "Sanity"); PSYoungGen* young_gen = heap->young_gen(); @@ -697,7 +729,6 @@ // Clear the preserved mark and oop stack caches. _preserved_mark_stack.clear(true); _preserved_oop_stack.clear(true); - _promotion_failed = false; } // Reset the PromotionFailureALot counters. @@ -708,11 +739,10 @@ // fails. Some markOops will need preservation, some will not. Note // that the entire eden is traversed after a failed promotion, with // all forwarded headers replaced by the default markOop. This means -// it is not neccessary to preserve most markOops. +// it is not necessary to preserve most markOops. 
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) { - _promotion_failed = true; if (obj_mark->must_be_preserved_for_promotion_failure(obj)) { - // Should use per-worker private stakcs hetre rather than + // Should use per-worker private stacks here rather than // locking a common pair of stacks. ThreadCritical tc; _preserved_oop_stack.push(obj); @@ -807,7 +837,7 @@ // Set boundary between young_gen and old_gen assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(), "old above young"); - _young_generation_boundary = young_gen->eden_space()->bottom(); + set_young_generation_boundary(young_gen->eden_space()->bottom()); // Initialize ref handling object for scavenging. MemRegion mr = young_gen->reserved(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" #include "gc_implementation/parallelScavenge/psVirtualspace.hpp" #include "gc_implementation/shared/collectorCounters.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "memory/allocation.hpp" #include "oops/oop.hpp" #include "utilities/stack.hpp" @@ -37,8 +38,10 @@ class OopStack; class ReferenceProcessor; class ParallelScavengeHeap; +class ParallelScavengeTracer; class PSIsAliveClosure; class PSRefProcTaskExecutor; +class STWGCTimer; class PSScavenge: AllStatic { friend class PSIsAliveClosure; @@ -62,19 +65,23 @@ protected: // Flags/counters - static ReferenceProcessor* _ref_processor; // Reference processor for scavenging. - static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing - static CardTableExtension* _card_table; // We cache the card table for fast access. - static bool _survivor_overflow; // Overflow this collection - static uint _tenuring_threshold; // tenuring threshold for next scavenge - static elapsedTimer _accumulated_time; // total time spent on scavenge - static HeapWord* _young_generation_boundary; // The lowest address possible for the young_gen. - // This is used to decide if an oop should be scavenged, - // cards should be marked, etc. + static ReferenceProcessor* _ref_processor; // Reference processor for scavenging. + static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing + static CardTableExtension* _card_table; // We cache the card table for fast access. + static bool _survivor_overflow; // Overflow this collection + static uint _tenuring_threshold; // tenuring threshold for next scavenge + static elapsedTimer _accumulated_time; // total time spent on scavenge + static STWGCTimer _gc_timer; // GC time book keeper + static ParallelScavengeTracer _gc_tracer; // GC tracing + // The lowest address possible for the young_gen. + // This is used to decide if an oop should be scavenged, + // cards should be marked, etc. + static HeapWord* _young_generation_boundary; + // Used to optimize compressed oops young gen boundary checking. 
+ static uintptr_t _young_generation_boundary_compressed; static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion static Stack<oop, mtGC> _preserved_oop_stack; // List of oops that need their mark restored. - static CollectorCounters* _counters; // collector performance counters - static bool _promotion_failed; + static CollectorCounters* _counters; // collector performance counters static void clean_up_failed_promotion(); @@ -90,7 +97,6 @@ // Accessors static uint tenuring_threshold() { return _tenuring_threshold; } static elapsedTimer* accumulated_time() { return &_accumulated_time; } - static bool promotion_failed() { return _promotion_failed; } static int consecutive_skipped_scavenges() { return _consecutive_skipped_scavenges; } @@ -112,6 +118,9 @@ // boundary moves, _young_generation_boundary must be reset static void set_young_generation_boundary(HeapWord* v) { _young_generation_boundary = v; + if (UseCompressedOops) { + _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v); + } } // Called by parallelScavengeHeap to init the tenuring threshold @@ -140,11 +149,19 @@ static void copy_and_push_safe_barrier_from_klass(PSPromotionManager* pm, oop* p); // Is an object in the young generation - // This assumes that the HeapWord argument is in the heap, + // This assumes that the 'o' is in the heap, // so it only checks one side of the complete predicate. + + inline static bool is_obj_in_young(oop o) { + return (HeapWord*)o >= _young_generation_boundary; + } + + inline static bool is_obj_in_young(narrowOop o) { + return (uintptr_t)o >= _young_generation_boundary_compressed; + } + inline static bool is_obj_in_young(HeapWord* o) { - const bool result = (o >= _young_generation_boundary); - return result; + return o >= _young_generation_boundary; } }; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -39,9 +39,7 @@ template <class T> inline bool PSScavenge::should_scavenge(T* p) { T heap_oop = oopDesc::load_heap_oop(p); - if (oopDesc::is_null(heap_oop)) return false; - oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - return PSScavenge::is_obj_in_young((HeapWord*)obj); + return PSScavenge::is_obj_in_young(heap_oop); } template <class T> @@ -94,7 +92,7 @@ // or from metadata. if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) && Universe::heap()->is_in_reserved(p)) { - if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) { + if (PSScavenge::is_obj_in_young(new_obj)) { card_table()->inline_write_ref_field_gc(p, new_obj); } } @@ -147,7 +145,7 @@ } oopDesc::encode_store_heap_oop_not_null(p, new_obj); - if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) { + if (PSScavenge::is_obj_in_young(new_obj)) { do_klass_barrier(); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -79,14 +79,15 @@ break; case system_dictionary: - { SystemDictionary::oops_do(&roots_closure); + break; - // Move this to another root_type?
- PSScavengeKlassClosure klass_closure(pm); - ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false); - } - break; + case class_loader_data: + { + PSScavengeKlassClosure klass_closure(pm); + ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false); + } + break; case management: Management::oops_do(&roots_closure); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -59,9 +59,10 @@ object_synchronizer = 4, flat_profiler = 5, system_dictionary = 6, - management = 7, - jvmti = 8, - code_cache = 9 + class_loader_data = 7, + management = 8, + jvmti = 9, + code_cache = 10 }; private: RootType _root_type; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -467,7 +467,7 @@ (free_in_old_gen < (size_t) mem_free_old_limit && free_in_eden < (size_t) mem_free_eden_limit))) { gclog_or_tty->print_cr( - "PSAdaptiveSizePolicy::compute_generation_free_space limits:" + "PSAdaptiveSizePolicy::check_gc_overhead_limit:" " promo_limit: " SIZE_FORMAT " max_eden_size: " SIZE_FORMAT " total_free_limit: " SIZE_FORMAT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/copyFailedInfo.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/copyFailedInfo.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP + +#include "runtime/thread.hpp" +#include "utilities/globalDefinitions.hpp" + +class CopyFailedInfo : public CHeapObj<mtGC> { + size_t _first_size; + size_t _smallest_size; + size_t _total_size; + uint _count; + + public: + CopyFailedInfo() : _first_size(0), _smallest_size(0), _total_size(0), _count(0) {} + + virtual void register_copy_failure(size_t size) { + if (_first_size == 0) { + _first_size = size; + _smallest_size = size; + } else if (size < _smallest_size) { + _smallest_size = size; + } + _total_size += size; + _count++; + } + + virtual void reset() { + _first_size = 0; + _smallest_size = 0; + _total_size = 0; + _count = 0; + } + + bool has_failed() const { return _count != 0; } + size_t first_size() const { return _first_size; } + size_t smallest_size() const { return _smallest_size; } + size_t total_size() const { return _total_size; } + uint failed_count() const { return _count; } +}; + +class PromotionFailedInfo : public CopyFailedInfo { + OSThread* _thread; + + public: + PromotionFailedInfo() : CopyFailedInfo(), _thread(NULL) {} + + void register_copy_failure(size_t size) { + CopyFailedInfo::register_copy_failure(size); + if (_thread == NULL) { + _thread = Thread::current()->osthread(); + } else { + assert(_thread == Thread::current()->osthread(), "The PromotionFailedInfo should be thread local."); + } + } + + void reset() { + CopyFailedInfo::reset(); + _thread = NULL; + } + + OSThread* thread() const { return _thread; } +}; + +class EvacuationFailedInfo : public CopyFailedInfo {}; + +#endif /* SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP */ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/gcHeapSummary.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP + +#include "memory/allocation.hpp" + +class VirtualSpaceSummary : public StackObj { + HeapWord* _start; + HeapWord* _committed_end; + HeapWord* _reserved_end; +public: + VirtualSpaceSummary() : + _start(NULL), _committed_end(NULL), _reserved_end(NULL) { } + VirtualSpaceSummary(HeapWord* start, HeapWord* committed_end, HeapWord* reserved_end) : + _start(start), _committed_end(committed_end), _reserved_end(reserved_end) { } + + HeapWord* start() const { return _start; } + HeapWord* committed_end() const { return _committed_end; } + HeapWord* reserved_end() const { return _reserved_end; } + size_t committed_size() const { return (uintptr_t)_committed_end - (uintptr_t)_start; } + size_t reserved_size() const { return (uintptr_t)_reserved_end - (uintptr_t)_start; } +}; + +class SpaceSummary : public StackObj { + HeapWord* _start; + HeapWord* _end; + size_t _used; +public: + SpaceSummary() : + _start(NULL), _end(NULL), _used(0) { } + SpaceSummary(HeapWord* start, HeapWord* end, size_t used) : + _start(start), _end(end), _used(used) { } + + HeapWord* start() const { return _start; } + HeapWord* end() const { return _end; } + size_t used() const { return _used; } + size_t size() const { return (uintptr_t)_end - (uintptr_t)_start; } +}; + +class MetaspaceSizes : public StackObj { + size_t _capacity; + size_t _used; + size_t _reserved; + + public: + MetaspaceSizes() : _capacity(0), _used(0), _reserved(0) {} + MetaspaceSizes(size_t capacity, size_t used, size_t reserved) : + _capacity(capacity), _used(used), _reserved(reserved) {} + + size_t capacity() const { return _capacity; } + size_t used() const { return _used; } + size_t reserved() const { return _reserved; } +}; + +class GCHeapSummary; +class PSHeapSummary; + +class GCHeapSummaryVisitor { + public: + virtual void visit(const GCHeapSummary* heap_summary) const = 0; + virtual void visit(const PSHeapSummary* heap_summary) const {} +}; + +class GCHeapSummary : public StackObj { + VirtualSpaceSummary _heap; + size_t _used; + + public: + GCHeapSummary() : + _heap(), _used(0) { } + GCHeapSummary(VirtualSpaceSummary& heap_space, size_t used) : + _heap(heap_space), _used(used) { } + + const VirtualSpaceSummary& heap() const { return _heap; } + size_t used() const { return _used; } + + virtual void accept(GCHeapSummaryVisitor* visitor) const { + visitor->visit(this); + } +}; + +class PSHeapSummary : public GCHeapSummary { + VirtualSpaceSummary _old; + SpaceSummary _old_space; + VirtualSpaceSummary _young; + SpaceSummary _eden; + SpaceSummary _from; + SpaceSummary _to; + public: + PSHeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, VirtualSpaceSummary old, SpaceSummary old_space, VirtualSpaceSummary young, SpaceSummary eden, SpaceSummary from, SpaceSummary to) : + GCHeapSummary(heap_space, heap_used), _old(old), _old_space(old_space), _young(young), _eden(eden), _from(from), _to(to) { } + const VirtualSpaceSummary& old() const { return _old; } + const SpaceSummary& old_space() const { return _old_space; } + const VirtualSpaceSummary& young() const { return _young; } + const SpaceSummary& eden() const { return _eden; } + const SpaceSummary& from() const { return _from; } + const SpaceSummary& to() const { return _to; } + + virtual void accept(GCHeapSummaryVisitor* visitor) const { + visitor->visit(this); + } +}; + +class MetaspaceSummary : public StackObj { + MetaspaceSizes _meta_space; + MetaspaceSizes 
_data_space; + MetaspaceSizes _class_space; + + public: + MetaspaceSummary() : _meta_space(), _data_space(), _class_space() {} + MetaspaceSummary(const MetaspaceSizes& meta_space, const MetaspaceSizes& data_space, const MetaspaceSizes& class_space) : + _meta_space(meta_space), _data_space(data_space), _class_space(class_space) { } + + const MetaspaceSizes& meta_space() const { return _meta_space; } + const MetaspaceSizes& data_space() const { return _data_space; } + const MetaspaceSizes& class_space() const { return _class_space; } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/gcTimer.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTimer.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
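
The summary types above are immutable value carriers handed from the collectors to the tracing layer. A minimal sketch of assembling one, assuming VM-internal context (the addresses, sizes and the example_ helper are invented for illustration):

    #include "gc_implementation/shared/gcHeapSummary.hpp"

    static GCHeapSummary example_summary(HeapWord* low, HeapWord* committed_end,
                                         HeapWord* reserved_end, size_t used) {
      // committed_size() and reserved_size() are derived from the three addresses.
      VirtualSpaceSummary heap_space(low, committed_end, reserved_end);
      return GCHeapSummary(heap_space, used);
    }

A GCHeapSummaryVisitor implementation then dispatches on the dynamic type through accept(), which is how the ParallelScavenge-specific spaces stay out of the shared event code.
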
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "utilities/growableArray.hpp"
+
+void GCTimer::register_gc_start(jlong time) {
+  _time_partitions.clear();
+  _gc_start = time;
+}
+
+void GCTimer::register_gc_end(jlong time) {
+  assert(!_time_partitions.has_active_phases(),
+      "We should have ended all started phases, before ending the GC");
+
+  _gc_end = time;
+}
+
+void GCTimer::register_gc_pause_start(const char* name, jlong time) {
+  _time_partitions.report_gc_phase_start(name, time);
+}
+
+void GCTimer::register_gc_pause_end(jlong time) {
+  _time_partitions.report_gc_phase_end(time);
+}
+
+void GCTimer::register_gc_phase_start(const char* name, jlong time) {
+  _time_partitions.report_gc_phase_start(name, time);
+}
+
+void GCTimer::register_gc_phase_end(jlong time) {
+  _time_partitions.report_gc_phase_end(time);
+}
+
+
+void STWGCTimer::register_gc_start(jlong time) {
+  GCTimer::register_gc_start(time);
+  register_gc_pause_start("GC Pause", time);
+}
+
+void STWGCTimer::register_gc_end(jlong time) {
+  register_gc_pause_end(time);
+  GCTimer::register_gc_end(time);
+}
+
+void ConcurrentGCTimer::register_gc_pause_start(const char* name, jlong time) {
+  GCTimer::register_gc_pause_start(name, time);
+}
+
+void ConcurrentGCTimer::register_gc_pause_end(jlong time) {
+  GCTimer::register_gc_pause_end(time);
+}
+
+void PhasesStack::clear() {
+  _next_phase_level = 0;
+}
+
+void PhasesStack::push(int phase_index) {
+  assert(_next_phase_level < PHASE_LEVELS, "Overflow");
+
+  _phase_indices[_next_phase_level] = phase_index;
+
+  _next_phase_level++;
+}
+
+int PhasesStack::pop() {
+  assert(_next_phase_level > 0, "Underflow");
+
+  _next_phase_level--;
+
+  return _phase_indices[_next_phase_level];
+}
+
+int PhasesStack::count() const {
+  return _next_phase_level;
+}
+
+
+TimePartitions::TimePartitions() {
+  _phases = new (ResourceObj::C_HEAP, mtGC) GrowableArray<GCPhase>(INITIAL_CAPACITY, true, mtGC);
+  clear();
+}
+
+TimePartitions::~TimePartitions() {
+  delete _phases;
+  _phases = NULL;
+}
+
+void TimePartitions::clear() {
+  _phases->clear();
+  _active_phases.clear();
+  _sum_of_pauses = 0;
+  _longest_pause = 0;
+}
+
+void TimePartitions::report_gc_phase_start(const char* name, jlong time) {
+  assert(_phases->length() <= 1000, "Too many recorded phases?");
+
+  int level = _active_phases.count();
+
+  PausePhase phase;
+  phase.set_level(level);
+  phase.set_name(name);
+  phase.set_start(time);
+
+  int index = _phases->append(phase);
+
+  _active_phases.push(index);
+}
+
+void TimePartitions::update_statistics(GCPhase* phase) {
+  // FIXME: This should only be done for pause phases
+  if (phase->level() == 0) {
+    jlong pause = phase->end() - phase->start();
+    _sum_of_pauses += pause;
+    _longest_pause = MAX2(pause, _longest_pause);
+  }
+}
+
+void TimePartitions::report_gc_phase_end(jlong time) {
+  int phase_index = _active_phases.pop();
+  GCPhase* phase = _phases->adr_at(phase_index);
+  phase->set_end(time);
+  update_statistics(phase);
+}
+
+int TimePartitions::num_phases() const {
+  return _phases->length();
+}
+
+GCPhase* TimePartitions::phase_at(int index) const {
+  assert(index >= 0, "Out of bounds");
+  assert(index < _phases->length(), "Out of bounds");
+
+  return _phases->adr_at(index);
+}
+
+jlong TimePartitions::sum_of_pauses() {
+  return _sum_of_pauses;
+}
+
+jlong TimePartitions::longest_pause() {
+  return _longest_pause;
+}
+
+bool TimePartitions::has_active_phases() {
+  return _active_phases.count() > 0;
+}
+
+bool TimePartitionPhasesIterator::has_next() {
return _next < _time_partitions->num_phases(); +} + +GCPhase* TimePartitionPhasesIterator::next() { + assert(has_next(), "Must have phases left"); + return _time_partitions->phase_at(_next++); +} + + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +class TimePartitionPhasesIteratorTest { + public: + static void all() { + one_pause(); + two_pauses(); + one_sub_pause_phase(); + many_sub_pause_phases(); + many_sub_pause_phases2(); + max_nested_pause_phases(); + } + + static void validate_pause_phase(GCPhase* phase, int level, const char* name, jlong start, jlong end) { + assert(phase->level() == level, "Incorrect level"); + assert(strcmp(phase->name(), name) == 0, "Incorrect name"); + assert(phase->start() == start, "Incorrect start"); + assert(phase->end() == end, "Incorrect end"); + } + + static void one_pause() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase", 2); + time_partitions.report_gc_phase_end(8); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 8); + assert(time_partitions.sum_of_pauses() == 8-2, "Incorrect"); + assert(time_partitions.longest_pause() == 8-2, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void two_pauses() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase1", 2); + time_partitions.report_gc_phase_end(3); + time_partitions.report_gc_phase_start("PausePhase2", 4); + time_partitions.report_gc_phase_end(6); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase1", 2, 3); + validate_pause_phase(iter.next(), 0, "PausePhase2", 4, 6); + + assert(time_partitions.sum_of_pauses() == 3, "Incorrect"); + assert(time_partitions.longest_pause() == 2, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void one_sub_pause_phase() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase", 2); + time_partitions.report_gc_phase_start("SubPhase", 3); + time_partitions.report_gc_phase_end(4); + time_partitions.report_gc_phase_end(5); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 5); + validate_pause_phase(iter.next(), 1, "SubPhase", 3, 4); + + assert(time_partitions.sum_of_pauses() == 3, "Incorrect"); + assert(time_partitions.longest_pause() == 3, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void max_nested_pause_phases() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase", 2); + time_partitions.report_gc_phase_start("SubPhase1", 3); + time_partitions.report_gc_phase_start("SubPhase2", 4); + time_partitions.report_gc_phase_start("SubPhase3", 5); + time_partitions.report_gc_phase_end(6); + time_partitions.report_gc_phase_end(7); + time_partitions.report_gc_phase_end(8); + time_partitions.report_gc_phase_end(9); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 9); + validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8); + validate_pause_phase(iter.next(), 2, "SubPhase2", 4, 7); + validate_pause_phase(iter.next(), 3, "SubPhase3", 5, 6); + + assert(time_partitions.sum_of_pauses() == 7, "Incorrect"); + assert(time_partitions.longest_pause() == 7, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void many_sub_pause_phases() { + TimePartitions time_partitions; + 
time_partitions.report_gc_phase_start("PausePhase", 2); + + time_partitions.report_gc_phase_start("SubPhase1", 3); + time_partitions.report_gc_phase_end(4); + time_partitions.report_gc_phase_start("SubPhase2", 5); + time_partitions.report_gc_phase_end(6); + time_partitions.report_gc_phase_start("SubPhase3", 7); + time_partitions.report_gc_phase_end(8); + time_partitions.report_gc_phase_start("SubPhase4", 9); + time_partitions.report_gc_phase_end(10); + + time_partitions.report_gc_phase_end(11); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 11); + validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 4); + validate_pause_phase(iter.next(), 1, "SubPhase2", 5, 6); + validate_pause_phase(iter.next(), 1, "SubPhase3", 7, 8); + validate_pause_phase(iter.next(), 1, "SubPhase4", 9, 10); + + assert(time_partitions.sum_of_pauses() == 9, "Incorrect"); + assert(time_partitions.longest_pause() == 9, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void many_sub_pause_phases2() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase", 2); + + time_partitions.report_gc_phase_start("SubPhase1", 3); + time_partitions.report_gc_phase_start("SubPhase11", 4); + time_partitions.report_gc_phase_end(5); + time_partitions.report_gc_phase_start("SubPhase12", 6); + time_partitions.report_gc_phase_end(7); + time_partitions.report_gc_phase_end(8); + time_partitions.report_gc_phase_start("SubPhase2", 9); + time_partitions.report_gc_phase_start("SubPhase21", 10); + time_partitions.report_gc_phase_end(11); + time_partitions.report_gc_phase_start("SubPhase22", 12); + time_partitions.report_gc_phase_end(13); + time_partitions.report_gc_phase_end(14); + time_partitions.report_gc_phase_start("SubPhase3", 15); + time_partitions.report_gc_phase_end(16); + + time_partitions.report_gc_phase_end(17); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 17); + validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8); + validate_pause_phase(iter.next(), 2, "SubPhase11", 4, 5); + validate_pause_phase(iter.next(), 2, "SubPhase12", 6, 7); + validate_pause_phase(iter.next(), 1, "SubPhase2", 9, 14); + validate_pause_phase(iter.next(), 2, "SubPhase21", 10, 11); + validate_pause_phase(iter.next(), 2, "SubPhase22", 12, 13); + validate_pause_phase(iter.next(), 1, "SubPhase3", 15, 16); + + assert(time_partitions.sum_of_pauses() == 15, "Incorrect"); + assert(time_partitions.longest_pause() == 15, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } +}; + +class GCTimerTest { +public: + static void all() { + gc_start(); + gc_end(); + } + + static void gc_start() { + GCTimer gc_timer; + gc_timer.register_gc_start(1); + + assert(gc_timer.gc_start() == 1, "Incorrect"); + } + + static void gc_end() { + GCTimer gc_timer; + gc_timer.register_gc_start(1); + gc_timer.register_gc_end(2); + + assert(gc_timer.gc_end() == 2, "Incorrect"); + } +}; + +void GCTimerAllTest::all() { + GCTimerTest::all(); + TimePartitionPhasesIteratorTest::all(); +} + +#endif diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/gcTimer.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTimer.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
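
The unit tests above pin down the nesting contract: phases close in LIFO order, and only level-0 phases count toward the pause statistics. A condensed sketch of the same contract (the timestamps and the example_ function are invented; in the VM the values come from os::elapsed_counter()):

    #include "gc_implementation/shared/gcTimer.hpp"

    static void example_nested_phases() {
      TimePartitions partitions;
      partitions.report_gc_phase_start("Pause", 10);   // level 0
      partitions.report_gc_phase_start("Marking", 12); // level 1, nested
      partitions.report_gc_phase_end(20);              // closes "Marking"
      partitions.report_gc_phase_end(30);              // closes "Pause"

      // Only the level-0 phase contributes: 30 - 10 = 20.
      assert(partitions.sum_of_pauses() == 20, "sanity");
      assert(partitions.longest_pause() == 20, "sanity");
    }
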
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
+
+#include "memory/allocation.hpp"
+#include "prims/jni_md.h"
+#include "utilities/macros.hpp"
+
+class ConcurrentPhase;
+class GCPhase;
+class PausePhase;
+
+template <class T> class GrowableArray;
+
+class PhaseVisitor {
+ public:
+  virtual void visit(GCPhase* phase) = 0;
+  virtual void visit(PausePhase* phase) { visit((GCPhase*)phase); }
+  virtual void visit(ConcurrentPhase* phase) { visit((GCPhase*)phase); }
+};
+
+class GCPhase {
+  const char* _name;
+  int _level;
+  jlong _start;
+  jlong _end;
+
+ public:
+  void set_name(const char* name) { _name = name; }
+  const char* name() { return _name; }
+
+  int level() { return _level; }
+  void set_level(int level) { _level = level; }
+
+  jlong start() { return _start; }
+  void set_start(jlong time) { _start = time; }
+
+  jlong end() { return _end; }
+  void set_end(jlong time) { _end = time; }
+
+  virtual void accept(PhaseVisitor* visitor) = 0;
+};
+
+class PausePhase : public GCPhase {
+ public:
+  void accept(PhaseVisitor* visitor) {
+    visitor->visit(this);
+  }
+};
+
+class ConcurrentPhase : public GCPhase {
+  void accept(PhaseVisitor* visitor) {
+    visitor->visit(this);
+  }
+};
+
+class PhasesStack {
+ public:
+  // FIXME: Temporary set to 5 (used to be 4), since Reference processing needs it.
+  static const int PHASE_LEVELS = 5;
+
+ private:
+  int _phase_indices[PHASE_LEVELS];
+  int _next_phase_level;
+
+ public:
+  PhasesStack() { clear(); }
+  void clear();
+
+  void push(int phase_index);
+  int pop();
+  int count() const;
+};
+
+class TimePartitions {
+  static const int INITIAL_CAPACITY = 10;
+
+  // Currently we only support pause phases.
+  GrowableArray<GCPhase>* _phases;
+  PhasesStack _active_phases;
+
+  jlong _sum_of_pauses;
+  jlong _longest_pause;
+
+ public:
+  TimePartitions();
+  ~TimePartitions();
+  void clear();
+
+  void report_gc_phase_start(const char* name, jlong time);
+  void report_gc_phase_end(jlong time);
+
+  int num_phases() const;
+  GCPhase* phase_at(int index) const;
+
+  jlong sum_of_pauses();
+  jlong longest_pause();
+
+  bool has_active_phases();
+ private:
+  void update_statistics(GCPhase* phase);
+};
+
+class PhasesIterator {
+ public:
+  virtual bool has_next() = 0;
+  virtual GCPhase* next() = 0;
+};
+
+class GCTimer : public ResourceObj {
+  NOT_PRODUCT(friend class GCTimerTest;)
+ protected:
+  jlong _gc_start;
+  jlong _gc_end;
+  TimePartitions _time_partitions;
+
+ public:
+  virtual void register_gc_start(jlong time);
+  virtual void register_gc_end(jlong time);
+
+  void register_gc_phase_start(const char* name, jlong time);
+  void register_gc_phase_end(jlong time);
+
+  jlong gc_start() { return _gc_start; }
+  jlong gc_end() { return _gc_end; }
+
+  TimePartitions* time_partitions() { return &_time_partitions; }
+
+  long longest_pause();
+  long sum_of_pauses();
+
+ protected:
+  void register_gc_pause_start(const char* name, jlong time);
+  void register_gc_pause_end(jlong time);
+};
+
+class STWGCTimer : public GCTimer {
+ public:
+  virtual void register_gc_start(jlong time);
+  virtual void register_gc_end(jlong time);
+};
+
+class ConcurrentGCTimer : public GCTimer {
+ public:
+  void register_gc_pause_start(const char* name, jlong time);
+  void register_gc_pause_end(jlong time);
+};
+
+class TimePartitionPhasesIterator {
+  TimePartitions* _time_partitions;
+  int _next;
+
+ public:
+  TimePartitionPhasesIterator(TimePartitions* time_partitions) : _time_partitions(time_partitions), _next(0) { }
+
+  virtual bool has_next();
+  virtual GCPhase* next();
+};
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class GCTimerAllTest {
+ public:
+  static void all();
+};
+
+#endif
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/gcTrace.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
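
The GCTimer API above separates the GC bracket from the pause/phase brackets. A hedged sketch of the expected call order on a stop-the-world timer (the example_ function is invented; os::elapsed_counter() is the time source used elsewhere in this patch):

    #include "gc_implementation/shared/gcTimer.hpp"
    #include "runtime/os.hpp"

    static void example_stw_timing() {
      STWGCTimer timer;
      timer.register_gc_start(os::elapsed_counter());  // also opens the implicit "GC Pause"
      timer.register_gc_phase_start("Example Phase", os::elapsed_counter());
      // ... phase work ...
      timer.register_gc_phase_end(os::elapsed_counter());
      timer.register_gc_end(os::elapsed_counter());    // closes the pause, asserts no open phases
    }
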
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" +#include "memory/heapInspection.hpp" +#include "memory/referenceProcessorStats.hpp" +#include "utilities/globalDefinitions.hpp" + +#if INCLUDE_ALL_GCS +#include "gc_implementation/g1/evacuationInfo.hpp" +#endif + +#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?") +#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?") + +static jlong GCTracer_next_gc_id = 0; +static GCId create_new_gc_id() { + return GCTracer_next_gc_id++; +} + +void GCTracer::report_gc_start_impl(GCCause::Cause cause, jlong timestamp) { + assert_unset_gc_id(); + + GCId gc_id = create_new_gc_id(); + _shared_gc_info.set_id(gc_id); + _shared_gc_info.set_cause(cause); + _shared_gc_info.set_start_timestamp(timestamp); +} + +void GCTracer::report_gc_start(GCCause::Cause cause, jlong timestamp) { + assert_unset_gc_id(); + + report_gc_start_impl(cause, timestamp); +} + +bool GCTracer::has_reported_gc_start() const { + return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID; +} + +void GCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses()); + _shared_gc_info.set_longest_pause(time_partitions->longest_pause()); + _shared_gc_info.set_end_timestamp(timestamp); + + send_phase_events(time_partitions); + send_garbage_collection_event(); +} + +void GCTracer::report_gc_end(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + report_gc_end_impl(timestamp, time_partitions); + + _shared_gc_info.set_id(SharedGCInfo::UNSET_GCID); +} + +void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const { + assert_set_gc_id(); + + send_reference_stats_event(REF_SOFT, rps.soft_count()); + send_reference_stats_event(REF_WEAK, rps.weak_count()); + send_reference_stats_event(REF_FINAL, rps.final_count()); + send_reference_stats_event(REF_PHANTOM, rps.phantom_count()); +} + +#if INCLUDE_SERVICES +void ObjectCountEventSenderClosure::do_cinfo(KlassInfoEntry* entry) { + if (should_send_event(entry)) { + send_event(entry); + } +} + +void ObjectCountEventSenderClosure::send_event(KlassInfoEntry* entry) { + _gc_tracer->send_object_count_after_gc_event(entry->klass(), entry->count(), + entry->words() * BytesPerWord); +} + +bool ObjectCountEventSenderClosure::should_send_event(KlassInfoEntry* entry) const { + double percentage_of_heap = ((double) entry->words()) / _total_size_in_words; + return percentage_of_heap > _size_threshold_percentage; +} + +void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) { + assert_set_gc_id(); + + if (should_send_object_count_after_gc_event()) { + ResourceMark rm; + + KlassInfoTable cit(false); + if (!cit.allocation_failed()) { + HeapInspection hi(false, false, false, NULL); + hi.populate_table(&cit, is_alive_cl); + + ObjectCountEventSenderClosure event_sender(this, cit.size_of_instances_in_words()); + cit.iterate(&event_sender); + } + } +} +#endif + +void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const { + assert_set_gc_id(); + + send_gc_heap_summary_event(when, heap_summary); + 
send_meta_space_summary_event(when, meta_space_summary); +} + +void YoungGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported"); + + GCTracer::report_gc_end_impl(timestamp, time_partitions); + send_young_gc_event(); + + _tenuring_threshold = UNSET_TENURING_THRESHOLD; +} + +void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) { + assert_set_gc_id(); + + send_promotion_failed_event(pf_info); +} + +void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) { + _tenuring_threshold = tenuring_threshold; +} + +void OldGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + GCTracer::report_gc_end_impl(timestamp, time_partitions); + send_old_gc_event(); +} + +void ParallelOldTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + OldGCTracer::report_gc_end_impl(timestamp, time_partitions); + send_parallel_old_event(); +} + +void ParallelOldTracer::report_dense_prefix(void* dense_prefix) { + assert_set_gc_id(); + + _parallel_old_gc_info.report_dense_prefix(dense_prefix); +} + +void OldGCTracer::report_concurrent_mode_failure() { + assert_set_gc_id(); + + send_concurrent_mode_failure_event(); +} + +#if INCLUDE_ALL_GCS +void G1NewTracer::report_yc_type(G1YCType type) { + assert_set_gc_id(); + + _g1_young_gc_info.set_type(type); +} + +void G1NewTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + YoungGCTracer::report_gc_end_impl(timestamp, time_partitions); + send_g1_young_gc_event(); +} + +void G1NewTracer::report_evacuation_info(EvacuationInfo* info) { + assert_set_gc_id(); + + send_evacuation_info_event(info); +} + +void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) { + assert_set_gc_id(); + + send_evacuation_failed_event(ef_info); + ef_info.reset(); +} +#endif diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/gcTrace.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
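
Taken together, gcTrace.cpp above defines a fixed reporting protocol: start, summaries, optional failure reports, end. A hedged sketch of a collector driving it, using the serial old tracer (the example_ function is invented, and a real caller would re-capture the heap summary after the collection rather than reuse one object):

    #include "gc_implementation/shared/gcHeapSummary.hpp"
    #include "gc_implementation/shared/gcTimer.hpp"
    #include "gc_implementation/shared/gcTrace.hpp"
    #include "gc_implementation/shared/gcWhen.hpp"

    static void example_trace_serial_old(SerialOldTracer* tracer, STWGCTimer* timer,
                                         const GCHeapSummary& heap, const MetaspaceSummary& meta) {
      // Assumes timer->register_gc_start() has already run.
      tracer->report_gc_start(GCCause::_allocation_failure, timer->gc_start());
      tracer->report_gc_heap_summary(GCWhen::BeforeGC, heap, meta);
      // ... collection runs; pauses and phases are registered on the timer ...
      tracer->report_gc_heap_summary(GCWhen::AfterGC, heap, meta);
      tracer->report_gc_end(timer->gc_end(), timer->time_partitions());
    }
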
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP + +#include "gc_interface/gcCause.hpp" +#include "gc_interface/gcName.hpp" +#include "gc_implementation/shared/gcWhen.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" +#include "memory/allocation.hpp" +#include "memory/klassInfoClosure.hpp" +#include "memory/referenceType.hpp" +#if INCLUDE_ALL_GCS +#include "gc_implementation/g1/g1YCTypes.hpp" +#endif +#include "utilities/macros.hpp" + +typedef uint GCId; + +class EvacuationInfo; +class GCHeapSummary; +class MetaspaceSummary; +class PSHeapSummary; +class ReferenceProcessorStats; +class TimePartitions; +class BoolObjectClosure; + +class SharedGCInfo VALUE_OBJ_CLASS_SPEC { + static const jlong UNSET_TIMESTAMP = -1; + + public: + static const GCId UNSET_GCID = (GCId)-1; + + private: + GCId _id; + GCName _name; + GCCause::Cause _cause; + jlong _start_timestamp; + jlong _end_timestamp; + jlong _sum_of_pauses; + jlong _longest_pause; + + public: + SharedGCInfo(GCName name) : _id(UNSET_GCID), _name(name), _cause(GCCause::_last_gc_cause), + _start_timestamp(UNSET_TIMESTAMP), _end_timestamp(UNSET_TIMESTAMP), _sum_of_pauses(0), _longest_pause(0) {} + + void set_id(GCId id) { _id = id; } + GCId id() const { return _id; } + + void set_start_timestamp(jlong timestamp) { _start_timestamp = timestamp; } + jlong start_timestamp() const { return _start_timestamp; } + + void set_end_timestamp(jlong timestamp) { _end_timestamp = timestamp; } + jlong end_timestamp() const { return _end_timestamp; } + + void set_name(GCName name) { _name = name; } + GCName name() const { return _name; } + + void set_cause(GCCause::Cause cause) { _cause = cause; } + GCCause::Cause cause() const { return _cause; } + + void set_sum_of_pauses(jlong duration) { _sum_of_pauses = duration; } + jlong sum_of_pauses() const { return _sum_of_pauses; } + + void set_longest_pause(jlong duration) { _longest_pause = duration; } + jlong longest_pause() const { return _longest_pause; } +}; + +class ParallelOldGCInfo VALUE_OBJ_CLASS_SPEC { + void* _dense_prefix; + public: + ParallelOldGCInfo() : _dense_prefix(NULL) {} + void report_dense_prefix(void* addr) { + _dense_prefix = addr; + } + void* dense_prefix() const { return _dense_prefix; } +}; + +#if INCLUDE_ALL_GCS + +class G1YoungGCInfo VALUE_OBJ_CLASS_SPEC { + G1YCType _type; + public: + G1YoungGCInfo() : _type(G1YCTypeEndSentinel) {} + void set_type(G1YCType type) { + _type = type; + } + G1YCType type() const { return _type; } +}; + +#endif // INCLUDE_ALL_GCS + +class GCTracer : public ResourceObj { + friend class ObjectCountEventSenderClosure; + protected: + SharedGCInfo _shared_gc_info; + + public: + void report_gc_start(GCCause::Cause cause, jlong timestamp); + void report_gc_end(jlong timestamp, TimePartitions* time_partitions); + void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const; + void report_gc_reference_stats(const ReferenceProcessorStats& rp) const; + void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN; + + bool has_reported_gc_start() const; + + protected: + GCTracer(GCName name) : _shared_gc_info(name) {} + virtual void report_gc_start_impl(GCCause::Cause cause, jlong timestamp); + virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + + private: + void send_garbage_collection_event() const; + void send_gc_heap_summary_event(GCWhen::Type when, const 
GCHeapSummary& heap_summary) const; + void send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const; + void send_reference_stats_event(ReferenceType type, size_t count) const; + void send_phase_events(TimePartitions* time_partitions) const; + void send_object_count_after_gc_event(Klass* klass, jlong count, julong total_size) const NOT_SERVICES_RETURN; + bool should_send_object_count_after_gc_event() const; +}; + +class ObjectCountEventSenderClosure : public KlassInfoClosure { + GCTracer* _gc_tracer; + const double _size_threshold_percentage; + const size_t _total_size_in_words; + public: + ObjectCountEventSenderClosure(GCTracer* gc_tracer, size_t total_size_in_words) : + _gc_tracer(gc_tracer), + _size_threshold_percentage(ObjectCountCutOffPercent / 100), + _total_size_in_words(total_size_in_words) + {} + virtual void do_cinfo(KlassInfoEntry* entry); + protected: + virtual void send_event(KlassInfoEntry* entry); + private: + bool should_send_event(KlassInfoEntry* entry) const; +}; + +class YoungGCTracer : public GCTracer { + static const uint UNSET_TENURING_THRESHOLD = (uint) -1; + + uint _tenuring_threshold; + + protected: + YoungGCTracer(GCName name) : GCTracer(name), _tenuring_threshold(UNSET_TENURING_THRESHOLD) {} + virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + + public: + void report_promotion_failed(const PromotionFailedInfo& pf_info); + void report_tenuring_threshold(const uint tenuring_threshold); + + private: + void send_young_gc_event() const; + void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const; +}; + +class OldGCTracer : public GCTracer { + protected: + OldGCTracer(GCName name) : GCTracer(name) {} + virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + + public: + void report_concurrent_mode_failure(); + + private: + void send_old_gc_event() const; + void send_concurrent_mode_failure_event(); +}; + +class ParallelOldTracer : public OldGCTracer { + ParallelOldGCInfo _parallel_old_gc_info; + + public: + ParallelOldTracer() : OldGCTracer(ParallelOld) {} + void report_dense_prefix(void* dense_prefix); + + protected: + void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + + private: + void send_parallel_old_event() const; +}; + +class SerialOldTracer : public OldGCTracer { + public: + SerialOldTracer() : OldGCTracer(SerialOld) {} +}; + +class ParallelScavengeTracer : public YoungGCTracer { + public: + ParallelScavengeTracer() : YoungGCTracer(ParallelScavenge) {} +}; + +class DefNewTracer : public YoungGCTracer { + public: + DefNewTracer() : YoungGCTracer(DefNew) {} +}; + +class ParNewTracer : public YoungGCTracer { + public: + ParNewTracer() : YoungGCTracer(ParNew) {} +}; + +#if INCLUDE_ALL_GCS +class G1NewTracer : public YoungGCTracer { + G1YoungGCInfo _g1_young_gc_info; + + public: + G1NewTracer() : YoungGCTracer(G1New) {} + + void report_yc_type(G1YCType type); + void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + void report_evacuation_info(EvacuationInfo* info); + void report_evacuation_failed(EvacuationFailedInfo& ef_info); + + private: + void send_g1_young_gc_event(); + void send_evacuation_info_event(EvacuationInfo* info); + void send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const; +}; +#endif + +class CMSTracer : public OldGCTracer { + public: + CMSTracer() : OldGCTracer(ConcurrentMarkSweep) {} +}; + +class G1OldTracer : public OldGCTracer { + public: + G1OldTracer() : 
OldGCTracer(G1Old) {}
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/gcTraceSend.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceBackend.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/evacuationInfo.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
+#endif
+
+// All GC dependencies against the trace framework are contained within this file.
+ +typedef uintptr_t TraceAddress; + +void GCTracer::send_garbage_collection_event() const { + EventGCGarbageCollection event(UNTIMED); + if (event.should_commit()) { + event.set_gcId(_shared_gc_info.id()); + event.set_name(_shared_gc_info.name()); + event.set_cause((u2) _shared_gc_info.cause()); + event.set_sumOfPauses(_shared_gc_info.sum_of_pauses()); + event.set_longestPause(_shared_gc_info.longest_pause()); + event.set_starttime(_shared_gc_info.start_timestamp()); + event.set_endtime(_shared_gc_info.end_timestamp()); + event.commit(); + } +} + +void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const { + EventGCReferenceStatistics e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_type((u1)type); + e.set_count(count); + e.commit(); + } +} + +void ParallelOldTracer::send_parallel_old_event() const { + EventGCParallelOld e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix()); + e.set_starttime(_shared_gc_info.start_timestamp()); + e.set_endtime(_shared_gc_info.end_timestamp()); + e.commit(); + } +} + +void YoungGCTracer::send_young_gc_event() const { + EventGCYoungGarbageCollection e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_tenuringThreshold(_tenuring_threshold); + e.set_starttime(_shared_gc_info.start_timestamp()); + e.set_endtime(_shared_gc_info.end_timestamp()); + e.commit(); + } +} + +void OldGCTracer::send_old_gc_event() const { + EventGCOldGarbageCollection e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_starttime(_shared_gc_info.start_timestamp()); + e.set_endtime(_shared_gc_info.end_timestamp()); + e.commit(); + } +} + +static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) { + TraceStructCopyFailed failed_info; + failed_info.set_objectCount(cf_info.failed_count()); + failed_info.set_firstSize(cf_info.first_size()); + failed_info.set_smallestSize(cf_info.smallest_size()); + failed_info.set_totalSize(cf_info.total_size()); + return failed_info; +} + +void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const { + EventPromotionFailed e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_data(to_trace_struct(pf_info)); + e.set_thread(pf_info.thread()->thread_id()); + e.commit(); + } +} + +// Common to CMS and G1 +void OldGCTracer::send_concurrent_mode_failure_event() { + EventConcurrentModeFailure e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.commit(); + } +} + +#if INCLUDE_SERVICES +void GCTracer::send_object_count_after_gc_event(Klass* klass, jlong count, julong total_size) const { + EventObjectCountAfterGC e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_class(klass); + e.set_count(count); + e.set_totalSize(total_size); + e.commit(); + } +} +#endif + +bool GCTracer::should_send_object_count_after_gc_event() const { +#if INCLUDE_TRACE + return Tracing::is_event_enabled(EventObjectCountAfterGC::eventId); +#else + return false; +#endif +} + +#if INCLUDE_ALL_GCS +void G1NewTracer::send_g1_young_gc_event() { + EventGCG1GarbageCollection e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_type(_g1_young_gc_info.type()); + e.set_starttime(_shared_gc_info.start_timestamp()); + e.set_endtime(_shared_gc_info.end_timestamp()); + e.commit(); + } +} + +void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { + 
EventEvacuationInfo e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_cSetRegions(info->collectionset_regions()); + e.set_cSetUsedBefore(info->collectionset_used_before()); + e.set_cSetUsedAfter(info->collectionset_used_after()); + e.set_allocationRegions(info->allocation_regions()); + e.set_allocRegionsUsedBefore(info->alloc_regions_used_before()); + e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied()); + e.set_bytesCopied(info->bytes_copied()); + e.set_regionsFreed(info->regions_freed()); + e.commit(); + } +} + +void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const { + EventEvacuationFailed e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_data(to_trace_struct(ef_info)); + e.commit(); + } +} +#endif + +static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) { + TraceStructVirtualSpace space; + space.set_start((TraceAddress)summary.start()); + space.set_committedEnd((TraceAddress)summary.committed_end()); + space.set_committedSize(summary.committed_size()); + space.set_reservedEnd((TraceAddress)summary.reserved_end()); + space.set_reservedSize(summary.reserved_size()); + return space; +} + +static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) { + TraceStructObjectSpace space; + space.set_start((TraceAddress)summary.start()); + space.set_end((TraceAddress)summary.end()); + space.set_used(summary.used()); + space.set_size(summary.size()); + return space; +} + +class GCHeapSummaryEventSender : public GCHeapSummaryVisitor { + GCId _id; + GCWhen::Type _when; + public: + GCHeapSummaryEventSender(GCId id, GCWhen::Type when) : _id(id), _when(when) {} + + void visit(const GCHeapSummary* heap_summary) const { + const VirtualSpaceSummary& heap_space = heap_summary->heap(); + + EventGCHeapSummary e; + if (e.should_commit()) { + e.set_gcId(_id); + e.set_when((u1)_when); + e.set_heapSpace(to_trace_struct(heap_space)); + e.set_heapUsed(heap_summary->used()); + e.commit(); + } + } + + void visit(const PSHeapSummary* ps_heap_summary) const { + visit((GCHeapSummary*)ps_heap_summary); + + const VirtualSpaceSummary& old_summary = ps_heap_summary->old(); + const SpaceSummary& old_space = ps_heap_summary->old_space(); + const VirtualSpaceSummary& young_summary = ps_heap_summary->young(); + const SpaceSummary& eden_space = ps_heap_summary->eden(); + const SpaceSummary& from_space = ps_heap_summary->from(); + const SpaceSummary& to_space = ps_heap_summary->to(); + + EventPSHeapSummary e; + if (e.should_commit()) { + e.set_gcId(_id); + e.set_when((u1)_when); + + e.set_oldSpace(to_trace_struct(ps_heap_summary->old())); + e.set_oldObjectSpace(to_trace_struct(ps_heap_summary->old_space())); + e.set_youngSpace(to_trace_struct(ps_heap_summary->young())); + e.set_edenSpace(to_trace_struct(ps_heap_summary->eden())); + e.set_fromSpace(to_trace_struct(ps_heap_summary->from())); + e.set_toSpace(to_trace_struct(ps_heap_summary->to())); + e.commit(); + } + } +}; + +void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const { + GCHeapSummaryEventSender visitor(_shared_gc_info.id(), when); + heap_summary.accept(&visitor); +} + +static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) { + TraceStructMetaspaceSizes meta_sizes; + + meta_sizes.set_capacity(sizes.capacity()); + meta_sizes.set_used(sizes.used()); + meta_sizes.set_reserved(sizes.reserved()); + + return meta_sizes; +} + +void 
GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
+  EventMetaspaceSummary e;
+  if (e.should_commit()) {
+    e.set_gcId(_shared_gc_info.id());
+    e.set_when((u1) when);
+    e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
+    e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
+    e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
+    e.commit();
+  }
+}
+
+class PhaseSender : public PhaseVisitor {
+  GCId _gc_id;
+ public:
+  PhaseSender(GCId gc_id) : _gc_id(gc_id) {}
+
+  template <typename T>
+  void send_phase(PausePhase* pause) {
+    T event(UNTIMED);
+    if (event.should_commit()) {
+      event.set_gcId(_gc_id);
+      event.set_name(pause->name());
+      event.set_starttime(pause->start());
+      event.set_endtime(pause->end());
+      event.commit();
+    }
+  }
+
+  void visit(GCPhase* pause) { ShouldNotReachHere(); }
+  void visit(ConcurrentPhase* pause) { Unimplemented(); }
+  void visit(PausePhase* pause) {
+    assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types");
+
+    switch (pause->level()) {
+      case 0: send_phase<EventGCPhasePause>(pause); break;
+      case 1: send_phase<EventGCPhasePauseLevel1>(pause); break;
+      case 2: send_phase<EventGCPhasePauseLevel2>(pause); break;
+      case 3: send_phase<EventGCPhasePauseLevel3>(pause); break;
+      default: /* Ignore sending this phase */ break;
+    }
+  }
+
+#undef send_phase
+};
+
+void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
+  PhaseSender phase_reporter(_shared_gc_info.id());
+
+  TimePartitionPhasesIterator iter(time_partitions);
+  while (iter.has_next()) {
+    GCPhase* phase = iter.next();
+    phase->accept(&phase_reporter);
+  }
+}
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/gcTraceTime.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/thread.inline.hpp" +#include "runtime/timer.hpp" +#include "utilities/ostream.hpp" + + +GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) : + _title(title), _doit(doit), _print_cr(print_cr), _timer(timer) { + if (_doit || _timer != NULL) { + _start_counter = os::elapsed_counter(); + } + + if (_timer != NULL) { + assert(SafepointSynchronize::is_at_safepoint(), "Tracing currently only supported at safepoints"); + assert(Thread::current()->is_VM_thread(), "Tracing currently only supported from the VM thread"); + + _timer->register_gc_phase_start(title, _start_counter); + } + + if (_doit) { + if (PrintGCTimeStamps) { + gclog_or_tty->stamp(); + gclog_or_tty->print(": "); + } + gclog_or_tty->print("[%s", title); + gclog_or_tty->flush(); + } +} + +GCTraceTime::~GCTraceTime() { + jlong stop_counter = 0; + + if (_doit || _timer != NULL) { + stop_counter = os::elapsed_counter(); + } + + if (_timer != NULL) { + _timer->register_gc_phase_end(stop_counter); + } + + if (_doit) { + double seconds = TimeHelper::counter_to_seconds(stop_counter - _start_counter); + if (_print_cr) { + gclog_or_tty->print_cr(", %3.7f secs]", seconds); + } else { + gclog_or_tty->print(", %3.7f secs]", seconds); + } + gclog_or_tty->flush(); + } +} diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/gcTraceTime.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTraceTime.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
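
GCTraceTime above is an RAII helper: the constructor optionally prints "[title" and registers a phase start on the supplied timer, and the destructor registers the phase end and prints the elapsed time. A hedged usage sketch (the example_ function is invented):

    #include "gc_implementation/shared/gcTraceTime.hpp"
    #include "runtime/globals.hpp"

    static void example_timed_phase(GCTimer* gc_timer) {
      // When a timer is passed, this must run in the VM thread at a safepoint
      // (see the asserts in the constructor).
      GCTraceTime tm("example phase", PrintGCDetails, true /* print_cr */, gc_timer);
      // ... phase body; on scope exit the destructor closes the phase and,
      // when PrintGCDetails is enabled, prints ", 0.0001234 secs]".
    }
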
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP + +#include "prims/jni_md.h" + +class GCTimer; + +class GCTraceTime { + const char* _title; + bool _doit; + bool _print_cr; + GCTimer* _timer; + jlong _start_counter; + + public: + GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer); + ~GCTraceTime(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/gcWhen.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcWhen.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP + +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" + +class GCWhen : AllStatic { + public: + enum Type { + BeforeGC, + AfterGC, + GCWhenEndSentinel + }; + + static const char* to_string(GCWhen::Type when) { + switch (when) { + case BeforeGC: return "Before GC"; + case AfterGC: return "After GC"; + default: ShouldNotReachHere(); return NULL; + } + } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/markSweep.cpp --- a/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
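
The markSweep changes that follow give the serial collector a static STWGCTimer and SerialOldTracer, created once in marksweep_init(). A hypothetical sketch of how a caller could bracket a collection with them (the example_ function is invented; the real call sites live in the serial GC code, not in this hunk):

    static void example_mark_sweep_bracket() {
      STWGCTimer* t = MarkSweep::gc_timer();
      t->register_gc_start(os::elapsed_counter());
      MarkSweep::gc_tracer()->report_gc_start(GCCause::_allocation_failure, t->gc_start());
      // ... the four mark-compact phases run here ...
      t->register_gc_end(os::elapsed_counter());
      MarkSweep::gc_tracer()->report_gc_end(t->gc_end(), t->time_partitions());
    }
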
* * This code is free software; you can redistribute it and/or modify it @@ -24,13 +24,15 @@ #include "precompiled.hpp" #include "compiler/compileBroker.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "oops/methodData.hpp" #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" -unsigned int MarkSweep::_total_invocations = 0; +uint MarkSweep::_total_invocations = 0; Stack MarkSweep::_marking_stack; Stack MarkSweep::_objarray_stack; @@ -41,6 +43,8 @@ size_t MarkSweep::_preserved_count_max = 0; PreservedMark* MarkSweep::_preserved_marks = NULL; ReferenceProcessor* MarkSweep::_ref_processor = NULL; +STWGCTimer* MarkSweep::_gc_timer = NULL; +SerialOldTracer* MarkSweep::_gc_tracer = NULL; MarkSweep::FollowRootClosure MarkSweep::follow_root_closure; CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true); @@ -81,7 +85,7 @@ } void MarkSweep::adjust_class_loader(ClassLoaderData* cld) { - cld->oops_do(&MarkSweep::adjust_root_pointer_closure, &MarkSweep::adjust_klass_closure, true); + cld->oops_do(&MarkSweep::adjust_pointer_closure, &MarkSweep::adjust_klass_closure, true); } @@ -95,7 +99,7 @@ // Process ObjArrays one at a time to avoid marking stack bloat. if (!_objarray_stack.is_empty()) { ObjArrayTask task = _objarray_stack.pop(); - ObjArrayKlass* const k = (ObjArrayKlass*)task.obj()->klass(); + ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass(); k->oop_follow_contents(task.obj(), task.index()); } } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty()); @@ -121,11 +125,10 @@ } } -MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true); -MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false); +MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure; -void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } -void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } +void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); } +void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); } void MarkSweep::adjust_marks() { assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(), @@ -167,7 +170,6 @@ MarkSweep::IsAliveClosure MarkSweep::is_alive; -void MarkSweep::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); } bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); } MarkSweep::KeepAliveClosure MarkSweep::keep_alive; @@ -175,7 +177,10 @@ void MarkSweep::KeepAliveClosure::do_oop(oop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); } void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); } -void marksweep_init() { /* empty */ } +void marksweep_init() { + MarkSweep::_gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer(); + MarkSweep::_gc_tracer = new (ResourceObj::C_HEAP, mtGC) SerialOldTracer(); +} #ifndef PRODUCT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/markSweep.hpp --- a/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,8 @@ class ReferenceProcessor; class DataLayout; +class SerialOldTracer; +class STWGCTimer; // MarkSweep takes care of global mark-compact garbage collection for a // GenCollectedHeap using a four-phase pointer forwarding algorithm. All @@ -80,10 +82,7 @@ }; class AdjustPointerClosure: public OopsInGenClosure { - private: - bool _is_root; public: - AdjustPointerClosure(bool is_root) : _is_root(is_root) {} virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p); }; @@ -91,7 +90,6 @@ // Used for java/lang/ref handling class IsAliveClosure: public BoolObjectClosure { public: - virtual void do_object(oop p); virtual bool do_object_b(oop p); }; @@ -116,7 +114,7 @@ // protected: // Total invocations of a MarkSweep collection - static unsigned int _total_invocations; + static uint _total_invocations; // Traversal stacks used during phase1 static Stack<oop, mtGC> _marking_stack; @@ -132,6 +130,9 @@ // Reference processing (used in ...follow_contents) static ReferenceProcessor* _ref_processor; + static STWGCTimer* _gc_timer; + static SerialOldTracer* _gc_tracer; + // Non public closures static KeepAliveClosure keep_alive; @@ -146,16 +147,18 @@ static MarkAndPushClosure mark_and_push_closure; static FollowKlassClosure follow_klass_closure; static FollowStackClosure follow_stack_closure; - static AdjustPointerClosure adjust_root_pointer_closure; static AdjustPointerClosure adjust_pointer_closure; static AdjustKlassClosure adjust_klass_closure; // Accessors - static unsigned int total_invocations() { return _total_invocations; } + static uint total_invocations() { return _total_invocations; } // Reference Processing static ReferenceProcessor* const ref_processor() { return _ref_processor; } + static STWGCTimer* gc_timer() { return _gc_timer; } + static SerialOldTracer* gc_tracer() { return _gc_tracer; } + // Call backs for marking static void mark_object(oop obj); // Mark pointer and follow contents. Empty marking stack afterwards.
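Editorial note: the new gc_timer()/gc_tracer() accessors expose the statics allocated in marksweep_init(), so individual mark-sweep phases can report into one shared timer via GCTraceTime. A hedged sketch (the phase name and wrapper function are hypothetical; it assumes STWGCTimer derives from GCTimer, as its use in GCTraceTime above implies):

    void example_mark_sweep_phase() {
      GCTraceTime tm("phase 3: adjust pointers", PrintGC, true, MarkSweep::gc_timer());
      // ... adjust-pointer work, attributed to the serial old collector ...
    }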
@@ -179,12 +182,7 @@ static void adjust_marks(); // Adjust the pointers in the preserved marks table static void restore_marks(); // Restore the marks that we saved in preserve_mark - template <class T> static inline void adjust_pointer(T* p, bool isroot); - - static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } - static void adjust_pointer(oop* p) { adjust_pointer(p, false); } - static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } - + template <class T> static inline void adjust_pointer(T* p); }; class PreservedMark VALUE_OBJ_CLASS_SPEC { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/markSweep.inline.hpp --- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -76,7 +76,7 @@ _objarray_stack.push(task); } -template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) { +template <class T> inline void MarkSweep::adjust_pointer(T* p) { T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp --- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -158,7 +158,7 @@ // Fills in the unallocated portion of the buffer with a garbage object. // If "end_of_gc" is TRUE, is after the last use in the GC. IF "retain" // is true, attempt to re-use the unused portion in the next GC. - void retire(bool end_of_gc, bool retain); + virtual void retire(bool end_of_gc, bool retain); void print() PRODUCT_RETURN; }; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/vmGCOperations.cpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -145,32 +145,37 @@ return false; } +bool VM_GC_HeapInspection::collect() { + if (GC_locker::is_active()) { + return false; + } + Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection); + return true; +} + void VM_GC_HeapInspection::doit() { HandleMark hm; - CollectedHeap* ch = Universe::heap(); - ch->ensure_parsability(false); // must happen, even if collection does // not happen (e.g. due to GC_locker) + Universe::heap()->ensure_parsability(false); // must happen, even if collection does + // not happen (e.g. due to GC_locker) + // or _full_gc being false if (_full_gc) { - // The collection attempt below would be skipped anyway if - // the gc locker is held. The following dump may then be a tad - // misleading to someone expecting only live objects to show - // up in the dump (see CR 6944195). Just issue a suitable warning - // in that case and do not attempt to do a collection. - // The latter is a subtle point, because even a failed attempt - // to GC will, in fact, induce one in the future, which we - // probably want to avoid in this case because the GC that we may - // be about to attempt holds value for us only - // if it happens now and not if it happens in the eventual - // future. - if (GC_locker::is_active()) { + if (!collect()) { + // The collection attempt was skipped because the gc locker is held.
+ // The following dump may then be a tad misleading to someone expecting + // only live objects to show up in the dump (see CR 6944195). Just issue + // a suitable warning in that case and do not attempt to do a collection. + // The latter is a subtle point, because even a failed attempt + // to GC will, in fact, induce one in the future, which we + // probably want to avoid in this case because the GC that we may + // be about to attempt holds value for us only + // if it happens now and not if it happens in the eventual + // future. warning("GC locker is held; pre-dump GC was skipped"); - } else { - ch->collect_as_vm_thread(GCCause::_heap_inspection); } } HeapInspection inspect(_csv_format, _print_help, _print_class_stats, _columns); - inspect.heap_inspection(_out, _need_prologue /* need_prologue */); + inspect.heap_inspection(_out); } @@ -225,7 +230,10 @@ gclog_or_tty->print_cr("\nCMS full GC for Metaspace"); } heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold); - _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); + // After a GC try to allocate without expanding. Could fail + // and expansion will be tried below. + _result = + _loader_data->metaspace_non_null()->allocate(_size, _mdtype); } if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) { // If still failing, allow the Metaspace to expand. diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_implementation/shared/vmGCOperations.hpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -129,21 +129,18 @@ private: outputStream* _out; bool _full_gc; - bool _need_prologue; bool _csv_format; // "comma separated values" format for spreadsheet. bool _print_help; bool _print_class_stats; const char* _columns; public: - VM_GC_HeapInspection(outputStream* out, bool request_full_gc, - bool need_prologue) : + VM_GC_HeapInspection(outputStream* out, bool request_full_gc) : VM_GC_Operation(0 /* total collections, dummy, ignored */, GCCause::_heap_inspection /* GC Cause */, 0 /* total full collections, dummy, ignored */, request_full_gc) { _out = out; _full_gc = request_full_gc; - _need_prologue = need_prologue; _csv_format = false; _print_help = false; _print_class_stats = false; @@ -159,6 +156,8 @@ void set_print_help(bool value) {_print_help = value;} void set_print_class_stats(bool value) {_print_class_stats = value;} void set_columns(const char* value) {_columns = value;} + protected: + bool collect(); }; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_interface/allocTracer.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_interface/allocTracer.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
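Editorial note: with the refactoring above, the GC attempt is isolated in collect() and the VM_GC_HeapInspection constructor loses its need_prologue flag. A hedged sketch of how the operation is requested (the calling context is hypothetical; it mirrors the usual VM-operation pattern):

    VM_GC_HeapInspection op(gclog_or_tty, true /* request_full_gc */);
    VMThread::execute(&op);  // doit() runs at a safepoint; if the GC locker is
                             // held, the pre-dump GC is skipped with a warning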
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_interface/allocTracer.hpp" +#include "trace/tracing.hpp" +#include "runtime/handles.hpp" +#include "utilities/globalDefinitions.hpp" + +void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size) { + EventAllocObjectOutsideTLAB event; + if (event.should_commit()) { + event.set_class(klass()); + event.set_allocationSize(alloc_size); + event.commit(); + } +} + +void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size) { + EventAllocObjectInNewTLAB event; + if (event.should_commit()) { + event.set_class(klass()); + event.set_allocationSize(alloc_size); + event.set_tlabSize(tlab_size); + event.commit(); + } +} diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_interface/allocTracer.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_interface/allocTracer.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP +#define SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP + +#include "memory/allocation.hpp" +#include "runtime/handles.hpp" + +class AllocTracer : AllStatic { + public: + static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size); + static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size); +}; + +#endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP */ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_interface/collectedHeap.cpp --- a/src/share/vm/gc_interface/collectedHeap.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
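Editorial note: both send_* helpers above follow the standard HotSpot tracing idiom: test should_commit() first, so a disabled event costs only a branch. The pattern, restated on its own (event type and setters exactly as used above):

    EventAllocObjectOutsideTLAB event;
    if (event.should_commit()) {   // cheap check: skip field setup when tracing is off
      event.set_class(klass());
      event.set_allocationSize(alloc_size);
      event.commit();              // timestamps and publishes the event record
    }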
* * This code is free software; you can redistribute it and/or modify it @@ -24,9 +24,15 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" +#include "gc_implementation/shared/gcWhen.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" +#include "gc_interface/allocTracer.hpp" #include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.inline.hpp" +#include "memory/metaspace.hpp" #include "oops/oop.inline.hpp" #include "oops/instanceMirrorKlass.hpp" #include "runtime/init.hpp" @@ -65,11 +71,71 @@ } } +VirtualSpaceSummary CollectedHeap::create_heap_space_summary() { + size_t capacity_in_words = capacity() / HeapWordSize; + + return VirtualSpaceSummary( + reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end()); +} + +GCHeapSummary CollectedHeap::create_heap_summary() { + VirtualSpaceSummary heap_space = create_heap_space_summary(); + return GCHeapSummary(heap_space, used()); +} + +MetaspaceSummary CollectedHeap::create_metaspace_summary() { + const MetaspaceSizes meta_space( + 0, /*MetaspaceAux::capacity_in_bytes(),*/ + 0, /*MetaspaceAux::used_in_bytes(),*/ + MetaspaceAux::reserved_in_bytes()); + const MetaspaceSizes data_space( + 0, /*MetaspaceAux::capacity_in_bytes(Metaspace::NonClassType),*/ + 0, /*MetaspaceAux::used_in_bytes(Metaspace::NonClassType),*/ + MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType)); + const MetaspaceSizes class_space( + 0, /*MetaspaceAux::capacity_in_bytes(Metaspace::ClassType),*/ + 0, /*MetaspaceAux::used_in_bytes(Metaspace::ClassType),*/ + MetaspaceAux::reserved_in_bytes(Metaspace::ClassType)); + + return MetaspaceSummary(meta_space, data_space, class_space); +} + +void CollectedHeap::print_heap_before_gc() { + if (PrintHeapAtGC) { + Universe::print_heap_before_gc(); + } + if (_gc_heap_log != NULL) { + _gc_heap_log->log_heap_before(); + } +} + +void CollectedHeap::print_heap_after_gc() { + if (PrintHeapAtGC) { + Universe::print_heap_after_gc(); + } + if (_gc_heap_log != NULL) { + _gc_heap_log->log_heap_after(); + } +} + +void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) { + const GCHeapSummary& heap_summary = create_heap_summary(); + const MetaspaceSummary& metaspace_summary = create_metaspace_summary(); + gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary); +} + +void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) { + trace_heap(GCWhen::BeforeGC, gc_tracer); +} + +void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) { + trace_heap(GCWhen::AfterGC, gc_tracer); +} + // Memory state functions. CollectedHeap::CollectedHeap() : _n_par_threads(0) - { const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT)); const size_t elements_per_word = HeapWordSize / sizeof(jint); @@ -185,7 +251,7 @@ } #endif -HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) { +HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) { // Retain tlab and allocate object in shared space if // the amount free in the tlab is too large to discard. @@ -209,6 +275,9 @@ if (obj == NULL) { return NULL; } + + AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize); + if (ZeroTLAB) { // ..and clear it. 
Copy::zero_to_words(obj, new_tlab_size); @@ -458,28 +527,28 @@ } } -void CollectedHeap::pre_full_gc_dump() { +void CollectedHeap::pre_full_gc_dump(GCTimer* timer) { if (HeapDumpBeforeFullGC) { - TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty); + GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer); // We are doing a "major" collection and a heap dump before // major collection has been requested. HeapDumper::dump_heap(); } if (PrintClassHistogramBeforeFullGC) { - TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty); - VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */); + GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer); + VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); inspector.doit(); } } -void CollectedHeap::post_full_gc_dump() { +void CollectedHeap::post_full_gc_dump(GCTimer* timer) { if (HeapDumpAfterFullGC) { - TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty); + GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer); HeapDumper::dump_heap(); } if (PrintClassHistogramAfterFullGC) { - TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty); - VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */); + GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer); + VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); inspector.doit(); } } @@ -490,7 +559,7 @@ assert(size >= 0, "int won't convert to size_t"); HeapWord* obj; assert(ScavengeRootsInCode > 0, "must be"); - obj = common_mem_allocate_init(size, CHECK_NULL); + obj = common_mem_allocate_init(real_klass, size, CHECK_NULL); post_allocation_setup_common(klass, obj); assert(Universe::is_bootstrapping() || !((oop)obj)->is_array(), "must not be an array"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_interface/collectedHeap.hpp --- a/src/share/vm/gc_interface/collectedHeap.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP #include "gc_interface/gcCause.hpp" +#include "gc_implementation/shared/gcWhen.hpp" #include "memory/allocation.hpp" #include "memory/barrierSet.hpp" #include "runtime/handles.hpp" @@ -38,11 +39,16 @@ // class defines the functions that a heap must implement, and contains // infrastructure common to all heaps. +class AdaptiveSizePolicy; class BarrierSet; +class CollectorPolicy; +class GCHeapSummary; +class GCTimer; +class GCTracer; +class MetaspaceSummary; +class Thread; class ThreadClosure; -class AdaptiveSizePolicy; -class Thread; -class CollectorPolicy; +class VirtualSpaceSummary; class GCMessage : public FormatBuffer<1024> { public: @@ -128,16 +134,16 @@ virtual void resize_all_tlabs(); // Allocate from the current thread's TLAB, with broken-out slow path. 
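Editorial note: pre_full_gc_dump()/post_full_gc_dump() above now take a GCTimer so the heap dump and class histogram appear as phases of the enclosing collection, alongside the new trace_heap hooks. A hedged sketch of the bracketing a collector is expected to perform (the wrapper function and call order are an assumption, not taken from this patch):

    void example_full_gc(CollectedHeap* heap, GCTimer* timer, GCTracer* tracer) {
      heap->pre_full_gc_dump(timer);      // HeapDumpBeforeFullGC / histogram
      heap->trace_heap_before_gc(tracer); // reports the "Before GC" heap summary
      // ... collect ...
      heap->trace_heap_after_gc(tracer);  // reports the "After GC" heap summary
      heap->post_full_gc_dump(timer);     // HeapDumpAfterFullGC / histogram
    }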
- inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size); - static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size); + inline static HeapWord* allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size); + static HeapWord* allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size); // Allocate an uninitialized block of the given size, or returns NULL if // this is impossible. - inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS); + inline static HeapWord* common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS); // Like allocate_init, but the block returned by a successful allocation // is guaranteed initialized to zeros. - inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS); + inline static HeapWord* common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS); // Helper functions for (VM) allocation. inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj); @@ -166,6 +172,8 @@ // Fill with a single object (either an int array or a java.lang.Object). static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true); + virtual void trace_heap(GCWhen::Type when, GCTracer* tracer); + // Verification functions virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size) PRODUCT_RETURN; @@ -202,8 +210,6 @@ MemRegion reserved_region() const { return _reserved; } address base() const { return (address)reserved_region().start(); } - // Future cleanup here. The following functions should specify bytes or - // heapwords as part of their signature. virtual size_t capacity() const = 0; virtual size_t used() const = 0; @@ -550,8 +556,13 @@ virtual void prepare_for_verify() = 0; // Generate any dumps preceding or following a full gc - void pre_full_gc_dump(); - void post_full_gc_dump(); + void pre_full_gc_dump(GCTimer* timer); + void post_full_gc_dump(GCTimer* timer); + + VirtualSpaceSummary create_heap_space_summary(); + GCHeapSummary create_heap_summary(); + + MetaspaceSummary create_metaspace_summary(); // Print heap information on the given outputStream. virtual void print_on(outputStream* st) const = 0; @@ -560,7 +571,7 @@ print_on(tty); } // Print more detailed heap information on the given - // outputStream. The default behaviour is to call print_on(). It is + // outputStream. The default behavior is to call print_on(). It is // up to each subclass to override it and add any additional output // it needs. virtual void print_extended_on(outputStream* st) const { @@ -589,23 +600,11 @@ // Default implementation does nothing. virtual void print_tracing_info() const = 0; - // If PrintHeapAtGC is set call the appropriate routi - void print_heap_before_gc() { - if (PrintHeapAtGC) { - Universe::print_heap_before_gc(); - } - if (_gc_heap_log != NULL) { - _gc_heap_log->log_heap_before(); - } - } - void print_heap_after_gc() { - if (PrintHeapAtGC) { - Universe::print_heap_after_gc(); - } - if (_gc_heap_log != NULL) { - _gc_heap_log->log_heap_after(); - } - } + void print_heap_before_gc(); + void print_heap_after_gc(); + + void trace_heap_before_gc(GCTracer* gc_tracer); + void trace_heap_after_gc(GCTracer* gc_tracer); // Heap verification virtual void verify(bool silent, VerifyOption option) = 0; @@ -619,7 +618,7 @@ inline bool promotion_should_fail(); // Reset the PromotionFailureALot counters. Should be called at the end of a - // GC in which promotion failure ocurred. + // GC in which promotion failure occurred. 
inline void reset_promotion_should_fail(volatile size_t* count); inline void reset_promotion_should_fail(); #endif // #ifndef PRODUCT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_interface/collectedHeap.inline.hpp --- a/src/share/vm/gc_interface/collectedHeap.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP +#include "gc_interface/allocTracer.hpp" #include "gc_interface/collectedHeap.hpp" #include "memory/threadLocalAllocBuffer.inline.hpp" #include "memory/universe.hpp" @@ -107,7 +108,7 @@ post_allocation_notify(klass, (oop)obj); } -HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) { +HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) { // Clear unhandled oops for memory allocation. Memory allocation might // not take out a lock if from tlab, so clear here. @@ -120,7 +121,7 @@ HeapWord* result = NULL; if (UseTLAB) { - result = CollectedHeap::allocate_from_tlab(THREAD, size); + result = allocate_from_tlab(klass, THREAD, size); if (result != NULL) { assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage"); @@ -136,6 +137,9 @@ assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage"); THREAD->incr_allocated_bytes(size * HeapWordSize); + + AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize); + return result; } @@ -165,13 +169,13 @@ } } -HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) { - HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL); +HeapWord* CollectedHeap::common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS) { + HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL); init_obj(obj, size); return obj; } -HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) { +HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) { assert(UseTLAB, "should use UseTLAB"); HeapWord* obj = thread->tlab().allocate(size); @@ -179,7 +183,7 @@ return obj; } // Otherwise... 
- return allocate_from_tlab_slow(thread, size); + return allocate_from_tlab_slow(klass, thread, size); } void CollectedHeap::init_obj(HeapWord* obj, size_t size) { @@ -194,7 +198,7 @@ debug_only(check_for_valid_allocation_state()); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(size >= 0, "int won't convert to size_t"); - HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL); + HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL); post_allocation_setup_obj(klass, obj); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); return (oop)obj; @@ -207,7 +211,7 @@ debug_only(check_for_valid_allocation_state()); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(size >= 0, "int won't convert to size_t"); - HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL); + HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL); post_allocation_setup_array(klass, obj, length); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); return (oop)obj; @@ -220,7 +224,7 @@ debug_only(check_for_valid_allocation_state()); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(size >= 0, "int won't convert to size_t"); - HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL); + HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL); ((oop)obj)->set_klass_gap(0); post_allocation_setup_array(klass, obj, length); #ifndef PRODUCT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_interface/gcCause.cpp --- a/src/share/vm/gc_interface/gcCause.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_interface/gcCause.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -72,6 +72,9 @@ case _cms_final_remark: return "CMS Final Remark"; + case _cms_concurrent_mark: + return "CMS Concurrent Mark"; + case _old_generation_expanded_on_last_scavenge: return "Old Generation Expanded On Last Scavenge"; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_interface/gcCause.hpp --- a/src/share/vm/gc_interface/gcCause.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/gc_interface/gcCause.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -60,6 +60,7 @@ _cms_generation_full, _cms_initial_mark, _cms_final_remark, + _cms_concurrent_mark, _old_generation_expanded_on_last_scavenge, _old_generation_too_full_to_scavenge, diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/gc_interface/gcName.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_interface/gcName.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
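Editorial note: threading the KlassHandle through the hunks above exists only to serve the new allocation events; the TLAB fast path is unchanged and emits nothing. A simplified paraphrase of common_mem_allocate_noinit after this change (not compilable as-is, since the real members are protected):

    HeapWord* result = NULL;
    if (UseTLAB) {
      result = allocate_from_tlab(klass, THREAD, size);  // slow path may emit AllocObjectInNewTLAB
      if (result != NULL) {
        return result;
      }
    }
    bool gc_overhead_limit_was_exceeded = false;
    result = Universe::heap()->mem_allocate(size, &gc_overhead_limit_was_exceeded);
    if (result != NULL) {
      AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize);
    }
    return result;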
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_INTERFACE_GCNAME_HPP +#define SHARE_VM_GC_INTERFACE_GCNAME_HPP + +#include "utilities/debug.hpp" + +enum GCName { + ParallelOld, + SerialOld, + PSMarkSweep, + ParallelScavenge, + DefNew, + ParNew, + G1New, + ConcurrentMarkSweep, + G1Old, + GCNameEndSentinel +}; + +class GCNameHelper { + public: + static const char* to_string(GCName name) { + switch(name) { + case ParallelOld: return "ParallelOld"; + case SerialOld: return "SerialOld"; + case PSMarkSweep: return "PSMarkSweep"; + case ParallelScavenge: return "ParallelScavenge"; + case DefNew: return "DefNew"; + case ParNew: return "ParNew"; + case G1New: return "G1New"; + case ConcurrentMarkSweep: return "ConcurrentMarkSweep"; + case G1Old: return "G1Old"; + default: ShouldNotReachHere(); return NULL; + } + } +}; + +#endif // SHARE_VM_GC_INTERFACE_GCNAME_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/graal/graalCompilerToVM.cpp --- a/src/share/vm/graal/graalCompilerToVM.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/graal/graalCompilerToVM.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1132,7 +1132,11 @@ C2V_VMENTRY(void, reprofile, (JNIEnv *env, jobject, jlong metaspace_method)) Method* method = asMethod(metaspace_method); - method->reset_counters(); + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + mcs->clear_counters(); + } + NOT_PRODUCT(method->set_compiled_invocation_count(0)); nmethod* code = method->code(); if (code != NULL) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/interpreter/bytecodeInterpreter.cpp --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -32,6 +32,7 @@ #include "interpreter/interpreterRuntime.hpp" #include "memory/cardTableModRefBS.hpp" #include "memory/resourceArea.hpp" +#include "oops/methodCounters.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" @@ -304,11 +305,12 @@ #define METHOD istate->method() -#define INVOCATION_COUNT METHOD->invocation_counter() -#define BACKEDGE_COUNT METHOD->backedge_counter() - - -#define INCR_INVOCATION_COUNT INVOCATION_COUNT->increment() +#define GET_METHOD_COUNTERS(res) \ + res = METHOD->method_counters(); \ + if (res == NULL) { \ + CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \ + } + #define OSR_REQUEST(res, branch_pc) \ CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception); /* @@ -325,10 +327,12 @@ #define DO_BACKEDGE_CHECKS(skip, branch_pc) \ if ((skip) <= 0) { \ + MethodCounters* mcs; \ + GET_METHOD_COUNTERS(mcs); \ if (UseLoopCounter) { \ bool do_OSR = UseOnStackReplacement; \ - BACKEDGE_COUNT->increment(); \ - if (do_OSR) do_OSR = BACKEDGE_COUNT->reached_InvocationLimit(); \ + mcs->backedge_counter()->increment(); \ + if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit(); \ if (do_OSR) { \ nmethod* osr_nmethod; \ OSR_REQUEST(osr_nmethod, branch_pc); \ @@ -341,7 +345,7 @@ } \ } \ } /* UseCompiler ... 
*/ \ - INCR_INVOCATION_COUNT; \ + mcs->invocation_counter()->increment(); \ SAFEPOINT; \ } @@ -464,7 +468,25 @@ #ifdef ASSERT if (istate->_msg != initialize) { - assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); + // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap) + // because in that case, EnableInvokeDynamic is true by default but will be later switched off + // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes + // for the old JSR292 implementation. + // This leads to a situation where 'istate->_stack_limit' always accounts for + // methodOopDesc::extra_stack_entries() because it is computed in + // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while + // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't + // account for extra_stack_entries() anymore because at the time when it is called + // EnableInvokeDynamic was already set to false. + // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was + // switched off because of the wrong classes. + if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) { + assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); + } else { + const int extra_stack_entries = Method::extra_stack_entries_for_indy; + assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries + + 1), "bad stack limit"); + } #ifndef SHARK IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong")); #endif // !SHARK @@ -618,11 +640,13 @@ // count invocations assert(initialized, "Interpreter not initialized"); if (_compiling) { + MethodCounters* mcs; + GET_METHOD_COUNTERS(mcs); if (ProfileInterpreter) { - METHOD->increment_interpreter_invocation_count(); + METHOD->increment_interpreter_invocation_count(THREAD); } - INCR_INVOCATION_COUNT; - if (INVOCATION_COUNT->reached_InvocationLimit()) { + mcs->invocation_counter()->increment(); + if (mcs->invocation_counter()->reached_InvocationLimit()) { CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); // We no longer retry on a counter overflow diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/interpreter/interpreterRuntime.cpp --- a/src/share/vm/interpreter/interpreterRuntime.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
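Editorial note: GET_METHOD_COUNTERS above captures the central change in this area: invocation and backedge counters now live in a lazily allocated MethodCounters object rather than in Method itself. The idiom, restated outside the macro (exception plumbing elided):

    MethodCounters* mcs = METHOD->method_counters();
    if (mcs == NULL) {
      // allocate the counters on first use; returns NULL under OOM, in which
      // case the interpreter simply retries on a later invocation
      mcs = InterpreterRuntime::build_method_counters(THREAD, METHOD);
    }
    if (mcs != NULL) {
      mcs->invocation_counter()->increment();
    }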
* * This code is free software; you can redistribute it and/or modify it @@ -465,7 +465,7 @@ continuation = Interpreter::remove_activation_entry(); #endif // Count this for compilation purposes - h_method->interpreter_throwout_increment(); + h_method->interpreter_throwout_increment(THREAD); } else { // handler in this method => change bci/bcp to handler bci/bcp and continue there handler_pc = h_method->code_base() + handler_bci; @@ -914,6 +914,15 @@ fr.interpreter_frame_set_mdp(new_mdp); IRT_END +IRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* thread, Method* m)) + MethodCounters* mcs = Method::build_method_counters(m, thread); + if (HAS_PENDING_EXCEPTION) { + assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here"); + CLEAR_PENDING_EXCEPTION; + } + return mcs; +IRT_END + IRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread)) // We used to need an explict preserve_arguments here for invoke bytecodes. However, @@ -1054,7 +1063,7 @@ return; } if (set_handler_blob() == NULL) { - vm_exit_out_of_memory(blob_size, "native signature handlers"); + vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers"); } BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer", diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/interpreter/interpreterRuntime.hpp --- a/src/share/vm/interpreter/interpreterRuntime.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/interpreter/interpreterRuntime.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -169,6 +169,7 @@ #ifdef ASSERT static void verify_mdp(Method* method, address bcp, address mdp); #endif // ASSERT + static MethodCounters* build_method_counters(JavaThread* thread, Method* m); }; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/interpreter/invocationCounter.cpp --- a/src/share/vm/interpreter/invocationCounter.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/interpreter/invocationCounter.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
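Editorial note: vm_exit_out_of_memory now carries an explicit classification of the failed allocation, which this patch applies at every call site. The two variants, copied from hunks in this changeset:

    vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers"); // C-heap (malloc) failure
    vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card");       // reserved/committed (mmap) failure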
* * This code is free software; you can redistribute it and/or modify it @@ -109,15 +109,19 @@ static address do_nothing(methodHandle method, TRAPS) { // dummy action for inactive invocation counters - method->invocation_counter()->set_carry(); - method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); + MethodCounters* mcs = method->method_counters(); + assert(mcs != NULL, ""); + mcs->invocation_counter()->set_carry(); + mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); return NULL; } static address do_decay(methodHandle method, TRAPS) { // decay invocation counters so compilation gets delayed - method->invocation_counter()->decay(); + MethodCounters* mcs = method->method_counters(); + assert(mcs != NULL, ""); + mcs->invocation_counter()->decay(); return NULL; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/interpreter/linkResolver.cpp --- a/src/share/vm/interpreter/linkResolver.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/interpreter/linkResolver.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1014,13 +1014,28 @@ resolved_method->name(), resolved_method->signature())); } - // check if public - if (!sel_method->is_public()) { - ResourceMark rm(THREAD); - THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), - Method::name_and_sig_as_C_string(recv_klass(), - sel_method->name(), - sel_method->signature())); + // check access + if (sel_method->method_holder()->is_interface()) { + // Method holder is an interface. Throw Illegal Access Error if sel_method + // is neither public nor private. + if (!(sel_method->is_public() || sel_method->is_private())) { + ResourceMark rm(THREAD); + THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), + Method::name_and_sig_as_C_string(recv_klass(), + sel_method->name(), + sel_method->signature())); + } + } + else { + // Method holder is a class. Throw Illegal Access Error if sel_method + // is not public. + if (!sel_method->is_public()) { + ResourceMark rm(THREAD); + THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), + Method::name_and_sig_as_C_string(recv_klass(), + sel_method->name(), + sel_method->signature())); + } } // check if abstract if (check_null_and_abstract && sel_method->is_abstract()) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/allocation.cpp --- a/src/share/vm/memory/allocation.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/allocation.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
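Editorial note: the expanded access check above distinguishes the kind of the selected method's holder. Condensed into a single predicate (a restatement of the patch's logic, not HotSpot code):

    static bool selection_is_accessible(Method* sel_method) {
      if (sel_method->method_holder()->is_interface()) {
        return sel_method->is_public() || sel_method->is_private();  // interface holder
      }
      return sel_method->is_public();                                // class holder
    }

On failure, either branch throws IllegalAccessError naming recv_klass and the method.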
* * This code is free software; you can redistribute it and/or modify it @@ -49,16 +49,22 @@ # include "os_bsd.inline.hpp" #endif -void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; -void StackObj::operator delete(void* p) { ShouldNotCallThis(); }; -void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; -void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }; +void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; } +void StackObj::operator delete(void* p) { ShouldNotCallThis(); } +void* StackObj::operator new [](size_t size) { ShouldNotCallThis(); return 0; } +void StackObj::operator delete [](void* p) { ShouldNotCallThis(); } + +void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; } +void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); } +void* _ValueObj::operator new [](size_t size) { ShouldNotCallThis(); return 0; } +void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); } void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data, - size_t word_size, bool read_only, TRAPS) { + size_t word_size, bool read_only, + MetaspaceObj::Type type, TRAPS) { // Klass has it's own operator new return Metaspace::allocate(loader_data, word_size, read_only, - Metaspace::NonClassType, CHECK_NULL); + type, CHECK_NULL); } bool MetaspaceObj::is_shared() const { @@ -81,7 +87,6 @@ st->print(" {"INTPTR_FORMAT"}", this); } - void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) { address res; switch (type) { @@ -99,6 +104,10 @@ return res; } +void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) { + return (address) operator new(size, type, flags); +} + void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant, allocation_type type, MEMFLAGS flags) { //should only call this with std::nothrow, use other operator new() otherwise @@ -118,6 +127,10 @@ return res; } +void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant, + allocation_type type, MEMFLAGS flags) { + return (address)operator new(size, nothrow_constant, type, flags); +} void ResourceObj::operator delete(void* p) { assert(((ResourceObj *)p)->allocated_on_C_heap(), @@ -126,6 +139,10 @@ FreeHeap(p); } +void ResourceObj::operator delete [](void* p) { + operator delete(p); +} + #ifdef ASSERT void ResourceObj::set_allocation_type(address res, allocation_type type) { // Set allocation type in the resource object @@ -215,8 +232,6 @@ tty->print_cr("Heap free " INTPTR_FORMAT, p); } -bool warn_new_operator = false; // see vm_main - //-------------------------------------------------------------------------------------- // ChunkPool implementation @@ -259,7 +274,7 @@ } if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC); if (p == NULL) - vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); + vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate"); return p; } @@ -360,7 +375,7 @@ void* Chunk::operator new(size_t requested_size, size_t length) { // requested_size is equal to sizeof(Chunk) but in order for the arena // allocations to come out aligned as expected the size must be aligned - // to expected arean alignment. + // to expected arena alignment. // expect requested_size but if sizeof(Chunk) doesn't match isn't proper size we must align it. 
assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment"); size_t bytes = ARENA_ALIGN(requested_size) + length; @@ -371,7 +386,7 @@ default: { void *p = os::malloc(bytes, mtChunk, CALLER_PC); if (p == NULL) - vm_exit_out_of_memory(bytes, "Chunk::new"); + vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new"); return p; } } @@ -531,7 +546,7 @@ } void Arena::signal_out_of_memory(size_t sz, const char* whence) const { - vm_exit_out_of_memory(sz, whence); + vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence); } // Grow a new Chunk @@ -669,19 +684,40 @@ // a memory leak. Use CHeapObj as the base class of such objects to make it explicit // that they're allocated on the C heap. // Commented out in product version to avoid conflicts with third-party C++ native code. -// %% note this is causing a problem on solaris debug build. the global -// new is being called from jdk source and causing data corruption. -// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew -// define CATCH_OPERATOR_NEW_USAGE if you want to use this. -#ifdef CATCH_OPERATOR_NEW_USAGE +// On certain platforms, such as Mac OS X (Darwin), in debug builds, new is called +// from JDK source and causes data corruption, for example in +// Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair. +// Define ALLOW_OPERATOR_NEW_USAGE on platforms where use of the global operator new is allowed. +// +#ifndef ALLOW_OPERATOR_NEW_USAGE void* operator new(size_t size){ - static bool warned = false; - if (!warned && warn_new_operator) - warning("should not call global (default) operator new"); - warned = true; - return (void *) AllocateHeap(size, "global operator new"); + assert(false, "Should not call global operator new"); + return 0; +} + +void* operator new [](size_t size){ + assert(false, "Should not call global operator new[]"); + return 0; +} + +void* operator new(size_t size, const std::nothrow_t& nothrow_constant){ + assert(false, "Should not call global operator new"); + return 0; } -#endif + +void* operator new [](size_t size, const std::nothrow_t& nothrow_constant){ + assert(false, "Should not call global operator new[]"); + return 0; +} + +void operator delete(void* p) { + assert(false, "Should not call global delete"); +} + +void operator delete [](void* p) { + assert(false, "Should not call global delete []"); +} +#endif // ALLOW_OPERATOR_NEW_USAGE void AllocatedObj::print() const { print_on(tty); } void AllocatedObj::print_value() const { print_value_on(tty); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/allocation.hpp --- a/src/share/vm/memory/allocation.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/allocation.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -86,12 +86,23 @@ // subclasses. // // The following macros and function should be used to allocate memory -// directly in the resource area or in the C-heap: +// directly in the resource area or in the C-heap. The _OBJ variants +// of the NEW/FREE_C_HEAP macros allocate/deallocate simple +// objects that do not inherit from CHeapObj; note that constructors and +// destructors are not called. The preferred way to allocate objects +// is with the new operator. // -// NEW_RESOURCE_ARRAY(type,size) +// WARNING: The array variant must only be used for a homogeneous array +// where all objects are of the exact type specified. If subtypes are +// stored in the array then you must pay attention to calling destructors +// as needed.
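Editorial note: the ifndef block above poisons the global allocation operators in VM code. Unless a platform defines ALLOW_OPERATOR_NEW_USAGE, any allocation that bypasses CHeapObj, ResourceObj, or an Arena now fails an assert in debug builds instead of merely warning. A hypothetical offender of the kind it is designed to catch:

    class Foo { int _x; };  // not derived from any HotSpot allocation base class
    Foo* f = new Foo();     // debug build: asserts "Should not call global operator new"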
+// +// NEW_RESOURCE_ARRAY(type, size) // NEW_RESOURCE_OBJ(type) -// NEW_C_HEAP_ARRAY(type,size) -// NEW_C_HEAP_OBJ(type) +// NEW_C_HEAP_ARRAY(type, size) +// NEW_C_HEAP_OBJ(type, memflags) +// FREE_C_HEAP_ARRAY(type, old, memflags) +// FREE_C_HEAP_OBJ(objname, type, memflags) // char* AllocateHeap(size_t size, const char* name); // void FreeHeap(void* p); // @@ -146,7 +157,8 @@ mtJavaHeap = 0x0C00, // Java heap mtClassShared = 0x0D00, // class data sharing mtTest = 0x0E00, // Test type for verifying NMT - mt_number_of_types = 0x000E, // number of memory types (mtDontTrack + mtTracing = 0x0F00, // memory used for Tracing + mt_number_of_types = 0x000F, // number of memory types (mtDontTrack // is not included as validate type) mtDontTrack = 0x0F00, // memory we do not or cannot track mt_masks = 0x7F00, @@ -195,8 +207,11 @@ _NOINLINE_ void* operator new(size_t size, address caller_pc = 0); _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant, address caller_pc = 0); - + _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0); + _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, + address caller_pc = 0); void operator delete(void* p); + void operator delete [] (void* p); }; // Base class for objects allocated on the stack only. @@ -206,6 +221,8 @@ private: void* operator new(size_t size); void operator delete(void* p); + void* operator new [](size_t size); + void operator delete [](void* p); }; // Base class for objects used as value objects. @@ -229,7 +246,9 @@ class _ValueObj { private: void* operator new(size_t size); - void operator delete(void* p); + void operator delete(void* p); + void* operator new [](size_t size); + void operator delete [](void* p); }; @@ -250,8 +269,55 @@ bool is_shared() const; void print_address_on(outputStream* st) const; // nonvirtual address printing +#define METASPACE_OBJ_TYPES_DO(f) \ + f(Unknown) \ + f(Class) \ + f(Symbol) \ + f(TypeArrayU1) \ + f(TypeArrayU2) \ + f(TypeArrayU4) \ + f(TypeArrayU8) \ + f(TypeArrayOther) \ + f(Method) \ + f(ConstMethod) \ + f(MethodData) \ + f(ConstantPool) \ + f(ConstantPoolCache) \ + f(Annotation) \ + f(MethodCounters) + +#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type, +#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name; + + enum Type { + // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc + METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE) + _number_of_types + }; + + static const char * type_name(Type type) { + switch(type) { + METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE) + default: + ShouldNotReachHere(); + return NULL; + } + } + + static MetaspaceObj::Type array_type(size_t elem_size) { + switch (elem_size) { + case 1: return TypeArrayU1Type; + case 2: return TypeArrayU2Type; + case 4: return TypeArrayU4Type; + case 8: return TypeArrayU8Type; + default: + return TypeArrayOtherType; + } + } + void* operator new(size_t size, ClassLoaderData* loader_data, - size_t word_size, bool read_only, Thread* thread); + size_t word_size, bool read_only, + Type type, Thread* thread); // can't use TRAPS from this header file. 
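Editorial note: METASPACE_OBJ_TYPES_DO above is an X-macro: a single list drives both the Type enum and the type_name() string table, so the two cannot drift apart. A minimal standalone illustration of the technique (the Color names are hypothetical, not HotSpot code):

    #define COLORS_DO(f) f(Red) f(Green) f(Blue)
    #define COLOR_DECLARE(name) name ## Color,
    #define COLOR_NAME_CASE(name) case name ## Color: return #name;

    enum Color { COLORS_DO(COLOR_DECLARE) _number_of_colors };

    static const char* color_name(Color c) {
      switch (c) {
        COLORS_DO(COLOR_NAME_CASE)   // expands to one case per color
        default: return "unknown";
      }
    }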
void operator delete(void* p) { ShouldNotCallThis(); } }; @@ -510,13 +576,24 @@ public: void* operator new(size_t size, allocation_type type, MEMFLAGS flags); + void* operator new [](size_t size, allocation_type type, MEMFLAGS flags); void* operator new(size_t size, const std::nothrow_t& nothrow_constant, allocation_type type, MEMFLAGS flags); + void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, + allocation_type type, MEMFLAGS flags); + void* operator new(size_t size, Arena *arena) { address res = (address)arena->Amalloc(size); DEBUG_ONLY(set_allocation_type(res, ARENA);) return res; } + + void* operator new [](size_t size, Arena *arena) { + address res = (address)arena->Amalloc(size); + DEBUG_ONLY(set_allocation_type(res, ARENA);) + return res; + } + void* operator new(size_t size) { address res = (address)resource_allocate_bytes(size); DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);) @@ -529,7 +606,20 @@ return res; } + void* operator new [](size_t size) { + address res = (address)resource_allocate_bytes(size); + DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);) + return res; + } + + void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) { + address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL); + DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);) + return res; + } + void operator delete(void* p); + void operator delete [](void* p); }; // One of the following macros must be used when allocating an array @@ -539,6 +629,9 @@ #define NEW_RESOURCE_ARRAY(type, size)\ (type*) resource_allocate_bytes((size) * sizeof(type)) +#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\ + (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL) + #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\ (type*) resource_allocate_bytes(thread, (size) * sizeof(type)) @@ -560,24 +653,25 @@ #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\ (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags)) -#define FREE_C_HEAP_ARRAY(type,old,memflags) \ +#define FREE_C_HEAP_ARRAY(type, old, memflags) \ FreeHeap((char*)(old), memflags) -#define NEW_C_HEAP_OBJ(type, memflags)\ - NEW_C_HEAP_ARRAY(type, 1, memflags) - - #define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) #define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\ (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc)) -#define NEW_C_HEAP_OBJ2(type, memflags, pc)\ - NEW_C_HEAP_ARRAY2(type, 1, memflags, pc) +#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail) \ + (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail) +// allocate type in heap without calling ctor +#define NEW_C_HEAP_OBJ(type, memflags)\ + NEW_C_HEAP_ARRAY(type, 1, memflags) -extern bool warn_new_operator; +// deallocate obj of type in heap without calling dtor +#define FREE_C_HEAP_OBJ(objname, memflags)\ + FreeHeap((char*)objname, memflags); // for statistics #ifndef PRODUCT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/allocation.inline.hpp --- a/src/share/vm/memory/allocation.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/allocation.inline.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -58,7 +58,9 @@ #ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p); #endif - if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) vm_exit_out_of_memory(size, "AllocateHeap"); + if (p == NULL && 
alloc_failmode == AllocFailStrategy::EXIT_OOM) { + vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap"); + } return p; } @@ -68,7 +70,9 @@ #ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p); #endif - if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) vm_exit_out_of_memory(size, "ReallocateHeap"); + if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { + vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap"); + } return p; } @@ -82,30 +86,39 @@ template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size, address caller_pc){ + void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC)); #ifdef ASSERT - void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC)); if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p); +#endif return p; -#else - return (void *) AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC)); -#endif } template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size, const std::nothrow_t& nothrow_constant, address caller_pc) { -#ifdef ASSERT void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC), AllocFailStrategy::RETURN_NULL); +#ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p); +#endif return p; -#else - return (void *) AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC), - AllocFailStrategy::RETURN_NULL); -#endif +} + +template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size, + address caller_pc){ + return CHeapObj<F>::operator new(size, caller_pc); +} + +template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size, + const std::nothrow_t& nothrow_constant, address caller_pc) { + return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc); } template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){ - FreeHeap(p, F); + FreeHeap(p, F); +} + +template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){ + FreeHeap(p, F); } template <class E, MEMFLAGS F> @@ -128,14 +141,14 @@ int alignment = os::vm_allocation_granularity(); _size = align_size_up(_size, alignment); - _addr = os::reserve_memory(_size, NULL, alignment); + _addr = os::reserve_memory(_size, NULL, alignment, F); if (_addr == NULL) { - vm_exit_out_of_memory(_size, "Allocator (reserve)"); + vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)"); } bool success = os::commit_memory(_addr, _size, false /* executable */); if (!success) { - vm_exit_out_of_memory(_size, "Allocator (commit)"); + vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)"); } return (E*)_addr; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/blockOffsetTable.cpp --- a/src/share/vm/memory/blockOffsetTable.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/blockOffsetTable.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -80,7 +80,7 @@ assert(delta > 0, "just checking"); if (!_vs.expand_by(delta)) { // Do better than this for Merlin - vm_exit_out_of_memory(delta, "offset table expansion"); + vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion"); } assert(_vs.high() == high + delta, "invalid expansion"); } else { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/cardTableModRefBS.cpp --- a/src/share/vm/memory/cardTableModRefBS.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/cardTableModRefBS.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
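Editorial note: the CHeapObj operators above and the NEW/FREE_C_HEAP macros are the two sanctioned routes to the VM's C heap; the macro route runs no constructors or destructors, so allocation and free must pair the element type and memory-type flag. A hedged usage sketch (the buffer size and the mtGC tag are chosen arbitrarily for the example):

    jbyte* buf = NEW_C_HEAP_ARRAY(jbyte, 64, mtGC);  // raw storage, no ctors run
    // ... use buf ...
    FREE_C_HEAP_ARRAY(jbyte, buf, mtGC);             // must match type and flag, no dtors run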
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,15 +80,11 @@ _covered = new MemRegion[max_covered_regions]; _committed = new MemRegion[max_covered_regions]; - if (_covered == NULL || _committed == NULL) + if (_covered == NULL || _committed == NULL) { vm_exit_during_initialization("couldn't alloc card table covered region set."); - int i; - for (i = 0; i < max_covered_regions; i++) { - _covered[i].set_word_size(0); - _committed[i].set_word_size(0); } + _cur_covered_regions = 0; - const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 : MAX2(_page_size, (size_t) os::vm_allocation_granularity()); ReservedSpace heap_rs(_byte_map_size, rs_align, false); @@ -116,7 +112,7 @@ _guard_region = MemRegion((HeapWord*)guard_page, _page_size); if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) { // Do better than this for Merlin - vm_exit_out_of_memory(_page_size, "card table last card"); + vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card"); } *guard_card = last_card; @@ -134,7 +130,7 @@ || _lowest_non_clean_base_chunk_index == NULL || _last_LNC_resizing_collection == NULL) vm_exit_during_initialization("couldn't allocate an LNC array."); - for (i = 0; i < max_covered_regions; i++) { + for (int i = 0; i < max_covered_regions; i++) { _lowest_non_clean[i] = NULL; _lowest_non_clean_chunk_size[i] = 0; _last_LNC_resizing_collection[i] = -1; @@ -153,6 +149,33 @@ } } +CardTableModRefBS::~CardTableModRefBS() { + if (_covered) { + delete[] _covered; + _covered = NULL; + } + if (_committed) { + delete[] _committed; + _committed = NULL; + } + if (_lowest_non_clean) { + FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC); + _lowest_non_clean = NULL; + } + if (_lowest_non_clean_chunk_size) { + FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC); + _lowest_non_clean_chunk_size = NULL; + } + if (_lowest_non_clean_base_chunk_index) { + FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC); + _lowest_non_clean_base_chunk_index = NULL; + } + if (_last_LNC_resizing_collection) { + FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC); + _last_LNC_resizing_collection = NULL; + } +} + int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) { int i; for (i = 0; i < _cur_covered_regions; i++) { @@ -292,7 +315,7 @@ if (!os::commit_memory((char*)new_committed.start(), new_committed.byte_size(), _page_size)) { // Do better than this for Merlin - vm_exit_out_of_memory(new_committed.byte_size(), + vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR, "card table expansion"); } // Use new_end_aligned (as opposed to new_end_for_commit) because diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/cardTableModRefBS.hpp --- a/src/share/vm/memory/cardTableModRefBS.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/cardTableModRefBS.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -280,6 +280,7 @@ } CardTableModRefBS(MemRegion whole_heap, int max_covered_regions); + ~CardTableModRefBS(); // *** Barrier set functions. 
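Editorial note, not part of the changeset: the ~CardTableModRefBS destructor added above pairs every C-heap array allocation with a FREE_C_HEAP_ARRAY call carrying the same NMT memory-type tag, and clears the field afterwards. A minimal sketch of that pattern, using the allocation macros declared in allocation.hpp above; the class name ExampleTable and its field are hypothetical:

    class ExampleTable : public CHeapObj<mtGC> {
      size_t* _entries;
     public:
      ExampleTable(int n) {
        // RETURN_NULL reports failure to the caller instead of aborting the VM
        _entries = NEW_C_HEAP_ARRAY3(size_t, n, mtGC, 0, AllocFailStrategy::RETURN_NULL);
        if (_entries == NULL) {
          vm_exit_during_initialization("couldn't allocate example table.");
        }
      }
      ~ExampleTable() {
        if (_entries != NULL) {
          FREE_C_HEAP_ARRAY(size_t, _entries, mtGC); // tag must match the allocation
          _entries = NULL;
        }
      }
    };

Freeing with a different memory-type tag than the one used at allocation time would skew native memory tracking, which is why the destructor above consistently uses mtGC.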
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/cardTableRS.cpp --- a/src/share/vm/memory/cardTableRS.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/cardTableRS.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,9 +54,10 @@ _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions); #endif set_bs(_ct_bs); - _last_cur_val_in_gen = new jbyte[GenCollectedHeap::max_gens + 1]; + _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1, + mtGC, 0, AllocFailStrategy::RETURN_NULL); if (_last_cur_val_in_gen == NULL) { - vm_exit_during_initialization("Could not last_cur_val_in_gen array."); + vm_exit_during_initialization("Could not create last_cur_val_in_gen array."); } for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) { _last_cur_val_in_gen[i] = clean_card_val(); @@ -64,6 +65,16 @@ _ct_bs->set_CTRS(this); } +CardTableRS::~CardTableRS() { + if (_ct_bs) { + delete _ct_bs; + _ct_bs = NULL; + } + if (_last_cur_val_in_gen) { + FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen, mtGC); + } +} + void CardTableRS::resize_covered_region(MemRegion new_region) { _ct_bs->resize_covered_region(new_region); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/cardTableRS.hpp --- a/src/share/vm/memory/cardTableRS.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/cardTableRS.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -102,6 +102,7 @@ public: CardTableRS(MemRegion whole_heap, int max_covered_regions); + ~CardTableRS(); // *** GenRemSet functions. GenRemSet::Name rs_kind() { return GenRemSet::CardTable; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/collectorPolicy.cpp --- a/src/share/vm/memory/collectorPolicy.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/collectorPolicy.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -48,6 +48,17 @@ // CollectorPolicy methods.
void CollectorPolicy::initialize_flags() { + assert(max_alignment() >= min_alignment(), + err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT, + max_alignment(), min_alignment())); + assert(max_alignment() % min_alignment() == 0, + err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT, + max_alignment(), min_alignment())); + + if (MaxHeapSize < InitialHeapSize) { + vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); + } + if (MetaspaceSize > MaxMetaspaceSize) { MaxMetaspaceSize = MetaspaceSize; } @@ -71,21 +82,9 @@ } void CollectorPolicy::initialize_size_info() { - // User inputs from -mx and ms are aligned - set_initial_heap_byte_size(InitialHeapSize); - if (initial_heap_byte_size() == 0) { - set_initial_heap_byte_size(NewSize + OldSize); - } - set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size, - min_alignment())); - - set_min_heap_byte_size(Arguments::min_heap_size()); - if (min_heap_byte_size() == 0) { - set_min_heap_byte_size(NewSize + OldSize); - } - set_min_heap_byte_size(align_size_up(_min_heap_byte_size, - min_alignment())); - + // User inputs from -mx and ms must be aligned + set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment())); + set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment())); set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment())); // Check heap parameter properties @@ -201,9 +200,6 @@ // All sizes must be multiples of the generation granularity. set_min_alignment((uintx) Generation::GenGrain); set_max_alignment(compute_max_alignment()); - assert(max_alignment() >= min_alignment() && - max_alignment() % min_alignment() == 0, - "invalid alignment constraints"); CollectorPolicy::initialize_flags(); @@ -233,9 +229,6 @@ GenCollectorPolicy::initialize_flags(); OldSize = align_size_down(OldSize, min_alignment()); - if (NewSize + OldSize > MaxHeapSize) { - MaxHeapSize = NewSize + OldSize; - } if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) { // NewRatio will be used later to set the young generation size so we use @@ -250,6 +243,27 @@ } MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); + // adjust max heap size if necessary + if (NewSize + OldSize > MaxHeapSize) { + if (FLAG_IS_CMDLINE(MaxHeapSize)) { + // somebody set a maximum heap size with the intention that we should not + // exceed it. Adjust New/OldSize as necessary. + uintx calculated_size = NewSize + OldSize; + double shrink_factor = (double) MaxHeapSize / calculated_size; + // align + NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment()); + // OldSize is already aligned because above we aligned MaxHeapSize to + // max_alignment(), and we just made sure that NewSize is aligned to + // min_alignment(). In initialize_flags() we verified that max_alignment() + // is a multiple of min_alignment(). + OldSize = MaxHeapSize - NewSize; + } else { + MaxHeapSize = NewSize + OldSize; + } + } + // need to do this again + MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); +
always_do_update_barrier = UseConcMarkSweepGC; // Check validity of heap flags @@ -717,7 +752,7 @@ // free memory should be here, especially if they are expensive. If this // attempt fails, an OOM exception will be thrown. { - IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted + UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted gch->do_collection(true /* full */, true /* clear_all_soft_refs */, @@ -842,7 +877,7 @@ } void MarkSweepPolicy::initialize_generations() { - _generations = new GenerationSpecPtr[number_of_generations()]; + _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL); if (_generations == NULL) vm_exit_during_initialization("Unable to allocate gen spec"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/defNewGeneration.cpp --- a/src/share/vm/memory/defNewGeneration.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/defNewGeneration.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,10 @@ #include "precompiled.hpp" #include "gc_implementation/shared/collectorCounters.hpp" #include "gc_implementation/shared/gcPolicyCounters.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/defNewGeneration.inline.hpp" #include "memory/gcLocker.inline.hpp" @@ -50,9 +54,6 @@ DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { assert(g->level() == 0, "Optimized for youngest gen."); } -void DefNewGeneration::IsAliveClosure::do_object(oop p) { - assert(false, "Do not call."); -} bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded(); } @@ -226,6 +227,8 @@ _next_gen = NULL; _tenuring_threshold = MaxTenuringThreshold; _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; + + _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer(); } void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, @@ -561,12 +564,18 @@ size_t size, bool is_tlab) { assert(full || size > 0, "otherwise we don't want to collect"); + GenCollectedHeap* gch = GenCollectedHeap::heap(); + + _gc_timer->register_gc_start(os::elapsed_counter()); + DefNewTracer gc_tracer; + gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); + _next_gen = gch->next_gen(this); assert(_next_gen != NULL, "This must be the youngest gen, and not the only gen"); - // If the next generation is too full to accomodate promotion + // If the next generation is too full to accommodate promotion // from this generation, pass on collection; let the next generation // do it. if (!collection_attempt_is_safe()) { @@ -580,10 +589,12 @@ init_assuming_no_promotion_failure(); - TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty); + GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); // Capture heap used before collection (for printing). size_t gch_prev_used = gch->used(); + gch->trace_heap_before_gc(&gc_tracer); + SpecializationStats::clear(); // These can be shared for all code paths @@ -634,9 +645,12 @@ FastKeepAliveClosure keep_alive(this, &scan_weak_ref); ReferenceProcessor* rp = ref_processor(); rp->setup_policy(clear_all_soft_refs); + const ReferenceProcessorStats& stats = rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, - NULL); - if (!promotion_failed()) { + NULL, _gc_timer); + gc_tracer.report_gc_reference_stats(stats); + + if (!_promotion_failed) { // Swap the survivor spaces. eden()->clear(SpaceDecorator::Mangle); from()->clear(SpaceDecorator::Mangle); @@ -683,6 +697,7 @@ // Inform the next generation that a promotion failure occurred. _next_gen->promotion_failure_occurred(); + gc_tracer.report_promotion_failed(_promotion_failed_info); // Reset the PromotionFailureALot counters. NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) @@ -692,11 +707,18 @@ to()->set_concurrent_iteration_safe_limit(to()->top()); SpecializationStats::print(); - // We need to use a monotonically non-deccreasing time in ms + // We need to use a monotonically non-decreasing time in ms // or we will see time-warp warnings and os::javaTimeMillis() // does not guarantee monotonicity. 
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; update_time_of_last_gc(now); + + gch->trace_heap_after_gc(&gc_tracer); + gc_tracer.report_tenuring_threshold(tenuring_threshold()); + + _gc_timer->register_gc_end(os::elapsed_counter()); + + gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); } class RemoveForwardPointerClosure: public ObjectClosure { @@ -708,6 +730,7 @@ void DefNewGeneration::init_assuming_no_promotion_failure() { _promotion_failed = false; + _promotion_failed_info.reset(); from()->set_next_compaction_space(NULL); } @@ -729,7 +752,7 @@ } void DefNewGeneration::preserve_mark(oop obj, markOop m) { - assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj), + assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj), "Oversaving!"); _objs_with_preserved_marks.push(obj); _preserved_marks_of_objs.push(m); @@ -747,6 +770,7 @@ old->size()); } _promotion_failed = true; + _promotion_failed_info.register_copy_failure(old->size()); preserve_mark_if_necessary(old, old->mark()); // forward to self old->forward_to(old); @@ -965,6 +989,10 @@ from()->set_top_for_allocations(); } +void DefNewGeneration::ref_processor_init() { + Generation::ref_processor_init(); +} + void DefNewGeneration::update_counters() { if (UsePerfData) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/defNewGeneration.hpp --- a/src/share/vm/memory/defNewGeneration.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/defNewGeneration.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,12 +28,14 @@ #include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/cSpaceCounters.hpp" #include "gc_implementation/shared/generationCounters.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" #include "memory/generation.inline.hpp" #include "utilities/stack.hpp" class EdenSpace; class ContiguousSpace; class ScanClosure; +class STWGCTimer; // DefNewGeneration is a young generation containing eden, from- and // to-space. @@ -46,15 +48,17 @@ uint _tenuring_threshold; // Tenuring threshold for next collection. ageTable _age_table; // Size of object to pretenure in words; command line provides bytes - size_t _pretenure_size_threshold_words; + size_t _pretenure_size_threshold_words; ageTable* age_table() { return &_age_table; } + // Initialize state to optimistically assume no promotion failure will // happen. void init_assuming_no_promotion_failure(); // True iff a promotion has failed in the current collection. bool _promotion_failed; bool promotion_failed() { return _promotion_failed; } + PromotionFailedInfo _promotion_failed_info; // Handling promotion failure. A young generation collection // can fail if a live object cannot be copied out of its @@ -132,6 +136,8 @@ ContiguousSpace* _from_space; ContiguousSpace* _to_space; + STWGCTimer* _gc_timer; + enum SomeProtectedConstants { // Generations are GenGrain-aligned and have size that are multiples of // GenGrain. 
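Editorial note, not part of the changeset: the next hunk removes the do_object(oop) stub from IsAliveClosure. Together with the iterator.hpp change later in this patch, which rebases BoolObjectClosure on Closure instead of ObjectClosure, liveness closures become pure predicates. A user-defined filter in the same style only has to implement do_object_b; the class name below is hypothetical:

    class IsForwardedClosure : public BoolObjectClosure {
     public:
      // treat an object as live once it carries a forwarding pointer
      bool do_object_b(oop p) { return p->is_forwarded(); }
    };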
@@ -150,7 +156,6 @@ Generation* _g; public: IsAliveClosure(Generation* g); - void do_object(oop p); bool do_object_b(oop p); }; @@ -204,6 +209,8 @@ DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level, const char* policy="Copy"); + virtual void ref_processor_init(); + virtual Generation::Name kind() { return Generation::DefNew; } // Accessing spaces diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/filemap.cpp --- a/src/share/vm/memory/filemap.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/filemap.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -238,8 +238,8 @@ void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) { align_file_position(); - size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord; - size_t capacity = space->capacity_words(Metaspace::NonClassType) * BytesPerWord; + size_t used = space->used_bytes_slow(Metaspace::NonClassType); + size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType); struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; write_region(i, (char*)space->bottom(), used, capacity, read_only, false); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/freeList.cpp --- a/src/share/vm/memory/freeList.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/freeList.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -55,17 +55,6 @@ } template <class Chunk> -FreeList<Chunk>::FreeList(Chunk* fc) : - _head(fc), _tail(fc) -#ifdef ASSERT - , _protecting_lock(NULL) -#endif -{ - _size = fc->size(); - _count = 1; -} - -template <class Chunk> void FreeList<Chunk>::link_head(Chunk* v) { assert_proper_lock_protection(); set_head(v); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/freeList.hpp --- a/src/share/vm/memory/freeList.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/freeList.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -80,8 +80,6 @@ // Constructor // Construct a list without any entries. FreeList(); - // Construct a list with "fc" as the first (and lone) entry in the list. - FreeList(Chunk_t* fc); // Do initialization void initialize(); @@ -177,9 +175,6 @@ // found. Return NULL if "fc" is not found. bool verify_chunk_in_free_list(Chunk_t* fc) const; - // Stats verification -// void verify_stats() const { ShouldNotReachHere(); }; - // Printing support static void print_labels_on(outputStream* st, const char* c); void print_on(outputStream* st, const char* c = NULL) const; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/genCollectedHeap.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "gc_implementation/shared/collectorCounters.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/filemap.hpp" @@ -377,7 +378,7 @@ ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); - const size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); print_heap_before_gc(); @@ -388,7 +389,7 @@ const char* gc_cause_prefix = complete ? "Full GC" : "GC"; gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, gclog_or_tty); + GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL); gc_prologue(complete); increment_total_collections(complete); @@ -417,10 +418,11 @@ // The full_collections increment was missed above. increment_total_full_collections(); } - pre_full_gc_dump(); // do any pre full gc dumps + pre_full_gc_dump(NULL); // do any pre full gc dumps } // Timer for individual generations. Last argument is false: no CR - TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty); + // FIXME: We should try to start the timing earlier to cover more of the GC pause + GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL); TraceCollectorStats tcs(_gens[i]->counters()); TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause()); @@ -447,8 +449,7 @@ prepare_for_verify(); prepared_for_verification = true; } - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } COMPILER2_PRESENT(DerivedPointerTable::clear()); @@ -519,8 +520,7 @@ if (VerifyAfterGC && i >= VerifyGCLevel && total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } if (PrintGCDetails) { @@ -536,7 +536,8 @@ complete = complete || (max_level_collected == n_gens() - 1); if (complete) { // We did a "major" collection - post_full_gc_dump(); // do any post full gc dumps + // FIXME: See comment at pre_full_gc_dump call + post_full_gc_dump(NULL); // do any post full gc dumps } if (PrintGCDetails) { @@ -556,6 +557,7 @@ if (complete) { // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); // Resize the metaspace capacity after full collections MetaspaceGC::compute_new_size(); update_full_collections_completed(); @@ -633,9 +635,8 @@ } void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure) { - SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure); + CodeBlobClosure* code_roots) { + SharedHeap::process_weak_roots(root_closure, code_roots); // "Local" "weak" refs for (int i = 0; i < _n_gens; i++) { _gens[i]->ref_processor()->weak_oops_do(root_closure); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/genCollectedHeap.hpp --- a/src/share/vm/memory/genCollectedHeap.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/genCollectedHeap.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -432,8 
+432,7 @@ // JNI weak roots, the code cache, system dictionary, symbol table, // string table, and referents of reachable weak refs. void gen_process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure); + CodeBlobClosure* code_roots); // Set the saved marks of generations, if that makes sense. // In particular, if any generation might iterate over the oops diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/genMarkSweep.cpp --- a/src/share/vm/memory/genMarkSweep.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/genMarkSweep.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,10 @@ #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/genCollectedHeap.hpp" #include "memory/genMarkSweep.hpp" @@ -65,7 +69,9 @@ _ref_processor = rp; rp->setup_policy(clear_all_softrefs); - TraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty); + GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); + + gch->trace_heap_before_gc(_gc_tracer); // When collecting the permanent generation Method*s may be moving, // so we either have to flush all bcp data or convert it into bci. @@ -155,6 +161,8 @@ // does not guarantee monotonicity. jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; gch->update_time_of_last_gc(now); + + gch->trace_heap_after_gc(_gc_tracer); } void GenMarkSweep::allocate_stacks() { @@ -192,7 +200,7 @@ void GenMarkSweep::mark_sweep_phase1(int level, bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer); trace(" 1"); GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -219,27 +227,31 @@ // Process reference objects found during marking { ref_processor()->setup_policy(clear_all_softrefs); - ref_processor()->process_discovered_references( - &is_alive, &keep_alive, &follow_stack_closure, NULL); + const ReferenceProcessorStats& stats = + ref_processor()->process_discovered_references( + &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer); + gc_tracer()->report_gc_reference_stats(stats); } - // Follow system dictionary roots and unload classes + // This is the point where the entire marking should have completed. + assert(_marking_stack.is_empty(), "Marking should have completed"); + + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&is_alive); - // Follow code cache roots + // Unload nmethods. CodeCache::do_unloading(&is_alive, purged_class); - follow_stack(); // Flush marking stack - // Update subklass/sibling/implementor links of live klasses + // Prune dead klasses from subklass/sibling/implementor lists. 
Klass::clean_weak_klass_links(&is_alive); - assert(_marking_stack.is_empty(), "just drained"); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(&is_alive); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - assert(_marking_stack.is_empty(), "stack should be empty by now"); + gc_tracer()->report_object_count_after_gc(&is_alive); } @@ -259,7 +271,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); - TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer); trace("2"); gch->prepare_for_compaction(); @@ -276,17 +288,16 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); // Adjust the pointers to reflect the new locations - TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer); trace("3"); // Need new claim bits for the pointer adjustment tracing. ClassLoaderDataGraph::clear_claimed_marks(); - // Because the two closures below are created statically, cannot + // Because the closure below is created statically, we cannot // use OopsInGenClosure constructor which takes a generation, // as the Universe has not been created when the static constructors // are run. - adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level)); adjust_pointer_closure.set_orig_generation(gch->get_gen(level)); gch->gen_process_strong_roots(level, @@ -294,18 +305,17 @@ true, // activate StrongRootsScope false, // not scavenging SharedHeap::SO_AllClasses, - &adjust_root_pointer_closure, + &adjust_pointer_closure, false, // do not walk code - &adjust_root_pointer_closure, + &adjust_pointer_closure, &adjust_klass_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure, /*do_marking=*/ false); - gch->gen_process_weak_roots(&adjust_root_pointer_closure, - &adjust_code_pointer_closure, - &adjust_pointer_closure); + gch->gen_process_weak_roots(&adjust_pointer_closure, + &adjust_code_pointer_closure); adjust_marks(); GenAdjustPointersClosure blk; @@ -333,7 +343,7 @@ // to use a higher index (saved from phase2) when verifying perm_gen. GenCollectedHeap* gch = GenCollectedHeap::heap(); - TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer); trace("4"); GenCompactClosure blk; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/generation.cpp --- a/src/share/vm/memory/generation.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/generation.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,8 @@ */ #include "precompiled.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/allocation.inline.hpp" @@ -624,12 +626,26 @@ bool clear_all_soft_refs, size_t size, bool is_tlab) { + GenCollectedHeap* gch = GenCollectedHeap::heap(); + SpecializationStats::clear(); // Temporarily expand the span of our ref processor, so // refs discovery is over the entire heap, not just this generation ReferenceProcessorSpanMutator - x(ref_processor(), GenCollectedHeap::heap()->reserved_region()); + x(ref_processor(), gch->reserved_region()); + + STWGCTimer* gc_timer = GenMarkSweep::gc_timer(); + gc_timer->register_gc_start(os::elapsed_counter()); + + SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); + gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); + GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs); + + gc_timer->register_gc_end(os::elapsed_counter()); + + gc_tracer->report_gc_end(os::elapsed_counter(), gc_timer->time_partitions()); + SpecializationStats::print(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/heap.cpp --- a/src/share/vm/memory/heap.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/heap.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -42,7 +42,7 @@ _log2_segment_size = 0; _next_segment = 0; _freelist = NULL; - _free_segments = 0; + _freelist_segments = 0; } @@ -115,8 +115,8 @@ } on_code_mapping(_memory.low(), _memory.committed_size()); - _number_of_committed_segments = number_of_segments(_memory.committed_size()); - _number_of_reserved_segments = number_of_segments(_memory.reserved_size()); + _number_of_committed_segments = size_to_segments(_memory.committed_size()); + _number_of_reserved_segments = size_to_segments(_memory.reserved_size()); assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); // reserve space for _segmap @@ -149,8 +149,8 @@ if (!_memory.expand_by(dm)) return false; on_code_mapping(base, dm); size_t i = _number_of_committed_segments; - _number_of_committed_segments = number_of_segments(_memory.committed_size()); - assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change"); + _number_of_committed_segments = size_to_segments(_memory.committed_size()); + assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change"); assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); // expand _segmap space size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size(); @@ -176,33 +176,44 @@ } -void* CodeHeap::allocate(size_t size) { - size_t length = number_of_segments(size + sizeof(HeapBlock)); - assert(length *_segment_size >= sizeof(FreeBlock), "not enough room for FreeList"); +void* CodeHeap::allocate(size_t instance_size, bool is_critical) { + size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock)); + assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList"); // First check if we can satify request from freelist debug_only(verify()); - HeapBlock* block = search_freelist(length); + HeapBlock* block = search_freelist(number_of_segments, is_critical); debug_only(if 
(VerifyCodeCacheOften) verify()); if (block != NULL) { - assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check"); + assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check"); assert(!block->free(), "must be marked free"); #ifdef ASSERT - memset((void *)block->allocated_space(), badCodeHeapNewVal, size); + memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size); #endif return block->allocated_space(); } - if (length < CodeCacheMinBlockLength) { - length = CodeCacheMinBlockLength; + // Ensure minimum size for allocation to the heap. + if (number_of_segments < CodeCacheMinBlockLength) { + number_of_segments = CodeCacheMinBlockLength; } - if (_next_segment + length <= _number_of_committed_segments) { - mark_segmap_as_used(_next_segment, _next_segment + length); + + if (!is_critical) { + // Make sure the allocation fits in the unallocated heap without using + // the CodeCacheMinimumFreeSpace that is reserved for critical allocations. + if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) { + // Fail allocation + return NULL; + } + } + + if (_next_segment + number_of_segments <= _number_of_committed_segments) { + mark_segmap_as_used(_next_segment, _next_segment + number_of_segments); HeapBlock* b = block_at(_next_segment); - b->initialize(length); - _next_segment += length; + b->initialize(number_of_segments); + _next_segment += number_of_segments; #ifdef ASSERT - memset((void *)b->allocated_space(), badCodeHeapNewVal, size); + memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size); #endif return b->allocated_space(); } else { @@ -219,7 +230,7 @@ #ifdef ASSERT memset((void *)b->allocated_space(), badCodeHeapFreeVal, - size(b->length()) - sizeof(HeapBlock)); + segments_to_size(b->length()) - sizeof(HeapBlock)); #endif add_to_freelist(b); @@ -299,32 +310,14 @@ } size_t CodeHeap::allocated_capacity() const { - // Start with the committed size in _memory; - size_t l = _memory.committed_size(); - - // Subtract the committed, but unused, segments - l -= size(_number_of_committed_segments - _next_segment); - - // Subtract the size of the freelist - l -= size(_free_segments); - - return l; + // size of used heap - size on freelist + return segments_to_size(_next_segment - _freelist_segments); } -size_t CodeHeap::largest_free_block() const { - // First check unused space excluding free blocks. - size_t free_sz = size(_free_segments); - size_t unused = max_capacity() - allocated_capacity() - free_sz; - if (unused >= free_sz) - return unused; - - // Now check largest free block. - size_t len = 0; - for (FreeBlock* b = _freelist; b != NULL; b = b->link()) { - if (b->length() > len) - len = b->length(); - } - return MAX2(unused, size(len)); +// Returns size of the unallocated heap block +size_t CodeHeap::heap_unallocated_capacity() const { + // Total number of segments - number currently used + return segments_to_size(_number_of_reserved_segments - _next_segment); } // Free list management @@ -365,7 +358,7 @@ assert(b != _freelist, "cannot be removed twice"); // Mark as free and update free space count - _free_segments += b->length(); + _freelist_segments += b->length(); b->set_free(); // First element in list? 
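Editorial note, not part of the changeset: the renames above make the conversion direction explicit. size_to_segments() rounds a byte size up to whole segments, and segments_to_size() converts back exactly. Assuming a hypothetical 64-byte segment (_segment_size == 64, _log2_segment_size == 6) and the definitions shown in heap.hpp below:

    size_to_segments(100) == (100 + 64 - 1) >> 6 == 2    // 100 bytes round up to 2 segments
    segments_to_size(2)   == 2 << 6 == 128               // exact conversion back to bytes
    size_to_segments(segments_to_size(2)) == 2           // round trip is stable on whole segments

With this, allocated_capacity() == segments_to_size(_next_segment - _freelist_segments) counts exactly the bytes of handed-out segments that are not parked on the freelist.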
@@ -400,7 +393,7 @@ // Search freelist for an entry on the list with the best fit // Return NULL if no one was found -FreeBlock* CodeHeap::search_freelist(size_t length) { +FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) { FreeBlock *best_block = NULL; FreeBlock *best_prev = NULL; size_t best_length = 0; @@ -411,6 +404,16 @@ while(cur != NULL) { size_t l = cur->length(); if (l >= length && (best_block == NULL || best_length > l)) { + + // Non critical allocations are not allowed to use the last part of the code heap. + if (!is_critical) { + // Make sure the end of the allocation doesn't cross into the last part of the code heap + if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) { + // the freelist is sorted by address - if one fails, all consecutive will also fail. + break; + } + } + // Remember best block, its previous element, and its length best_block = cur; best_prev = prev; @@ -452,7 +455,7 @@ } best_block->set_used(); - _free_segments -= length; + _freelist_segments -= length; return best_block; } @@ -478,7 +481,7 @@ } // Verify that freelist contains the right amount of free space - // guarantee(len == _free_segments, "wrong freelist"); + // guarantee(len == _freelist_segments, "wrong freelist"); // Verify that the number of free blocks is not out of hand. static int free_block_threshold = 10000; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/heap.hpp --- a/src/share/vm/memory/heap.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/heap.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -91,11 +91,11 @@ size_t _next_segment; FreeBlock* _freelist; - size_t _free_segments; // No. of segments in freelist + size_t _freelist_segments; // No. of segments in freelist // Helper functions - size_t number_of_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; } - size_t size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; } + size_t size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; } + size_t segments_to_size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; } size_t segment_for(void* p) const { return ((char*)p - _memory.low()) >> _log2_segment_size; } HeapBlock* block_at(size_t i) const { return (HeapBlock*)(_memory.low() + (i << _log2_segment_size)); } @@ -110,7 +110,7 @@ // Toplevel freelist management void add_to_freelist(HeapBlock *b); - FreeBlock* search_freelist(size_t length); + FreeBlock* search_freelist(size_t length, bool is_critical); // Iteration helpers void* next_free(HeapBlock* b) const; @@ -132,22 +132,19 @@ void clear(); // clears all heap contents // Memory allocation - void* allocate (size_t size); // allocates a block of size or returns NULL + void* allocate (size_t size, bool is_critical); // allocates a block of size or returns NULL void deallocate(void* p); // deallocates a block // Attributes - void* begin() const { return _memory.low (); } - void* end() const { return _memory.high(); } - bool contains(void* p) const { return begin() <= p && p < end(); } - void* find_start(void* p) const; // returns the block containing p or NULL - size_t alignment_unit() const; // alignment of any block - size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit - static size_t header_size(); // returns the header size for each heap block + char* low_boundary() const { return _memory.low_boundary (); } + char* high() const { 
return _memory.high(); } + char* high_boundary() const { return _memory.high_boundary(); } - // Returns reserved area high and low addresses - char *low_boundary() const { return _memory.low_boundary (); } - char *high() const { return _memory.high(); } - char *high_boundary() const { return _memory.high_boundary(); } + bool contains(const void* p) const { return low_boundary() <= p && p < high(); } + void* find_start(void* p) const; // returns the block containing p or NULL + size_t alignment_unit() const; // alignment of any block + size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit + static size_t header_size(); // returns the header size for each heap block // Iteration @@ -161,8 +158,11 @@ size_t max_capacity() const; size_t allocated_capacity() const; size_t unallocated_capacity() const { return max_capacity() - allocated_capacity(); } - size_t largest_free_block() const; +private: + size_t heap_unallocated_capacity() const; + +public: // Debugging void verify(); void print() PRODUCT_RETURN; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/heapInspection.cpp --- a/src/share/vm/memory/heapInspection.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/heapInspection.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -95,7 +95,7 @@ } elt = elt->next(); } - elt = new KlassInfoEntry(k, list()); + elt = new (std::nothrow) KlassInfoEntry(k, list()); // We may be out of space to allocate the new entry. if (elt != NULL) { set_list(elt); @@ -127,13 +127,15 @@ _table->lookup(k); } -KlassInfoTable::KlassInfoTable(int size, HeapWord* ref, - bool need_class_stats) { +KlassInfoTable::KlassInfoTable(bool need_class_stats) { + _size_of_instances_in_words = 0; _size = 0; - _ref = ref; - _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size, mtInternal); + _ref = (HeapWord*) Universe::boolArrayKlassObj(); + _buckets = + (KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets, + mtInternal, 0, AllocFailStrategy::RETURN_NULL); if (_buckets != NULL) { - _size = size; + _size = _num_buckets; for (int index = 0; index < _size; index++) { _buckets[index].initialize(); } @@ -154,12 +156,12 @@ } } -uint KlassInfoTable::hash(Klass* p) { +uint KlassInfoTable::hash(const Klass* p) { assert(p->is_metadata(), "all klasses are metadata"); return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2); } -KlassInfoEntry* KlassInfoTable::lookup(Klass* const k) { +KlassInfoEntry* KlassInfoTable::lookup(Klass* k) { uint idx = hash(k) % _size; assert(_buckets != NULL, "Allocation failure should have been caught"); KlassInfoEntry* e = _buckets[idx].lookup(k); @@ -179,6 +181,7 @@ if (elt != NULL) { elt->set_count(elt->count() + 1); elt->set_words(elt->words() + obj->size()); + _size_of_instances_in_words += obj->size(); return true; } else { return false; @@ -192,14 +195,18 @@ } } +size_t KlassInfoTable::size_of_instances_in_words() const { + return _size_of_instances_in_words; +} + int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) { return (*e1)->compare(*e1,*e2); } -KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit, const char* title, int estimatedCount) : +KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit, const char* title) : _cit(cit), _title(title) { - _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(estimatedCount,true); + _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true); } KlassInfoHisto::~KlassInfoHisto() { @@ -444,25 +451,37 @@ private: KlassInfoTable* _cit; size_t 
_missed_count; + BoolObjectClosure* _filter; public: - RecordInstanceClosure(KlassInfoTable* cit) : - _cit(cit), _missed_count(0) {} + RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) : + _cit(cit), _missed_count(0), _filter(filter) {} void do_object(oop obj) { - if (!_cit->record_instance(obj)) { - _missed_count++; + if (should_visit(obj)) { + if (!_cit->record_instance(obj)) { + _missed_count++; + } } } size_t missed_count() { return _missed_count; } + + private: + bool should_visit(oop obj) { + return _filter == NULL || _filter->do_object_b(obj); + } }; -void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) { +size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) { ResourceMark rm; - // Get some random number for ref (the hash key) - HeapWord* ref = (HeapWord*) Universe::boolArrayKlassObj(); - CollectedHeap* heap = Universe::heap(); - bool is_shared_heap = false; + + RecordInstanceClosure ric(cit, filter); + Universe::heap()->object_iterate(&ric); + return ric.missed_count(); +} + +void HeapInspection::heap_inspection(outputStream* st) { + ResourceMark rm; if (_print_help) { for (int c=0; cobject_iterate(&ric); - - // Report if certain classes are not counted because of - // running out of C-heap for the histogram. - size_t missed_count = ric.missed_count(); + size_t missed_count = populate_table(&cit); if (missed_count != 0) { st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT " total instances in data below", missed_count); } + // Sort and print klass instance info const char *title = "\n" " num #instances #bytes class name\n" "----------------------------------------------"; - KlassInfoHisto histo(&cit, title, KlassInfoHisto::histo_initial_size); + KlassInfoHisto histo(&cit, title); HistoClosure hc(&histo); + cit.iterate(&hc); + histo.sort(); histo.print_histo_on(st, _print_class_stats, _csv_format, _columns); } else { st->print_cr("WARNING: Ran out of C-heap; histogram not generated"); } st->flush(); - - if (need_prologue && is_shared_heap) { - SharedHeap* sh = (SharedHeap*)heap; - sh->gc_epilogue(false /* !full */); // release all acquired locks, etc. 
- } } class FindInstanceClosure : public ObjectClosure { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/heapInspection.hpp --- a/src/share/vm/memory/heapInspection.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/heapInspection.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_MEMORY_HEAPINSPECTION_HPP #include "memory/allocation.inline.hpp" +#include "memory/klassInfoClosure.hpp" #include "oops/oop.inline.hpp" #include "oops/annotations.hpp" #include "utilities/macros.hpp" @@ -189,33 +190,27 @@ KlassInfoEntry(Klass* k, KlassInfoEntry* next) : _klass(k), _instance_count(0), _instance_words(0), _next(next), _index(-1) {} - KlassInfoEntry* next() { return _next; } - bool is_equal(Klass* k) { return k == _klass; } - Klass* klass() { return _klass; } - long count() { return _instance_count; } + KlassInfoEntry* next() const { return _next; } + bool is_equal(const Klass* k) { return k == _klass; } + Klass* klass() const { return _klass; } + long count() const { return _instance_count; } void set_count(long ct) { _instance_count = ct; } - size_t words() { return _instance_words; } + size_t words() const { return _instance_words; } void set_words(size_t wds) { _instance_words = wds; } void set_index(long index) { _index = index; } - long index() { return _index; } + long index() const { return _index; } int compare(KlassInfoEntry* e1, KlassInfoEntry* e2); void print_on(outputStream* st) const; const char* name() const; }; -class KlassInfoClosure: public StackObj { - public: - // Called for each KlassInfoEntry. - virtual void do_cinfo(KlassInfoEntry* cie) = 0; -}; - class KlassInfoBucket: public CHeapObj { private: KlassInfoEntry* _list; KlassInfoEntry* list() { return _list; } void set_list(KlassInfoEntry* l) { _list = l; } public: - KlassInfoEntry* lookup(Klass* const k); + KlassInfoEntry* lookup(Klass* k); void initialize() { _list = NULL; } void empty(); void iterate(KlassInfoClosure* cic); @@ -224,6 +219,8 @@ class KlassInfoTable: public StackObj { private: int _size; + static const int _num_buckets = 20011; + size_t _size_of_instances_in_words; // An aligned reference address (typically the least // address in the perm gen) used for hashing klass @@ -231,8 +228,8 @@ HeapWord* _ref; KlassInfoBucket* _buckets; - uint hash(Klass* p); - KlassInfoEntry* lookup(Klass* const k); // allocates if not found! + uint hash(const Klass* p); + KlassInfoEntry* lookup(Klass* k); // allocates if not found! 
class AllClassesFinder : public KlassClosure { KlassInfoTable *_table; @@ -242,21 +239,19 @@ }; public: - // Table size - enum { - cit_size = 20011 - }; - KlassInfoTable(int size, HeapWord* ref, bool need_class_stats); + KlassInfoTable(bool need_class_stats); ~KlassInfoTable(); bool record_instance(const oop obj); void iterate(KlassInfoClosure* cic); bool allocation_failed() { return _buckets == NULL; } + size_t size_of_instances_in_words() const; friend class KlassInfoHisto; }; class KlassInfoHisto : public StackObj { private: + static const int _histo_initial_size = 1000; KlassInfoTable *_cit; GrowableArray<KlassInfoEntry*>* _elements; GrowableArray<KlassInfoEntry*>* elements() const { return _elements; } @@ -334,11 +329,7 @@ } public: - enum { - histo_initial_size = 1000 - }; - KlassInfoHisto(KlassInfoTable* cit, const char* title, - int estimatedCount); + KlassInfoHisto(KlassInfoTable* cit, const char* title); ~KlassInfoHisto(); void add(KlassInfoEntry* cie); void print_histo_on(outputStream* st, bool print_class_stats, bool csv_format, const char *columns); @@ -347,6 +338,11 @@ #endif // INCLUDE_SERVICES +// These declarations are needed since the declarations of KlassInfoTable and +// KlassInfoClosure are guarded by #if INCLUDE_SERVICES +class KlassInfoTable; +class KlassInfoClosure; + class HeapInspection : public StackObj { bool _csv_format; // "comma separated values" format for spreadsheet. bool _print_help; @@ -357,8 +353,11 @@ bool print_class_stats, const char *columns) : _csv_format(csv_format), _print_help(print_help), _print_class_stats(print_class_stats), _columns(columns) {} - void heap_inspection(outputStream* st, bool need_prologue) NOT_SERVICES_RETURN; + void heap_inspection(outputStream* st) NOT_SERVICES_RETURN; + size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN; static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN; + private: + void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL); }; #endif // SHARE_VM_MEMORY_HEAPINSPECTION_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/iterator.hpp --- a/src/share/vm/memory/iterator.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/iterator.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -158,7 +158,7 @@ }; -class BoolObjectClosure : public ObjectClosure { +class BoolObjectClosure : public Closure { public: virtual bool do_object_b(oop obj) = 0; }; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/klassInfoClosure.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/memory/klassInfoClosure.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP +#define SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP + +class KlassInfoEntry; + +class KlassInfoClosure : public StackObj { + public: + // Called for each KlassInfoEntry. + virtual void do_cinfo(KlassInfoEntry* cie) = 0; +}; + +#endif // SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/memRegion.cpp --- a/src/share/vm/memory/memRegion.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/memRegion.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -23,6 +23,8 @@ */ #include "precompiled.hpp" +#include "memory/allocation.hpp" +#include "memory/allocation.inline.hpp" #include "memory/memRegion.hpp" #include "runtime/globals.hpp" @@ -99,3 +101,19 @@ ShouldNotReachHere(); return MemRegion(); } + +void* MemRegion::operator new(size_t size) { + return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL); +} + +void* MemRegion::operator new [](size_t size) { + return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL); +} +void MemRegion::operator delete(void* p) { + FreeHeap(p, mtGC); +} + +void MemRegion::operator delete [](void* p) { + FreeHeap(p, mtGC); +} + diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/memRegion.hpp --- a/src/share/vm/memory/memRegion.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/memRegion.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -34,7 +34,9 @@ // Note that MemRegions are passed by value, not by reference. // The intent is that they remain very small and contain no -// objects. +// objects. _ValueObj should never be allocated in heap but we do +// create MemRegions (in CardTableModRefBS) in heap so operator +// new and operator new [] added for this special case. class MetaWord; @@ -92,6 +94,10 @@ size_t word_size() const { return _word_size; } bool is_empty() const { return word_size() == 0; } + void* operator new(size_t size); + void* operator new [](size_t size); + void operator delete(void* p); + void operator delete [](void* p); }; // For iteration over MemRegion's. diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/metachunk.cpp --- a/src/share/vm/memory/metachunk.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/metachunk.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -28,6 +28,7 @@ #include "utilities/copy.hpp" #include "utilities/debug.hpp" +class VirtualSpaceNode; // // Future modification // @@ -45,27 +46,30 @@ // Metachunk methods -Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) { - // Set bottom, top, and end. 
Allow space for the Metachunk itself - Metachunk* chunk = (Metachunk*) ptr; - - MetaWord* chunk_bottom = ptr + _overhead; - chunk->set_bottom(ptr); - chunk->set_top(chunk_bottom); - MetaWord* chunk_end = ptr + word_size; - assert(chunk_end > chunk_bottom, "Chunk must be too small"); - chunk->set_end(chunk_end); - chunk->set_next(NULL); - chunk->set_prev(NULL); - chunk->set_word_size(word_size); +Metachunk::Metachunk(size_t word_size, + VirtualSpaceNode* container) : + _word_size(word_size), + _bottom(NULL), + _end(NULL), + _top(NULL), + _next(NULL), + _prev(NULL), + _container(container) +{ + _bottom = (MetaWord*)this; + _top = (MetaWord*)this + _overhead; + _end = (MetaWord*)this + word_size; #ifdef ASSERT - size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord)); - Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize); + set_is_free(false); + size_t data_word_size = pointer_delta(end(), + top(), + sizeof(MetaWord)); + Copy::fill_to_words((HeapWord*) top(), + data_word_size, + metadata_chunk_initialize); #endif - return chunk; } - MetaWord* Metachunk::allocate(size_t word_size) { MetaWord* result = NULL; // If available, bump the pointer to allocate. diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/metachunk.hpp --- a/src/share/vm/memory/metachunk.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/metachunk.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -41,10 +41,13 @@ // | | | | // +--------------+ <- bottom ---+ ---+ +class VirtualSpaceNode; + class Metachunk VALUE_OBJ_CLASS_SPEC { // link to support lists of chunks Metachunk* _next; Metachunk* _prev; + VirtualSpaceNode* _container; MetaWord* _bottom; MetaWord* _end; @@ -61,29 +64,20 @@ // the space. static size_t _overhead; - void set_bottom(MetaWord* v) { _bottom = v; } - void set_end(MetaWord* v) { _end = v; } - void set_top(MetaWord* v) { _top = v; } - void set_word_size(size_t v) { _word_size = v; } public: -#ifdef ASSERT - Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false), - _next(NULL), _prev(NULL) {} -#else - Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), - _next(NULL), _prev(NULL) {} -#endif + Metachunk(size_t word_size , VirtualSpaceNode* container); // Used to add a Metachunk to a list of Metachunks void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");} void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");} + void set_container(VirtualSpaceNode* v) { _container = v; } MetaWord* allocate(size_t word_size); - static Metachunk* initialize(MetaWord* ptr, size_t word_size); // Accessors Metachunk* next() const { return _next; } Metachunk* prev() const { return _prev; } + VirtualSpaceNode* container() const { return _container; } MetaWord* bottom() const { return _bottom; } MetaWord* end() const { return _end; } MetaWord* top() const { return _top; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/metaspace.cpp --- a/src/share/vm/memory/metaspace.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/metaspace.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -47,7 +47,6 @@ // the free chunk lists const bool metaspace_slow_verify = false; - // Parameters for stress mode testing const uint metadata_deallocate_a_lot_block = 10; const uint metadata_deallocate_a_lock_chunk = 3; @@ -103,27 +102,7 @@ // a chunk is placed on the free list of blocks (BlockFreelist) and // reused from there. -// Pointer to list of Metachunks. 
-class ChunkList VALUE_OBJ_CLASS_SPEC {
- // List of free chunks
- Metachunk* _head;
-
- public:
- // Constructor
- ChunkList() : _head(NULL) {}
-
- // Accessors
- Metachunk* head() { return _head; }
- void set_head(Metachunk* v) { _head = v; }
-
- // Link at head of the list
- void add_at_head(Metachunk* head, Metachunk* tail);
- void add_at_head(Metachunk* head);
-
- size_t sum_list_size();
- size_t sum_list_count();
- size_t sum_list_capacity();
-};
+typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
// Has three lists of free chunks, and a total size and
@@ -132,6 +111,7 @@
class ChunkManager VALUE_OBJ_CLASS_SPEC {

// Free list of chunks of different sizes.
+ // SpecializedChunk
// SmallChunk
// MediumChunk
// HumongousChunk
@@ -185,6 +165,14 @@
// for special, small, medium, and humongous chunks.
static ChunkIndex list_index(size_t size);

+ // Remove the chunk from its freelist. It is
+ // expected to be on one of the _free_chunks[] lists.
+ void remove_chunk(Metachunk* chunk);
+
+ // Add the simple linked list of chunks to the freelist of chunks
+ // of type index.
+ void return_chunks(ChunkIndex index, Metachunk* chunks);
+
// Total of the space in the free chunks list
size_t free_chunks_total();
size_t free_chunks_total_in_bytes();
@@ -231,7 +219,6 @@
void print_on(outputStream* st);
};

-
// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
@@ -271,6 +258,8 @@
ReservedSpace _rs;
VirtualSpace _virtual_space;
MetaWord* _top;
+ // count of chunks contained in this VirtualSpace
+ uintx _container_count;

// Convenience functions for logical bottom and end
MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
@@ -280,10 +269,19 @@
char* low() const { return virtual_space()->low(); }
char* high() const { return virtual_space()->high(); }

+ // The first Metachunk will be allocated at the bottom of the
+ // VirtualSpace
+ Metachunk* first_chunk() { return (Metachunk*) bottom(); }
+
+ void inc_container_count();
+#ifdef ASSERT
+ uint container_count_slow();
+#endif
+
public:

VirtualSpaceNode(size_t byte_size);
- VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {}
+ VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
~VirtualSpaceNode();

// address of next available space in _virtual_space;
@@ -298,15 +296,22 @@
MemRegion* reserved() { return &_reserved; }
VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

- // Returns true if "word_size" is available in the virtual space
+ // Returns true if "word_size" is available in the VirtualSpace
bool is_available(size_t word_size) { return _top + word_size <= end(); }

MetaWord* top() const { return _top; }
void inc_top(size_t word_size) { _top += word_size; }

+ uintx container_count() { return _container_count; }
+ void dec_container_count();
+#ifdef ASSERT
+ void verify_container_count();
+#endif
+
// used and capacity in this single entry in the list
size_t used_words_in_vs() const;
size_t capacity_words_in_vs() const;
+ size_t free_words_in_vs() const;

bool initialize();

@@ -322,6 +327,10 @@
bool expand_by(size_t words, bool pre_touch = false);
bool shrink_by(size_t words);

+ // In preparation for deleting this node, remove all the chunks
+ // in the node from any freelist.
+ void purge(ChunkManager* chunk_manager); + #ifdef ASSERT // Debug support static void verify_virtual_space_total(); @@ -333,7 +342,7 @@ }; // byte_size is the size of the associated virtualspace. -VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) { +VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) { // align up to vm allocation granularity byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); @@ -357,6 +366,39 @@ MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); } +void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { + Metachunk* chunk = first_chunk(); + Metachunk* invalid_chunk = (Metachunk*) top(); + while (chunk < invalid_chunk ) { + assert(chunk->is_free(), "Should be marked free"); + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + chunk_manager->remove_chunk(chunk); + assert(chunk->next() == NULL && + chunk->prev() == NULL, + "Was not removed from its list"); + chunk = (Metachunk*) next; + } +} + +#ifdef ASSERT +uint VirtualSpaceNode::container_count_slow() { + uint count = 0; + Metachunk* chunk = first_chunk(); + Metachunk* invalid_chunk = (Metachunk*) top(); + while (chunk < invalid_chunk ) { + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + // Don't count the chunks on the free lists. Those are + // still part of the VirtualSpaceNode but not currently + // counted. + if (!chunk->is_free()) { + count++; + } + chunk = (Metachunk*) next; + } + return count; +} +#endif + // List of VirtualSpaces for metadata allocation. // It has a _next link for singly linked list and a MemRegion // for total space in the VirtualSpace. @@ -406,6 +448,8 @@ VirtualSpaceList(size_t word_size); VirtualSpaceList(ReservedSpace rs); + size_t free_bytes(); + Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words, size_t medium_chunk_bunch); @@ -426,14 +470,14 @@ void initialize(size_t word_size); size_t virtual_space_total() { return _virtual_space_total; } - void inc_virtual_space_total(size_t v) { - Atomic::add_ptr(v, &_virtual_space_total); - } - - size_t virtual_space_count() { return _virtual_space_count; } - void inc_virtual_space_count() { - Atomic::inc_ptr(&_virtual_space_count); - } + + void inc_virtual_space_total(size_t v); + void dec_virtual_space_total(size_t v); + void inc_virtual_space_count(); + void dec_virtual_space_count(); + + // Unlink empty VirtualSpaceNodes and free it. + void purge(); // Used and capacity in the entire list of virtual spaces. // These are global values shared by all Metaspaces @@ -518,6 +562,9 @@ // protects allocations and contains. Mutex* const _lock; + // Type of metadata allocated. 
+ Metaspace::MetadataType _mdtype; + // Chunk related size size_t _medium_chunk_bunch; @@ -536,7 +583,11 @@ bool has_small_chunk_limit() { return !vs_list()->is_class(); } // Sum of all space in allocated chunks - size_t _allocation_total; + size_t _allocated_blocks_words; + + // Sum of all allocated chunks + size_t _allocated_chunks_words; + size_t _allocated_chunks_count; // Free lists of blocks are per SpaceManager since they // are assumed to be in chunks in use by the SpaceManager @@ -558,6 +609,7 @@ return (BlockFreelist*) &_block_freelists; } + Metaspace::MetadataType mdtype() { return _mdtype; } VirtualSpaceList* vs_list() const { return _vs_list; } Metachunk* current_chunk() const { return _current_chunk; } @@ -578,7 +630,8 @@ void initialize(); public: - SpaceManager(Mutex* lock, + SpaceManager(Metaspace::MetadataType mdtype, + Mutex* lock, VirtualSpaceList* vs_list); ~SpaceManager(); @@ -592,12 +645,27 @@ size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; } size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } - size_t allocation_total() const { return _allocation_total; } - void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); } + size_t allocated_blocks_words() const { return _allocated_blocks_words; } + size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; } + size_t allocated_chunks_words() const { return _allocated_chunks_words; } + size_t allocated_chunks_count() const { return _allocated_chunks_count; } + bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); } static Mutex* expand_lock() { return _expand_lock; } + // Increment the per Metaspace and global running sums for Metachunks + // by the given size. This is used when a Metachunk to added to + // the in-use list. + void inc_size_metrics(size_t words); + // Increment the per Metaspace and global running sums Metablocks by the given + // size. This is used when a Metablock is allocated. + void inc_used_metrics(size_t words); + // Delete the portion of the running sums for this SpaceManager. That is, + // the globals running sums for the Metachunks and Metablocks are + // decremented for all the Metachunks in-use by this SpaceManager. + void dec_total_from_size_metrics(); + // Set the sizes for the initial chunks. void get_initial_chunk_sizes(Metaspace::MetaspaceType type, size_t* chunk_word_size, @@ -643,8 +711,25 @@ void verify_chunk_size(Metachunk* chunk); NOT_PRODUCT(void mangle_freed_chunks();) #ifdef ASSERT - void verify_allocation_total(); + void verify_allocated_blocks_words(); #endif + + size_t get_raw_word_size(size_t word_size) { + // If only the dictionary is going to be used (i.e., no + // indexed free list), then there is a minimum size requirement. 
+ // MinChunkSize is a placeholder for the real minimum size JJJ + size_t byte_size = word_size * BytesPerWord; + + size_t byte_size_with_overhead = byte_size + Metablock::overhead(); + + size_t raw_bytes_size = MAX2(byte_size_with_overhead, + Metablock::min_block_byte_size()); + raw_bytes_size = ARENA_ALIGN(raw_bytes_size); + size_t raw_word_size = raw_bytes_size / BytesPerWord; + assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem"); + + return raw_word_size; + } }; uint const SpaceManager::_small_chunk_limit = 4; @@ -657,6 +742,28 @@ SpaceManager::_expand_lock_name, Mutex::_allow_vm_block_flag); +void VirtualSpaceNode::inc_container_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _container_count++; + assert(_container_count == container_count_slow(), + err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT + "container_count_slow() " SIZE_FORMAT, + _container_count, container_count_slow())); +} + +void VirtualSpaceNode::dec_container_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _container_count--; +} + +#ifdef ASSERT +void VirtualSpaceNode::verify_container_count() { + assert(_container_count == container_count_slow(), + err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT + "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); +} +#endif + // BlockFreelist methods BlockFreelist::BlockFreelist() : _dictionary(NULL) {} @@ -717,6 +824,10 @@ VirtualSpaceNode::~VirtualSpaceNode() { _rs.release(); +#ifdef ASSERT + size_t word_size = sizeof(*this) / BytesPerWord; + Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1); +#endif } size_t VirtualSpaceNode::used_words_in_vs() const { @@ -728,6 +839,9 @@ return pointer_delta(end(), bottom(), sizeof(MetaWord)); } +size_t VirtualSpaceNode::free_words_in_vs() const { + return pointer_delta(end(), top(), sizeof(MetaWord)); +} // Allocates the chunk from the virtual space only. // This interface is also used internally for debugging. Not all @@ -749,8 +863,8 @@ // Take the space (bump top on the current virtual space). 
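
// (Illustrative aside: the bump-pointer carve that take_from_committed()
// performs, reduced to a standalone sketch with hypothetical names. The
// NULL return mirrors the "does not fit" path; the real code may instead
// expand the virtual space, and it works in words rather than bytes.)
#include <stddef.h>

struct BumpSpaceSketch {
  char* _top;
  char* _end;
  char* carve(size_t bytes) {
    if (_top + bytes > _end) return NULL;  // request does not fit
    char* result = _top;
    _top += bytes;                         // bump top past the new chunk
    return result;
  }
};
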
inc_top(chunk_word_size);
- // Point the chunk at the space
- Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size);
+ // Initialize the chunk
+ Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
return result;
}
@@ -778,9 +892,11 @@
Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
assert_lock_strong(SpaceManager::expand_lock());
- Metachunk* result = NULL;
-
- return take_from_committed(chunk_word_size);
+ Metachunk* result = take_from_committed(chunk_word_size);
+ if (result != NULL) {
+ inc_container_count();
+ }
+ return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
@@ -859,6 +975,83 @@
}
}

+void VirtualSpaceList::inc_virtual_space_total(size_t v) {
+ assert_lock_strong(SpaceManager::expand_lock());
+ _virtual_space_total = _virtual_space_total + v;
+}
+void VirtualSpaceList::dec_virtual_space_total(size_t v) {
+ assert_lock_strong(SpaceManager::expand_lock());
+ _virtual_space_total = _virtual_space_total - v;
+}
+
+void VirtualSpaceList::inc_virtual_space_count() {
+ assert_lock_strong(SpaceManager::expand_lock());
+ _virtual_space_count++;
+}
+void VirtualSpaceList::dec_virtual_space_count() {
+ assert_lock_strong(SpaceManager::expand_lock());
+ _virtual_space_count--;
+}
+
+void ChunkManager::remove_chunk(Metachunk* chunk) {
+ size_t word_size = chunk->word_size();
+ ChunkIndex index = list_index(word_size);
+ if (index != HumongousIndex) {
+ free_chunks(index)->remove_chunk(chunk);
+ } else {
+ humongous_dictionary()->remove_chunk(chunk);
+ }
+
+ // Chunk is being removed from the chunks free list.
+ dec_free_chunks_total(chunk->capacity_word_size());
+}
+
+// Walk the list of VirtualSpaceNodes and delete
+// nodes with a 0 container_count. Remove Metachunks in
+// the node from their respective freelists.
+void VirtualSpaceList::purge() {
+ assert_lock_strong(SpaceManager::expand_lock());
+ // Don't use a VirtualSpaceListIterator because this
+ // list is being changed and a straightforward use of an iterator is not safe.
+ VirtualSpaceNode* purged_vsl = NULL;
+ VirtualSpaceNode* prev_vsl = virtual_space_list();
+ VirtualSpaceNode* next_vsl = prev_vsl;
+ while (next_vsl != NULL) {
+ VirtualSpaceNode* vsl = next_vsl;
+ next_vsl = vsl->next();
+ // Don't free the current virtual space since it will likely
+ // be needed soon.
+ if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
+ // Unlink it from the list
+ if (prev_vsl == vsl) {
+ // This is the case of the current node being the first node.
+ assert(vsl == virtual_space_list(), "Expected to be the first node");
+ set_virtual_space_list(vsl->next());
+ } else {
+ prev_vsl->set_next(vsl->next());
+ }
+
+ vsl->purge(chunk_manager());
+ dec_virtual_space_total(vsl->reserved()->word_size());
+ dec_virtual_space_count();
+ purged_vsl = vsl;
+ delete vsl;
+ } else {
+ prev_vsl = vsl;
+ }
+ }
+#ifdef ASSERT
+ if (purged_vsl != NULL) {
+ // List should be stable enough to use an iterator here.
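
// (Illustrative aside: the unlink-while-walking pattern that purge() above
// relies on, as a standalone singly linked list. This sketch uses a NULL
// prev for the head case instead of comparing against the list head, and
// _in_use stands in for container_count() != 0.)
#include <stddef.h>

struct NodeSketch {
  NodeSketch* _next;
  bool _in_use;
};

NodeSketch* purge_sketch(NodeSketch* head) {
  NodeSketch* prev = NULL;
  NodeSketch* cur = head;
  while (cur != NULL) {
    NodeSketch* next = cur->_next;  // capture before cur is freed
    if (!cur->_in_use) {
      if (prev == NULL) {
        head = next;                // removing the first node
      } else {
        prev->_next = next;
      }
      delete cur;
    } else {
      prev = cur;
    }
    cur = next;
  }
  return head;
}
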
+ VirtualSpaceListIterator iter(virtual_space_list()); + while (iter.repeat()) { + VirtualSpaceNode* vsl = iter.get_next(); + assert(vsl != purged_vsl, "Purge of vsl failed"); + } + } +#endif +} + size_t VirtualSpaceList::used_words_sum() { size_t allocated_by_vs = 0; VirtualSpaceListIterator iter(virtual_space_list()); @@ -899,6 +1092,9 @@ Mutex::_no_safepoint_check_flag); bool initialization_succeeded = grow_vs(word_size); + _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk); + _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk); + _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk); assert(initialization_succeeded, " VirtualSpaceList initialization should not fail"); } @@ -913,10 +1109,17 @@ Mutex::_no_safepoint_check_flag); VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); bool succeeded = class_entry->initialize(); + _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk); + _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk); + _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk); assert(succeeded, " VirtualSpaceList initialization should not fail"); link_vs(class_entry, rs.size()/BytesPerWord); } +size_t VirtualSpaceList::free_bytes() { + return virtual_space_list()->free_words_in_vs() * BytesPerWord; +} + // Allocate another meta virtual space and add it to the list. bool VirtualSpaceList::grow_vs(size_t vs_word_size) { assert_lock_strong(SpaceManager::expand_lock()); @@ -965,8 +1168,10 @@ // Get a chunk from the chunk freelist Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); - // Allocate a chunk out of the current virtual space. - if (next == NULL) { + if (next != NULL) { + next->container()->inc_container_count(); + } else { + // Allocate a chunk out of the current virtual space. next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); } @@ -1055,9 +1260,9 @@ // // After the GC the compute_new_size() for MetaspaceGC is called to // resize the capacity of the metaspaces. The current implementation -// is based on the flags MinMetaspaceFreeRatio and MaxHeapFreeRatio used +// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used // to resize the Java heap by some GC's. New flags can be implemented -// if really needed. MinHeapFreeRatio is used to calculate how much +// if really needed. MinMetaspaceFreeRatio is used to calculate how much // free space is desirable in the metaspace capacity to decide how much // to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much // free space is desirable in the metaspace capacity before decreasing @@ -1092,7 +1297,11 @@ } bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { + + size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); // If the user wants a limit, impose one. + size_t max_metaspace_size_bytes = MaxMetaspaceSize; + size_t metaspace_size_bytes = MetaspaceSize; if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) && MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) { return false; @@ -1104,57 +1313,48 @@ // If this is part of an allocation after a GC, expand // unconditionally. - if(MetaspaceGC::expand_after_GC()) { + if (MetaspaceGC::expand_after_GC()) { return true; } - size_t metaspace_size_words = MetaspaceSize / BytesPerWord; + // If the capacity is below the minimum capacity, allow the // expansion. 
Also set the high-water-mark (capacity_until_GC) // to that minimum capacity so that a GC will not be induced // until that minimum capacity is exceeded. - if (vsl->capacity_words_sum() < metaspace_size_words || + if (committed_capacity_bytes < metaspace_size_bytes || capacity_until_GC() == 0) { - set_capacity_until_GC(metaspace_size_words); + set_capacity_until_GC(metaspace_size_bytes); return true; } else { - if (vsl->capacity_words_sum() < capacity_until_GC()) { + if (committed_capacity_bytes < capacity_until_GC()) { return true; } else { if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT " capacity_until_GC " SIZE_FORMAT - " capacity_words_sum " SIZE_FORMAT - " used_words_sum " SIZE_FORMAT - " free chunks " SIZE_FORMAT - " free chunks count %d", + " allocated_capacity_bytes " SIZE_FORMAT, word_size, capacity_until_GC(), - vsl->capacity_words_sum(), - vsl->used_words_sum(), - vsl->chunk_manager()->free_chunks_total(), - vsl->chunk_manager()->free_chunks_count()); + MetaspaceAux::allocated_capacity_bytes()); } return false; } } } -// Variables are in bytes + void MetaspaceGC::compute_new_size() { assert(_shrink_factor <= 100, "invalid shrink factor"); uint current_shrink_factor = _shrink_factor; _shrink_factor = 0; - VirtualSpaceList *vsl = Metaspace::space_list(); - - size_t capacity_after_gc = vsl->capacity_bytes_sum(); - // Check to see if these two can be calculated without walking the CLDG - size_t used_after_gc = vsl->used_bytes_sum(); - size_t capacity_until_GC = vsl->capacity_bytes_sum(); - size_t free_after_gc = capacity_until_GC - used_after_gc; + // Until a faster way of calculating the "used" quantity is implemented, + // use "capacity". + const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes(); + const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; const double maximum_used_percentage = 1.0 - minimum_free_percentage; @@ -1167,45 +1367,34 @@ MetaspaceSize); if (PrintGCDetails && Verbose) { - const double free_percentage = ((double)free_after_gc) / capacity_until_GC; gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: "); gclog_or_tty->print_cr(" " " minimum_free_percentage: %6.2f" " maximum_used_percentage: %6.2f", minimum_free_percentage, maximum_used_percentage); - double d_free_after_gc = free_after_gc / (double) K; gclog_or_tty->print_cr(" " - " free_after_gc : %6.1fK" - " used_after_gc : %6.1fK" - " capacity_after_gc : %6.1fK" - " metaspace HWM : %6.1fK", - free_after_gc / (double) K, - used_after_gc / (double) K, - capacity_after_gc / (double) K, - capacity_until_GC / (double) K); - gclog_or_tty->print_cr(" " - " free_percentage: %6.2f", - free_percentage); + " used_after_gc : %6.1fKB", + used_after_gc / (double) K); } + size_t shrink_bytes = 0; if (capacity_until_GC < minimum_desired_capacity) { // If we have less capacity below the metaspace HWM, then // increment the HWM. 
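
// (Illustrative aside: the expand-side arithmetic with made-up numbers.
// With MinMetaspaceFreeRatio = 40, maximum_used_percentage is 0.60, so
// 60MB of post-GC usage asks for 60 / 0.60 = 100MB of capacity before the
// MetaspaceSize floor is applied. The inputs are assumptions for the
// example only.)
#include <stdio.h>

int main() {
  const double min_free_ratio = 40.0;  // stands in for MinMetaspaceFreeRatio
  const double max_used_pct = 1.0 - min_free_ratio / 100.0;
  const double used_after_gc = 60.0 * 1024 * 1024;  // bytes
  const double min_desired_capacity = used_after_gc / max_used_pct;
  printf("minimum desired capacity: %.0f bytes\n", min_desired_capacity);
  return 0;
}
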
size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; // Don't expand unless it's significant if (expand_bytes >= MinMetaspaceExpansion) { - size_t expand_words = expand_bytes / BytesPerWord; - MetaspaceGC::inc_capacity_until_GC(expand_words); + MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes); } if (PrintGCDetails && Verbose) { - size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes(); + size_t new_capacity_until_GC = capacity_until_GC; gclog_or_tty->print_cr(" expanding:" - " minimum_desired_capacity: %6.1fK" - " expand_words: %6.1fK" - " MinMetaspaceExpansion: %6.1fK" - " new metaspace HWM: %6.1fK", + " minimum_desired_capacity: %6.1fKB" + " expand_bytes: %6.1fKB" + " MinMetaspaceExpansion: %6.1fKB" + " new metaspace HWM: %6.1fKB", minimum_desired_capacity / (double) K, expand_bytes / (double) K, MinMetaspaceExpansion / (double) K, @@ -1215,11 +1404,10 @@ } // No expansion, now see if we want to shrink - size_t shrink_words = 0; // We would never want to shrink more than this - size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity; - assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT, - max_shrink_words)); + size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; + assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT, + max_shrink_bytes)); // Should shrinking be considered? if (MaxMetaspaceFreeRatio < 100) { @@ -1229,17 +1417,15 @@ size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); maximum_desired_capacity = MAX2(maximum_desired_capacity, MetaspaceSize); - if (PrintGC && Verbose) { + if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr(" " " maximum_free_percentage: %6.2f" " minimum_used_percentage: %6.2f", maximum_free_percentage, minimum_used_percentage); gclog_or_tty->print_cr(" " - " capacity_until_GC: %6.1fK" - " minimum_desired_capacity: %6.1fK" - " maximum_desired_capacity: %6.1fK", - capacity_until_GC / (double) K, + " minimum_desired_capacity: %6.1fKB" + " maximum_desired_capacity: %6.1fKB", minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K); } @@ -1249,17 +1435,17 @@ if (capacity_until_GC > maximum_desired_capacity) { // Capacity too large, compute shrinking size - shrink_words = capacity_until_GC - maximum_desired_capacity; + shrink_bytes = capacity_until_GC - maximum_desired_capacity; // We don't want shrink all the way back to initSize if people call // System.gc(), because some programs do that between "phases" and then // we'd just have to grow the heap up again for the next phase. So we // damp the shrinking: 0% on the first call, 10% on the second call, 40% // on the third call, and 100% by the fourth call. But if we recompute // size without shrinking, it goes back to 0%. 
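
// (Illustrative aside: the damping schedule described above, in isolation.
// Repeated calls that keep finding excess capacity release 0%, 10%, 40%,
// then 100% of it; note the integer form "excess / 100 * factor" divides
// before multiplying, exactly as the shrink computation below does. The
// 1000-byte excess is a made-up input.)
#include <stdio.h>

int main() {
  unsigned factor = 0;                // plays the role of _shrink_factor
  const unsigned long excess = 1000;  // bytes over maximum_desired_capacity
  for (int call = 1; call <= 4; call++) {
    unsigned long shrink = excess / 100 * factor;
    printf("call %d: factor %3u%% -> shrink %lu bytes\n", call, factor, shrink);
    factor = (factor == 0) ? 10 : (factor * 4 > 100 ? 100 : factor * 4);
  }
  return 0;
}
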
- shrink_words = shrink_words / 100 * current_shrink_factor; - assert(shrink_words <= max_shrink_words, + shrink_bytes = shrink_bytes / 100 * current_shrink_factor; + assert(shrink_bytes <= max_shrink_bytes, err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, - shrink_words, max_shrink_words)); + shrink_bytes, max_shrink_bytes)); if (current_shrink_factor == 0) { _shrink_factor = 10; } else { @@ -1273,11 +1459,11 @@ MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); gclog_or_tty->print_cr(" " - " shrink_words: %.1fK" + " shrink_bytes: %.1fK" " current_shrink_factor: %d" " new shrink factor: %d" " MinMetaspaceExpansion: %.1fK", - shrink_words / (double) K, + shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); @@ -1285,23 +1471,11 @@ } } - // Don't shrink unless it's significant - if (shrink_words >= MinMetaspaceExpansion) { - VirtualSpaceNode* csp = vsl->current_virtual_space(); - size_t available_to_shrink = csp->capacity_words_in_vs() - - csp->used_words_in_vs(); - shrink_words = MIN2(shrink_words, available_to_shrink); - csp->shrink_by(shrink_words); - MetaspaceGC::dec_capacity_until_GC(shrink_words); - if (PrintGCDetails && Verbose) { - size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes(); - gclog_or_tty->print_cr(" metaspace HWM: %.1fK", new_capacity_until_GC / (double) K); - } + if (shrink_bytes >= MinMetaspaceExpansion && + ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { + MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes); } - assert(used_after_gc <= vsl->capacity_bytes_sum(), - "sanity check"); - } // Metadebug methods @@ -1380,76 +1554,6 @@ } #endif -// ChunkList methods - -size_t ChunkList::sum_list_size() { - size_t result = 0; - Metachunk* cur = head(); - while (cur != NULL) { - result += cur->word_size(); - cur = cur->next(); - } - return result; -} - -size_t ChunkList::sum_list_count() { - size_t result = 0; - Metachunk* cur = head(); - while (cur != NULL) { - result++; - cur = cur->next(); - } - return result; -} - -size_t ChunkList::sum_list_capacity() { - size_t result = 0; - Metachunk* cur = head(); - while (cur != NULL) { - result += cur->capacity_word_size(); - cur = cur->next(); - } - return result; -} - -void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) { - assert_lock_strong(SpaceManager::expand_lock()); - assert(head == tail || tail->next() == NULL, - "Not the tail or the head has already been added to a list"); - - if (TraceMetadataChunkAllocation && Verbose) { - gclog_or_tty->print("ChunkList::add_at_head(head, tail): "); - Metachunk* cur = head; - while (cur != NULL) { - gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size()); - cur = cur->next(); - } - gclog_or_tty->print_cr(""); - } - - if (tail != NULL) { - tail->set_next(_head); - } - set_head(head); -} - -void ChunkList::add_at_head(Metachunk* list) { - if (list == NULL) { - // Nothing to add - return; - } - assert_lock_strong(SpaceManager::expand_lock()); - Metachunk* head = list; - Metachunk* tail = list; - Metachunk* cur = head->next(); - // Search for the tail since it is not passed. 
- while (cur != NULL) { - tail = cur; - cur = cur->next(); - } - add_at_head(head, tail); -} - // ChunkManager methods // Verification of _free_chunks_total and _free_chunks_count does not @@ -1553,7 +1657,7 @@ continue; } - result = result + list->sum_list_capacity(); + result = result + list->count() * list->size(); } result = result + humongous_dictionary()->total_size(); return result; @@ -1567,7 +1671,7 @@ if (list == NULL) { continue; } - count = count + list->sum_list_count(); + count = count + list->count(); } count = count + humongous_dictionary()->total_free_blocks(); return count; @@ -1622,7 +1726,7 @@ } // Remove the chunk as the head of the list. - free_list->set_head(chunk->next()); + free_list->remove_chunk(chunk); // Chunk is being removed from the chunks free list. dec_free_chunks_total(chunk->capacity_word_size()); @@ -1647,9 +1751,6 @@ } // Chunk is being removed from the chunks free list. dec_free_chunks_total(chunk->capacity_word_size()); -#ifdef ASSERT - chunk->set_is_free(false); -#endif } else { return NULL; } @@ -1658,6 +1759,11 @@ // Remove it from the links to this freelist chunk->set_next(NULL); chunk->set_prev(NULL); +#ifdef ASSERT + // Chunk is no longer on any freelist. Setting to false make container_count_slow() + // work. + chunk->set_is_free(false); +#endif slow_locked_verify(); return chunk; } @@ -1679,7 +1785,7 @@ size_t list_count; if (list_index(word_size) < HumongousIndex) { ChunkList* list = find_free_chunks_list(word_size); - list_count = list->sum_list_count(); + list_count = list->count(); } else { list_count = humongous_dictionary()->total_count(); } @@ -1772,18 +1878,28 @@ } size_t SpaceManager::sum_capacity_in_chunks_in_use() const { - MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); - size_t sum = 0; - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - Metachunk* chunk = chunks_in_use(i); - while (chunk != NULL) { - // Just changed this sum += chunk->capacity_word_size(); - // sum += chunk->word_size() - Metachunk::overhead(); - sum += chunk->capacity_word_size(); - chunk = chunk->next(); + // For CMS use "allocated_chunks_words()" which does not need the + // Metaspace lock. For the other collectors sum over the + // lists. Use both methods as a check that "allocated_chunks_words()" + // is correct. That is, sum_capacity_in_chunks() is too expensive + // to use in the product and allocated_chunks_words() should be used + // but allow for checking that allocated_chunks_words() returns the same + // value as sum_capacity_in_chunks_in_use() which is the definitive + // answer. 
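
// (Illustrative aside: the dual-bookkeeping idea from the comment above,
// in miniature - a running sum maintained at mutation time that debug
// builds can cross-check against a slow walk, so the cheap counter can be
// trusted in the product. Hypothetical names; not HotSpot code.)
#include <assert.h>
#include <stddef.h>

struct LedgerSketch {
  size_t _items[8];
  int _count;
  size_t _running_sum;                // cheap, kept current on every add
  LedgerSketch() : _count(0), _running_sum(0) {}
  void add(size_t words) { _items[_count++] = words; _running_sum += words; }
  size_t sum_slow() const {           // the O(n) recount
    size_t s = 0;
    for (int i = 0; i < _count; i++) s += _items[i];
    return s;
  }
  size_t sum() const {
    assert(_running_sum == sum_slow());  // check against the definitive answer
    return _running_sum;
  }
};
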
+ if (UseConcMarkSweepGC) { + return allocated_chunks_words(); + } else { + MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); + size_t sum = 0; + for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { + Metachunk* chunk = chunks_in_use(i); + while (chunk != NULL) { + sum += chunk->capacity_word_size(); + chunk = chunk->next(); + } } + return sum; } - return sum; } size_t SpaceManager::sum_count_in_chunks_in_use() { @@ -1938,15 +2054,49 @@ } } -SpaceManager::SpaceManager(Mutex* lock, +SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, + Mutex* lock, VirtualSpaceList* vs_list) : _vs_list(vs_list), - _allocation_total(0), + _mdtype(mdtype), + _allocated_blocks_words(0), + _allocated_chunks_words(0), + _allocated_chunks_count(0), _lock(lock) { initialize(); } +void SpaceManager::inc_size_metrics(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + // Total of allocated Metachunks and allocated Metachunks count + // for each SpaceManager + _allocated_chunks_words = _allocated_chunks_words + words; + _allocated_chunks_count++; + // Global total of capacity in allocated Metachunks + MetaspaceAux::inc_capacity(mdtype(), words); + // Global total of allocated Metablocks. + // used_words_slow() includes the overhead in each + // Metachunk so include it in the used when the + // Metachunk is first added (so only added once per + // Metachunk). + MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); +} + +void SpaceManager::inc_used_metrics(size_t words) { + // Add to the per SpaceManager total + Atomic::add_ptr(words, &_allocated_blocks_words); + // Add to the global total + MetaspaceAux::inc_used(mdtype(), words); +} + +void SpaceManager::dec_total_from_size_metrics() { + MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); + MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); + // Also deduct the overhead per Metachunk + MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); +} + void SpaceManager::initialize() { Metadebug::init_allocation_fail_alot_count(); for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { @@ -1958,9 +2108,37 @@ } } +void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { + if (chunks == NULL) { + return; + } + ChunkList* list = free_chunks(index); + assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); + assert_lock_strong(SpaceManager::expand_lock()); + Metachunk* cur = chunks; + + // This returns chunks one at a time. 
If a new
+ // class List can be created that is a base class
+ // of FreeList then something like FreeList::prepend()
+ // can be used in place of this loop
+ while (cur != NULL) {
+ assert(cur->container() != NULL, "Container should have been set");
+ cur->container()->dec_container_count();
+ // Capture the next link before it is changed
+ // by the call to return_chunk_at_head();
+ Metachunk* next = cur->next();
+ cur->set_is_free(true);
+ list->return_chunk_at_head(cur);
+ cur = next;
+ }
+}
+
SpaceManager::~SpaceManager() {
// This call this->_lock which can't be done while holding expand_lock()
- const size_t in_use_before = sum_capacity_in_chunks_in_use();
+ assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
+ err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
+ " allocated_chunks_words() " SIZE_FORMAT,
+ sum_capacity_in_chunks_in_use(), allocated_chunks_words()));

MutexLockerEx fcl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
@@ -1969,17 +2147,19 @@

chunk_manager->slow_locked_verify();

+ dec_total_from_size_metrics();
+
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
locked_print_chunks_in_use_on(gclog_or_tty);
}

- // Mangle freed memory.
- NOT_PRODUCT(mangle_freed_chunks();)
+ // Do not mangle freed Metachunks. The chunk size inside Metachunks
+ // is needed during the freeing of VirtualSpaceNodes.

// Have to update before the chunks_in_use lists are emptied
// below.
- chunk_manager->inc_free_chunks_total(in_use_before,
+ chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
sum_count_in_chunks_in_use());

// Add all the chunks in use by this space manager
@@ -1995,11 +2175,11 @@
chunk_size_name(i));
}
Metachunk* chunks = chunks_in_use(i);
- chunk_manager->free_chunks(i)->add_at_head(chunks);
+ chunk_manager->return_chunks(i, chunks);
set_chunks_in_use(i, NULL);
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr("updated freelist count %d %s",
- chunk_manager->free_chunks(i)->sum_list_count(),
+ chunk_manager->free_chunks(i)->count(),
chunk_size_name(i));
}
assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
@@ -2035,6 +2215,7 @@
" granularity %d",
humongous_chunks->word_size(), HumongousChunkGranularity));
Metachunk* next_humongous_chunks = humongous_chunks->next();
+ humongous_chunks->container()->dec_container_count();
chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
humongous_chunks = next_humongous_chunks;
}
@@ -2044,7 +2225,6 @@
chunk_manager->humongous_dictionary()->total_count(),
chunk_size_name(HumongousIndex));
}
- set_chunks_in_use(HumongousIndex, NULL);
chunk_manager->slow_locked_verify();
}
@@ -2124,12 +2304,17 @@
assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
}

+ // Add to the running sum of capacity
+ inc_size_metrics(new_chunk->word_size());
+
assert(new_chunk->is_empty(), "Not ready for reuse");
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
sum_count_in_chunks_in_use());
new_chunk->print_on(gclog_or_tty);
- vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+ if (vs_list() != NULL) {
+ vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+ }
}
}
@@ -2152,19 +2337,7 @@
MetaWord* SpaceManager::allocate(size_t word_size) {
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
- // If only the dictionary is going to be used (i.e., no
- // indexed free list), then there is a minimum size requirement.
- // MinChunkSize is a placeholder for the real minimum size JJJ - size_t byte_size = word_size * BytesPerWord; - - size_t byte_size_with_overhead = byte_size + Metablock::overhead(); - - size_t raw_bytes_size = MAX2(byte_size_with_overhead, - Metablock::min_block_byte_size()); - raw_bytes_size = ARENA_ALIGN(raw_bytes_size); - size_t raw_word_size = raw_bytes_size / BytesPerWord; - assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem"); - + size_t raw_word_size = get_raw_word_size(word_size); BlockFreelist* fl = block_freelists(); MetaWord* p = NULL; // Allocation from the dictionary is expensive in the sense that @@ -2200,7 +2373,7 @@ // of memory if this returns null. if (DumpSharedSpaces) { assert(current_chunk() != NULL, "should never happen"); - inc_allocation_total(word_size); + inc_used_metrics(word_size); return current_chunk()->allocate(word_size); // caller handles null result } if (current_chunk() != NULL) { @@ -2211,7 +2384,7 @@ result = grow_and_allocate(word_size); } if (result > 0) { - inc_allocation_total(word_size); + inc_used_metrics(word_size); assert(result != (MetaWord*) chunks_in_use(MediumIndex), "Head of the list is being allocated"); } @@ -2245,20 +2418,14 @@ } #ifdef ASSERT -void SpaceManager::verify_allocation_total() { +void SpaceManager::verify_allocated_blocks_words() { // Verification is only guaranteed at a safepoint. - if (SafepointSynchronize::is_at_safepoint()) { - gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT - " sum_used_in_chunks_in_use " SIZE_FORMAT, - this, - allocation_total(), - sum_used_in_chunks_in_use()); - } - MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); - assert(allocation_total() == sum_used_in_chunks_in_use(), + assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), + "Verification can fail if the applications is running"); + assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), err_msg("allocation total is not consistent " SIZE_FORMAT " vs " SIZE_FORMAT, - allocation_total(), sum_used_in_chunks_in_use())); + allocated_blocks_words(), sum_used_in_chunks_in_use())); } #endif @@ -2314,14 +2481,65 @@ // MetaspaceAux -size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) { + +size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0}; +size_t MetaspaceAux::_allocated_used_words[] = {0, 0}; + +size_t MetaspaceAux::free_bytes() { + size_t result = 0; + if (Metaspace::class_space_list() != NULL) { + result = result + Metaspace::class_space_list()->free_bytes(); + } + if (Metaspace::space_list() != NULL) { + result = result + Metaspace::space_list()->free_bytes(); + } + return result; +} + +void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + assert(words <= allocated_capacity_words(mdtype), + err_msg("About to decrement below 0: words " SIZE_FORMAT + " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT, + words, mdtype, allocated_capacity_words(mdtype))); + _allocated_capacity_words[mdtype] -= words; +} + +void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + // Needs to be atomic + _allocated_capacity_words[mdtype] += words; +} + +void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { + assert(words <= allocated_used_words(mdtype), + err_msg("About to decrement below 0: words " SIZE_FORMAT + " is greater than _allocated_used_words[%u] " SIZE_FORMAT, + 
words, mdtype, allocated_used_words(mdtype))); + // For CMS deallocation of the Metaspaces occurs during the + // sweep which is a concurrent phase. Protection by the expand_lock() + // is not enough since allocation is on a per Metaspace basis + // and protected by the Metaspace lock. + jlong minus_words = (jlong) - (jlong) words; + Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]); +} + +void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { + // _allocated_used_words tracks allocations for + // each piece of metadata. Those allocations are + // generally done concurrently by different application + // threads so must be done atomically. + Atomic::add_ptr(words, &_allocated_used_words[mdtype]); +} + +size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { size_t used = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { Metaspace* msp = iter.get_next(); - // Sum allocation_total for each metaspace + // Sum allocated_blocks_words for each metaspace if (msp != NULL) { - used += msp->used_words(mdtype); + used += msp->used_words_slow(mdtype); } } return used * BytesPerWord; @@ -2339,13 +2557,15 @@ return free * BytesPerWord; } -size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) { - size_t capacity = free_chunks_total(mdtype); +size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { + // Don't count the space in the freelists. That space will be + // added to the capacity calculation as needed. + size_t capacity = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { Metaspace* msp = iter.get_next(); if (msp != NULL) { - capacity += msp->capacity_words(mdtype); + capacity += msp->capacity_words_slow(mdtype); } } return capacity * BytesPerWord; @@ -2372,23 +2592,30 @@ return free_chunks_total(mdtype) * BytesPerWord; } +size_t MetaspaceAux::free_chunks_total() { + return free_chunks_total(Metaspace::ClassType) + + free_chunks_total(Metaspace::NonClassType); +} + +size_t MetaspaceAux::free_chunks_total_in_bytes() { + return free_chunks_total() * BytesPerWord; +} + void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { gclog_or_tty->print(", [Metaspace:"); if (PrintGCDetails && Verbose) { gclog_or_tty->print(" " SIZE_FORMAT "->" SIZE_FORMAT - "(" SIZE_FORMAT "/" SIZE_FORMAT ")", + "(" SIZE_FORMAT ")", prev_metadata_used, - used_in_bytes(), - capacity_in_bytes(), + allocated_capacity_bytes(), reserved_in_bytes()); } else { gclog_or_tty->print(" " SIZE_FORMAT "K" "->" SIZE_FORMAT "K" - "(" SIZE_FORMAT "K/" SIZE_FORMAT "K)", + "(" SIZE_FORMAT "K)", prev_metadata_used / K, - used_in_bytes()/ K, - capacity_in_bytes()/K, + allocated_capacity_bytes() / K, reserved_in_bytes()/ K); } @@ -2403,23 +2630,28 @@ out->print_cr(" Metaspace total " SIZE_FORMAT "K, used " SIZE_FORMAT "K," " reserved " SIZE_FORMAT "K", - capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K); + allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K); + out->print_cr(" data space " SIZE_FORMAT "K, used " SIZE_FORMAT "K," " reserved " SIZE_FORMAT "K", - capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K); + allocated_capacity_bytes(nct)/K, + allocated_used_bytes(nct)/K, + reserved_in_bytes(nct)/K); out->print_cr(" class space " SIZE_FORMAT "K, used " SIZE_FORMAT "K," " reserved " SIZE_FORMAT "K", - capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K); + allocated_capacity_bytes(ct)/K, + allocated_used_bytes(ct)/K, + 
reserved_in_bytes(ct)/K); } // Print information for class space and data space separately. // This is almost the same as above. void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype); - size_t capacity_bytes = capacity_in_bytes(mdtype); - size_t used_bytes = used_in_bytes(mdtype); + size_t capacity_bytes = capacity_bytes_slow(mdtype); + size_t used_bytes = used_bytes_slow(mdtype); size_t free_bytes = free_in_bytes(mdtype); size_t used_and_free = used_bytes + free_bytes + free_chunks_capacity_bytes; @@ -2492,6 +2724,54 @@ Metaspace::class_space_list()->chunk_manager()->verify(); } +void MetaspaceAux::verify_capacity() { +#ifdef ASSERT + size_t running_sum_capacity_bytes = allocated_capacity_bytes(); + // For purposes of the running sum of capacity, verify against capacity + size_t capacity_in_use_bytes = capacity_bytes_slow(); + assert(running_sum_capacity_bytes == capacity_in_use_bytes, + err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT + " capacity_bytes_slow()" SIZE_FORMAT, + running_sum_capacity_bytes, capacity_in_use_bytes)); + for (Metaspace::MetadataType i = Metaspace::ClassType; + i < Metaspace:: MetadataTypeCount; + i = (Metaspace::MetadataType)(i + 1)) { + size_t capacity_in_use_bytes = capacity_bytes_slow(i); + assert(allocated_capacity_bytes(i) == capacity_in_use_bytes, + err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT + " capacity_bytes_slow(%u)" SIZE_FORMAT, + i, allocated_capacity_bytes(i), i, capacity_in_use_bytes)); + } +#endif +} + +void MetaspaceAux::verify_used() { +#ifdef ASSERT + size_t running_sum_used_bytes = allocated_used_bytes(); + // For purposes of the running sum of used, verify against used + size_t used_in_use_bytes = used_bytes_slow(); + assert(allocated_used_bytes() == used_in_use_bytes, + err_msg("allocated_used_bytes() " SIZE_FORMAT + " used_bytes_slow()" SIZE_FORMAT, + allocated_used_bytes(), used_in_use_bytes)); + for (Metaspace::MetadataType i = Metaspace::ClassType; + i < Metaspace:: MetadataTypeCount; + i = (Metaspace::MetadataType)(i + 1)) { + size_t used_in_use_bytes = used_bytes_slow(i); + assert(allocated_used_bytes(i) == used_in_use_bytes, + err_msg("allocated_used_bytes(%u) " SIZE_FORMAT + " used_bytes_slow(%u)" SIZE_FORMAT, + i, allocated_used_bytes(i), i, used_in_use_bytes)); + } +#endif +} + +void MetaspaceAux::verify_metrics() { + verify_capacity(); + verify_used(); +} + + // Metaspace methods size_t Metaspace::_first_chunk_word_size = 0; @@ -2583,7 +2863,7 @@ assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized"); - _vsm = new SpaceManager(lock, space_list()); + _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list()); if (_vsm == NULL) { return; } @@ -2597,7 +2877,7 @@ "Class VirtualSpaceList has not been initialized"); // Allocate SpaceManager for classes. 
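
// (Illustrative aside on MetaspaceAux::dec_used() above: when only an
// atomic add is available, a decrement is an add of the negated value.
// The stand-in below is not atomic; HotSpot's Atomic::add_ptr is assumed
// to supply the real atomicity. Names are hypothetical.)
#include <stddef.h>

typedef long word_t;                      // stands in for intptr_t
static word_t g_used_words = 0;

static void add_ptr_sketch(word_t v, word_t* dest) { *dest += v; }  // stand-in

void inc_used_sketch(size_t words) {
  add_ptr_sketch((word_t)words, &g_used_words);
}
void dec_used_sketch(size_t words) {
  word_t minus_words = -(word_t)words;    // negate first, then add
  add_ptr_sketch(minus_words, &g_used_words);
}
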
- _class_vsm = new SpaceManager(lock, class_space_list()); + _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list()); if (_class_vsm == NULL) { return; } @@ -2621,6 +2901,9 @@ if (class_chunk != NULL) { class_vsm()->add_chunk(class_chunk, true); } + + _alloc_record_head = NULL; + _alloc_record_tail = NULL; } size_t Metaspace::align_word_size_up(size_t word_size) { @@ -2641,8 +2924,8 @@ MetaWord* result; MetaspaceGC::set_expand_after_GC(true); size_t before_inc = MetaspaceGC::capacity_until_GC(); - size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size); - MetaspaceGC::inc_capacity_until_GC(delta_words); + size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord; + MetaspaceGC::inc_capacity_until_GC(delta_bytes); if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); @@ -2660,8 +2943,8 @@ return (char*)vsm()->current_chunk()->bottom(); } -size_t Metaspace::used_words(MetadataType mdtype) const { - // return vsm()->allocation_total(); +size_t Metaspace::used_words_slow(MetadataType mdtype) const { + // return vsm()->allocated_used_words(); return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() : vsm()->sum_used_in_chunks_in_use(); // includes overhead! } @@ -2676,16 +2959,24 @@ // have been made. Don't include space in the global freelist and // in the space available in the dictionary which // is already counted in some chunk. -size_t Metaspace::capacity_words(MetadataType mdtype) const { +size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() : vsm()->sum_capacity_in_chunks_in_use(); } +size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { + return used_words_slow(mdtype) * BytesPerWord; +} + +size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { + return capacity_words_slow(mdtype) * BytesPerWord; +} + void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { if (SafepointSynchronize::is_at_safepoint()) { assert(Thread::current()->is_VM_thread(), "should be the VM thread"); // Don't take Heap_lock - MutexLocker ml(vsm()->lock()); + MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); if (word_size < TreeChunk::min_size()) { // Dark matter. Too small for dictionary. #ifdef ASSERT @@ -2699,7 +2990,7 @@ vsm()->deallocate(ptr, word_size); } } else { - MutexLocker ml(vsm()->lock()); + MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); if (word_size < TreeChunk::min_size()) { // Dark matter. Too small for dictionary. @@ -2717,12 +3008,14 @@ } Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, - bool read_only, MetadataType mdtype, TRAPS) { + bool read_only, MetaspaceObj::Type type, TRAPS) { if (HAS_PENDING_EXCEPTION) { assert(false, "Should not allocate with exception pending"); return NULL; // caller does a CHECK_NULL too } + MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; + // SSS: Should we align the allocations and make sure the sizes are aligned. MetaWord* result = NULL; @@ -2732,13 +3025,13 @@ // with the SymbolTable_lock. Dumping is single threaded for now. We'll have // to revisit this for application class data sharing. 
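
// (Illustrative aside: the head/tail append that record_allocation() below
// uses to keep shared-space allocation records in allocation order with
// O(1) appends. Simplified stand-in types; not HotSpot code.)
#include <stddef.h>

struct RecSketch {
  RecSketch* _next;
  void* _ptr;
  int _byte_size;
  RecSketch(void* p, int n) : _next(NULL), _ptr(p), _byte_size(n) {}
};

static RecSketch* g_head = NULL;
static RecSketch* g_tail = NULL;

void record_sketch(void* p, int n) {
  RecSketch* rec = new RecSketch(p, n);
  if (g_head == NULL) {
    g_head = g_tail = rec;   // first record starts the list
  } else {
    g_tail->_next = rec;     // append at the tail
    g_tail = rec;
  }
}
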
if (DumpSharedSpaces) { - if (read_only) { - result = loader_data->ro_metaspace()->allocate(word_size, NonClassType); - } else { - result = loader_data->rw_metaspace()->allocate(word_size, NonClassType); - } + assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); + Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); + result = space->allocate(word_size, NonClassType); if (result == NULL) { report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); + } else { + space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); } return Metablock::initialize(result, word_size); } @@ -2773,6 +3066,45 @@ return Metablock::initialize(result, word_size); } +void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { + assert(DumpSharedSpaces, "sanity"); + + AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize); + if (_alloc_record_head == NULL) { + _alloc_record_head = _alloc_record_tail = rec; + } else { + _alloc_record_tail->_next = rec; + _alloc_record_tail = rec; + } +} + +void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { + assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); + + address last_addr = (address)bottom(); + + for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { + address ptr = rec->_ptr; + if (last_addr < ptr) { + closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr); + } + closure->doit(ptr, rec->_type, rec->_byte_size); + last_addr = ptr + rec->_byte_size; + } + + address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType); + if (last_addr < top) { + closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); + } +} + +void Metaspace::purge() { + MutexLockerEx cl(SpaceManager::expand_lock(), + Mutex::_no_safepoint_check_flag); + space_list()->purge(); + class_space_list()->purge(); +} + void Metaspace::print_on(outputStream* out) const { // Print both class virtual space counts and metaspace. if (Verbose) { @@ -2790,7 +3122,8 @@ // aren't deleted presently. When they are, some sort of locking might // be needed. Note, locking this can cause inversion problems with the // caller in MetaspaceObj::is_metadata() function. - return space_list()->contains(ptr) || class_space_list()->contains(ptr); + return space_list()->contains(ptr) || + class_space_list()->contains(ptr); } void Metaspace::verify() { @@ -2799,10 +3132,6 @@ } void Metaspace::dump(outputStream* const out) const { - if (UseMallocOnly) { - // Just print usage for now - out->print_cr("usage %d", used_words(Metaspace::NonClassType)); - } out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm()); vsm()->dump(out); out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm()); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/metaspace.hpp --- a/src/share/vm/memory/metaspace.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/metaspace.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -86,7 +86,10 @@ friend class MetaspaceAux; public: - enum MetadataType {ClassType, NonClassType}; + enum MetadataType {ClassType = 0, + NonClassType = ClassType + 1, + MetadataTypeCount = ClassType + 2 + }; enum MetaspaceType { StandardMetaspaceType, BootMetaspaceType, @@ -111,6 +114,10 @@ SpaceManager* _class_vsm; SpaceManager* class_vsm() const { return _class_vsm; } + // Allocate space for metadata of type mdtype. This is space + // within a Metachunk and is used by + // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS) + // which returns a Metablock. MetaWord* allocate(size_t word_size, MetadataType mdtype); // Virtual Space lists for both classes and other metadata @@ -120,6 +127,23 @@ static VirtualSpaceList* space_list() { return _space_list; } static VirtualSpaceList* class_space_list() { return _class_space_list; } + // This is used by DumpSharedSpaces only, where only _vsm is used. So we will + // maintain a single list for now. + void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size); + + class AllocRecord : public CHeapObj { + public: + AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size) + : _next(NULL), _ptr(ptr), _type(type), _byte_size(byte_size) {} + AllocRecord *_next; + address _ptr; + MetaspaceObj::Type _type; + int _byte_size; + }; + + AllocRecord * _alloc_record_head; + AllocRecord * _alloc_record_tail; + public: Metaspace(Mutex* lock, MetaspaceType type); @@ -133,13 +157,16 @@ static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } char* bottom() const; - size_t used_words(MetadataType mdtype) const; + size_t used_words_slow(MetadataType mdtype) const; size_t free_words(MetadataType mdtype) const; - size_t capacity_words(MetadataType mdtype) const; + size_t capacity_words_slow(MetadataType mdtype) const; size_t waste_words(MetadataType mdtype) const; - static Metablock* allocate(ClassLoaderData* loader_data, size_t size, - bool read_only, MetadataType mdtype, TRAPS); + size_t used_bytes_slow(MetadataType mdtype) const; + size_t capacity_bytes_slow(MetadataType mdtype) const; + + static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size, + bool read_only, MetaspaceObj::Type type, TRAPS); void deallocate(MetaWord* ptr, size_t byte_size, bool is_class); MetaWord* expand_and_allocate(size_t size, @@ -150,36 +177,114 @@ static bool contains(const void *ptr); void dump(outputStream* const out) const; + // Free empty virtualspaces + static void purge(); + void print_on(outputStream* st) const; // Debugging support void verify(); + + class AllocRecordClosure : public StackObj { + public: + virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) = 0; + }; + + void iterate(AllocRecordClosure *closure); }; class MetaspaceAux : AllStatic { - - // Statistics for class space and data space in metaspace. 
- static size_t used_in_bytes(Metaspace::MetadataType mdtype); - static size_t free_in_bytes(Metaspace::MetadataType mdtype); - static size_t capacity_in_bytes(Metaspace::MetadataType mdtype); - static size_t reserved_in_bytes(Metaspace::MetadataType mdtype); - static size_t free_chunks_total(Metaspace::MetadataType mdtype); static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype); public: - // Total of space allocated to metadata in all Metaspaces - static size_t used_in_bytes() { - return used_in_bytes(Metaspace::ClassType) + - used_in_bytes(Metaspace::NonClassType); + // Statistics for class space and data space in metaspace. + + // These methods iterate over the classloader data graph + // for the given Metaspace type. These are slow. + static size_t used_bytes_slow(Metaspace::MetadataType mdtype); + static size_t free_in_bytes(Metaspace::MetadataType mdtype); + static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype); + + // Iterates over the virtual space list. + static size_t reserved_in_bytes(Metaspace::MetadataType mdtype); + + // Running sum of space in all Metachunks that has been + // allocated to a Metaspace. This is used instead of + // iterating over all the classloaders. One for each + // type of Metadata + static size_t _allocated_capacity_words[Metaspace:: MetadataTypeCount]; + // Running sum of space in all Metachunks that have + // are being used for metadata. One for each + // type of Metadata. + static size_t _allocated_used_words[Metaspace:: MetadataTypeCount]; + + public: + // Decrement and increment _allocated_capacity_words + static void dec_capacity(Metaspace::MetadataType type, size_t words); + static void inc_capacity(Metaspace::MetadataType type, size_t words); + + // Decrement and increment _allocated_used_words + static void dec_used(Metaspace::MetadataType type, size_t words); + static void inc_used(Metaspace::MetadataType type, size_t words); + + // Total of space allocated to metadata in all Metaspaces. + // This sums the space used in each Metachunk by + // iterating over the classloader data graph + static size_t used_bytes_slow() { + return used_bytes_slow(Metaspace::ClassType) + + used_bytes_slow(Metaspace::NonClassType); } - // Total of available space in all Metaspaces - // Total of capacity allocated to all Metaspaces. This includes - // space in Metachunks not yet allocated and in the Metachunk - // freelist. 
- static size_t capacity_in_bytes() { - return capacity_in_bytes(Metaspace::ClassType) + - capacity_in_bytes(Metaspace::NonClassType); + // Used by MetaspaceCounters + static size_t free_chunks_total(); + static size_t free_chunks_total_in_bytes(); + + static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) { + return _allocated_capacity_words[mdtype]; + } + static size_t allocated_capacity_words() { + return _allocated_capacity_words[Metaspace::ClassType] + + _allocated_capacity_words[Metaspace::NonClassType]; + } + static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) { + return allocated_capacity_words(mdtype) * BytesPerWord; + } + static size_t allocated_capacity_bytes() { + return allocated_capacity_words() * BytesPerWord; + } + + static size_t allocated_used_words(Metaspace::MetadataType mdtype) { + return _allocated_used_words[mdtype]; + } + static size_t allocated_used_words() { + return _allocated_used_words[Metaspace::ClassType] + + _allocated_used_words[Metaspace::NonClassType]; + } + static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) { + return allocated_used_words(mdtype) * BytesPerWord; + } + static size_t allocated_used_bytes() { + return allocated_used_words() * BytesPerWord; + } + + static size_t free_bytes(); + + // Total capacity in all Metaspaces + static size_t capacity_bytes_slow() { +#ifdef PRODUCT + // Use allocated_capacity_bytes() in PRODUCT instead of this function. + guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); +#endif + size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); + size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); + assert(allocated_capacity_bytes() == class_capacity + non_class_capacity, + err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT + " class_capacity + non_class_capacity " SIZE_FORMAT + " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, + allocated_capacity_bytes(), class_capacity + non_class_capacity, + class_capacity, non_class_capacity)); + + return class_capacity + non_class_capacity; } // Total space reserved in all Metaspaces @@ -198,6 +303,11 @@ static void print_waste(outputStream* out); static void dump(outputStream* out); static void verify_free_chunks(); + // Checks that the values returned by allocated_capacity_bytes() and + // capacity_bytes_slow() are the same. + static void verify_capacity(); + static void verify_used(); + static void verify_metrics(); }; // Metaspace are deallocated when their class loader are GC'ed. @@ -232,7 +342,6 @@ public: static size_t capacity_until_GC() { return _capacity_until_GC; } - static size_t capacity_until_GC_in_bytes() { return _capacity_until_GC * BytesPerWord; } static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; } static void dec_capacity_until_GC(size_t v) { _capacity_until_GC = _capacity_until_GC > v ? 
_capacity_until_GC - v : 0; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/metaspaceCounters.cpp --- a/src/share/vm/memory/metaspaceCounters.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/metaspaceCounters.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -29,6 +29,16 @@ MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL; +size_t MetaspaceCounters::calc_total_capacity() { + // The total capacity is the sum of + // 1) capacity of Metachunks in use by all Metaspaces + // 2) unused space at the end of each Metachunk + // 3) space in the freelist + size_t total_capacity = MetaspaceAux::allocated_capacity_bytes() + + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes(); + return total_capacity; +} + MetaspaceCounters::MetaspaceCounters() : _capacity(NULL), _used(NULL), @@ -36,8 +46,8 @@ if (UsePerfData) { size_t min_capacity = MetaspaceAux::min_chunk_size(); size_t max_capacity = MetaspaceAux::reserved_in_bytes(); - size_t curr_capacity = MetaspaceAux::capacity_in_bytes(); - size_t used = MetaspaceAux::used_in_bytes(); + size_t curr_capacity = calc_total_capacity(); + size_t used = MetaspaceAux::allocated_used_bytes(); initialize(min_capacity, max_capacity, curr_capacity, used); } @@ -82,15 +92,13 @@ void MetaspaceCounters::update_capacity() { assert(UsePerfData, "Should not be called unless being used"); - assert(_capacity != NULL, "Should be initialized"); - size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes(); - _capacity->set_value(capacity_in_bytes); + size_t total_capacity = calc_total_capacity(); + _capacity->set_value(total_capacity); } void MetaspaceCounters::update_used() { assert(UsePerfData, "Should not be called unless being used"); - assert(_used != NULL, "Should be initialized"); - size_t used_in_bytes = MetaspaceAux::used_in_bytes(); + size_t used_in_bytes = MetaspaceAux::allocated_used_bytes(); _used->set_value(used_in_bytes); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/metaspaceCounters.hpp --- a/src/share/vm/memory/metaspaceCounters.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/metaspaceCounters.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -37,6 +37,7 @@ size_t max_capacity, size_t curr_capacity, size_t used); + size_t calc_total_capacity(); public: MetaspaceCounters(); ~MetaspaceCounters(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/metaspaceShared.cpp --- a/src/share/vm/memory/metaspaceShared.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/metaspaceShared.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -243,6 +243,147 @@ bool reading() const { return false; } }; +// This is for dumping detailed statistics for the allocations +// in the shared spaces. 
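Stepping back to MetaspaceCounters::calc_total_capacity() above: the counter's notion of capacity is the sum of three disjoint buckets, namely space in Metachunks currently handed to Metaspaces, the unused tails of those chunks, and chunks parked on the free list. A toy calculation with made-up numbers (the real inputs come from MetaspaceAux):

#include <cstddef>
#include <cstdio>

int main() {
  size_t allocated_capacity = 6u * 1024 * 1024; // bytes inside Metachunks owned by Metaspaces
  size_t free_in_chunks     = 512u * 1024;      // unused space at the end of those chunks
  size_t freelist_chunks    = 1024u * 1024;     // chunks returned to the free list
  size_t total_capacity = allocated_capacity + free_in_chunks + freelist_chunks;
  printf("capacity = %zu bytes\n", total_capacity); // prints 7864320
  return 0;
}

The DumpAllocClosure defined next takes the complementary, per-object-type view of the shared-space allocations that record_allocation() collected.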
+class DumpAllocClosure : public Metaspace::AllocRecordClosure {
+public:
+
+  // Here's poor man's enum inheritance
+#define SHAREDSPACE_OBJ_TYPES_DO(f) \
+  METASPACE_OBJ_TYPES_DO(f) \
+  f(SymbolHashentry) \
+  f(SymbolBuckets) \
+  f(Other)
+
+#define SHAREDSPACE_OBJ_TYPE_DECLARE(name) name ## Type,
+#define SHAREDSPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
+  enum Type {
+    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
+    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_DECLARE)
+    _number_of_types
+  };
+
+  static const char * type_name(Type type) {
+    switch(type) {
+    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_NAME_CASE)
+    default:
+      ShouldNotReachHere();
+      return NULL;
+    }
+  }
+
+public:
+  enum {
+    RO = 0,
+    RW = 1
+  };
+
+  int _counts[2][_number_of_types];
+  int _bytes [2][_number_of_types];
+  int _which;
+
+  DumpAllocClosure() {
+    memset(_counts, 0, sizeof(_counts));
+    memset(_bytes,  0, sizeof(_bytes));
+  };
+
+  void iterate_metaspace(Metaspace* space, int which) {
+    assert(which == RO || which == RW, "sanity");
+    _which = which;
+    space->iterate(this);
+  }
+
+  virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
+    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+    _counts[_which][type] ++;
+    _bytes [_which][type] += byte_size;
+  }
+
+  void dump_stats(int ro_all, int rw_all, int md_all, int mc_all);
+};
+
+void DumpAllocClosure::dump_stats(int ro_all, int rw_all, int md_all, int mc_all) {
+  rw_all += (md_all + mc_all); // md and mc are all mapped Read/Write
+  int other_bytes = md_all + mc_all;
+
+  // Calculate size of data that was not allocated by Metaspace::allocate()
+  int symbol_count  = _counts[RO][MetaspaceObj::SymbolType];
+  int symhash_bytes = symbol_count * sizeof (HashtableEntry<Symbol*, mtSymbol>);
+  int symbuck_count = SymbolTable::the_table()->table_size();
+  int symbuck_bytes = symbuck_count * sizeof(HashtableBucket<mtSymbol>);
+
+  _counts[RW][SymbolHashentryType] = symbol_count;
+  _bytes [RW][SymbolHashentryType] = symhash_bytes;
+  other_bytes -= symhash_bytes;
+
+  _counts[RW][SymbolBucketsType] = symbuck_count;
+  _bytes [RW][SymbolBucketsType] = symbuck_bytes;
+  other_bytes -= symbuck_bytes;
+
+  // TODO: count things like dictionary, vtable, etc
+  _bytes[RW][OtherType] = other_bytes;
+
+  // prevent divide-by-zero
+  if (ro_all < 1) {
+    ro_all = 1;
+  }
+  if (rw_all < 1) {
+    rw_all = 1;
+  }
+
+  int all_ro_count = 0;
+  int all_ro_bytes = 0;
+  int all_rw_count = 0;
+  int all_rw_bytes = 0;
+
+  const char *fmt = "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f";
+  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
+  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";
+
+  tty->print_cr("Detailed metadata info (rw includes md and mc):");
+  tty->print_cr(hdr);
+  tty->print_cr(sep);
+  for (int type = 0; type < int(_number_of_types); type ++) {
+    const char *name = type_name((Type)type);
+    int ro_count = _counts[RO][type];
+    int ro_bytes = _bytes [RO][type];
+    int rw_count = _counts[RW][type];
+    int rw_bytes = _bytes [RW][type];
+    int count = ro_count + rw_count;
+    int bytes = ro_bytes + rw_bytes;
+
+    double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
+    double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
+    double perc    = 100.0 * double(bytes)    / double(ro_all + rw_all);
+
+    tty->print_cr(fmt, name,
+                  ro_count, ro_bytes, ro_perc,
+                  rw_count, rw_bytes, rw_perc,
+                  count, bytes, perc);
+
+    all_ro_count +=
ro_count; + all_ro_bytes += ro_bytes; + all_rw_count += rw_count; + all_rw_bytes += rw_bytes; + } + + int all_count = all_ro_count + all_rw_count; + int all_bytes = all_ro_bytes + all_rw_bytes; + + double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all); + double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all); + double all_perc = 100.0 * double(all_bytes) / double(ro_all + rw_all); + + tty->print_cr(sep); + tty->print_cr(fmt, "Total", + all_ro_count, all_ro_bytes, all_ro_perc, + all_rw_count, all_rw_bytes, all_rw_perc, + all_count, all_bytes, all_perc); + + assert(all_ro_bytes == ro_all, "everything should have been counted"); + assert(all_rw_bytes == rw_all, "everything should have been counted"); +} // Populate the shared space. @@ -376,18 +517,17 @@ const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT; Metaspace* ro_space = _loader_data->ro_metaspace(); Metaspace* rw_space = _loader_data->rw_metaspace(); - const size_t BPW = BytesPerWord; // Allocated size of each space (may not be all occupied) - const size_t ro_alloced = ro_space->capacity_words(Metaspace::NonClassType) * BPW; - const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW; + const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType); + const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType); const size_t md_alloced = md_end-md_low; const size_t mc_alloced = mc_end-mc_low; const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced; // Occupied size of each space. - const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW; - const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW; + const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType); + const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType); const size_t md_bytes = size_t(md_top - md_low); const size_t mc_bytes = size_t(mc_top - mc_low); @@ -455,6 +595,14 @@ mapinfo->close(); memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*)); + + if (PrintSharedSpaces) { + DumpAllocClosure dac; + dac.iterate_metaspace(_loader_data->ro_metaspace(), DumpAllocClosure::RO); + dac.iterate_metaspace(_loader_data->rw_metaspace(), DumpAllocClosure::RW); + + dac.dump_stats(int(ro_bytes), int(rw_bytes), int(md_bytes), int(mc_bytes)); + } } static void link_shared_classes(Klass* obj, TRAPS) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/oopFactory.hpp --- a/src/share/vm/memory/oopFactory.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/oopFactory.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" +#include "memory/referenceType.hpp" #include "memory/universe.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.hpp" diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/referenceProcessor.cpp --- a/src/share/vm/memory/referenceProcessor.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/referenceProcessor.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,8 @@ #include "precompiled.hpp" #include "classfile/javaClasses.hpp" #include "classfile/systemDictionary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/referencePolicy.hpp" @@ -184,11 +186,20 @@ // past clock value. } -void ReferenceProcessor::process_discovered_references( +size_t ReferenceProcessor::total_count(DiscoveredList lists[]) { + size_t total = 0; + for (uint i = 0; i < _max_num_q; ++i) { + total += lists[i].length(); + } + return total; +} + +ReferenceProcessorStats ReferenceProcessor::process_discovered_references( BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, - AbstractRefProcTaskExecutor* task_executor) { + AbstractRefProcTaskExecutor* task_executor, + GCTimer* gc_timer) { NOT_PRODUCT(verify_ok_to_handle_reflists()); assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); @@ -206,34 +217,43 @@ _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); bool trace_time = PrintGCDetails && PrintReferenceGC; + // Soft references + size_t soft_count = 0; { - TraceTime tt("SoftReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, - is_alive, keep_alive, complete_gc, task_executor); + GCTraceTime tt("SoftReference", trace_time, false, gc_timer); + soft_count = + process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, + is_alive, keep_alive, complete_gc, task_executor); } update_soft_ref_master_clock(); // Weak references + size_t weak_count = 0; { - TraceTime tt("WeakReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredWeakRefs, NULL, true, - is_alive, keep_alive, complete_gc, task_executor); + GCTraceTime tt("WeakReference", trace_time, false, gc_timer); + weak_count = + process_discovered_reflist(_discoveredWeakRefs, NULL, true, + is_alive, keep_alive, complete_gc, task_executor); } // Final references + size_t final_count = 0; { - TraceTime tt("FinalReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredFinalRefs, NULL, false, - is_alive, keep_alive, complete_gc, task_executor); + GCTraceTime tt("FinalReference", trace_time, false, gc_timer); + final_count = + process_discovered_reflist(_discoveredFinalRefs, NULL, false, + is_alive, keep_alive, complete_gc, task_executor); } // Phantom references + size_t phantom_count = 0; { - TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredPhantomRefs, NULL, false, - is_alive, keep_alive, complete_gc, task_executor); + GCTraceTime tt("PhantomReference", trace_time, false, gc_timer); + phantom_count = + process_discovered_reflist(_discoveredPhantomRefs, NULL, false, + is_alive, keep_alive, complete_gc, task_executor); } // Weak global JNI references. It would make more sense (semantically) to @@ -242,7 +262,7 @@ // thus use JNI weak references to circumvent the phantom references and // resurrect a "post-mortem" object. 
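The restructuring above is uniform across the four reference kinds: each phase runs under a scoped GCTraceTime, process_discovered_reflist() now reports how many references it saw, and the four per-kind totals travel back to the caller in a small ReferenceProcessorStats value. A compact sketch of that phase pattern in plain C++ (std::chrono standing in for GCTimer, a dummy process_list() for process_discovered_reflist()); the JNI weak-reference block resuming below follows the same scoped-timer shape:

#include <chrono>
#include <cstddef>
#include <cstdio>

struct RefStats {
  size_t soft_count, weak_count, final_count, phantom_count;
};

class ScopedPhase {                        // stand-in for GCTraceTime
  const char* _name;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedPhase(const char* name)
      : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhase() {
    long long us = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - _start).count();
    printf("[%s, %lld us]\n", _name, us);
  }
};

static size_t process_list(size_t backlog) { // stand-in for process_discovered_reflist()
  return backlog;                            // returns how many refs were traversed
}

RefStats process_all_references() {
  RefStats stats = {0, 0, 0, 0};
  { ScopedPhase t("SoftReference");    stats.soft_count    = process_list(10); }
  { ScopedPhase t("WeakReference");    stats.weak_count    = process_list(20); }
  { ScopedPhase t("FinalReference");   stats.final_count   = process_list(3);  }
  { ScopedPhase t("PhantomReference"); stats.phantom_count = process_list(1);  }
  return stats;  // the caller forwards these to GC tracing/event code
}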
{ - TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty); + GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer); if (task_executor != NULL) { task_executor->set_single_threaded_mode(); } @@ -251,6 +271,8 @@ process_phaseGraalNMethods(keep_alive, complete_gc); #endif } + + return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count); } #ifndef PRODUCT @@ -259,7 +281,6 @@ class AlwaysAliveClosure: public BoolObjectClosure { public: virtual bool do_object_b(oop obj) { return true; } - virtual void do_object(oop obj) { assert(false, "Don't call"); } }; class CountHandleClosure: public OopClosure { @@ -894,7 +915,7 @@ balance_queues(_discoveredPhantomRefs); } -void +size_t ReferenceProcessor::process_discovered_reflist( DiscoveredList refs_lists[], ReferencePolicy* policy, @@ -917,12 +938,11 @@ must_balance) { balance_queues(refs_lists); } + + size_t total_list_count = total_count(refs_lists); + if (PrintReferenceGC && PrintGCDetails) { - size_t total = 0; - for (uint i = 0; i < _max_num_q; ++i) { - total += refs_lists[i].length(); - } - gclog_or_tty->print(", %u refs", total); + gclog_or_tty->print(", %u refs", total_list_count); } // Phase 1 (soft refs only): @@ -967,6 +987,8 @@ is_alive, keep_alive, complete_gc); } } + + return total_list_count; } void ReferenceProcessor::clean_up_discovered_references() { @@ -1282,14 +1304,15 @@ BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, - YieldClosure* yield) { + YieldClosure* yield, + GCTimer* gc_timer) { NOT_PRODUCT(verify_ok_to_handle_reflists()); // Soft references { - TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, - false, gclog_or_tty); + GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1301,8 +1324,8 @@ // Weak references { - TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, - false, gclog_or_tty); + GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1314,8 +1337,8 @@ // Final references { - TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, - false, gclog_or_tty); + GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1327,8 +1350,8 @@ // Phantom references { - TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, - false, gclog_or_tty); + GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/referenceProcessor.hpp --- a/src/share/vm/memory/referenceProcessor.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/referenceProcessor.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,12 @@ #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP #include "memory/referencePolicy.hpp" +#include "memory/referenceProcessorStats.hpp" +#include "memory/referenceType.hpp" #include "oops/instanceRefKlass.hpp" +class GCTimer; + // ReferenceProcessor class encapsulates the per-"collector" processing // of java.lang.Reference objects for GC. The interface is useful for supporting // a generational abstraction, in particular when there are multiple @@ -204,6 +208,10 @@ }; class ReferenceProcessor : public CHeapObj { + + private: + size_t total_count(DiscoveredList lists[]); + protected: // Compatibility with pre-4965777 JDK's static bool _pending_list_uses_discovered_field; @@ -282,13 +290,13 @@ } // Process references with a certain reachability level. - void process_discovered_reflist(DiscoveredList refs_lists[], - ReferencePolicy* policy, - bool clear_referent, - BoolObjectClosure* is_alive, - OopClosure* keep_alive, - VoidClosure* complete_gc, - AbstractRefProcTaskExecutor* task_executor); + size_t process_discovered_reflist(DiscoveredList refs_lists[], + ReferencePolicy* policy, + bool clear_referent, + BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + AbstractRefProcTaskExecutor* task_executor); void process_phaseJNI(BoolObjectClosure* is_alive, OopClosure* keep_alive, @@ -353,7 +361,8 @@ void preclean_discovered_references(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, - YieldClosure* yield); + YieldClosure* yield, + GCTimer* gc_timer); // Delete entries in the discovered lists that have // either a null referent or are not active. Such @@ -504,12 +513,13 @@ bool discover_reference(oop obj, ReferenceType rt); // Process references found during GC (called by the garbage collector) - void process_discovered_references(BoolObjectClosure* is_alive, - OopClosure* keep_alive, - VoidClosure* complete_gc, - AbstractRefProcTaskExecutor* task_executor); + ReferenceProcessorStats + process_discovered_references(BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + AbstractRefProcTaskExecutor* task_executor, + GCTimer *gc_timer); - public: // Enqueue references at end of GC (called by the garbage collector) bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/referenceProcessorStats.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/memory/referenceProcessorStats.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
+#define SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+class ReferenceProcessor;
+
+// ReferenceProcessorStats contains statistics about how many references
+// have been traversed when processing references during garbage collection.
+class ReferenceProcessorStats {
+  size_t _soft_count;
+  size_t _weak_count;
+  size_t _final_count;
+  size_t _phantom_count;
+
+ public:
+  ReferenceProcessorStats() :
+    _soft_count(0),
+    _weak_count(0),
+    _final_count(0),
+    _phantom_count(0) {}
+
+  ReferenceProcessorStats(size_t soft_count,
+                          size_t weak_count,
+                          size_t final_count,
+                          size_t phantom_count) :
+    _soft_count(soft_count),
+    _weak_count(weak_count),
+    _final_count(final_count),
+    _phantom_count(phantom_count)
+  {}
+
+  size_t soft_count() const {
+    return _soft_count;
+  }
+
+  size_t weak_count() const {
+    return _weak_count;
+  }
+
+  size_t final_count() const {
+    return _final_count;
+  }
+
+  size_t phantom_count() const {
+    return _phantom_count;
+  }
+};
+#endif
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/referenceType.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/referenceType.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_REFERENCETYPE_HPP
+#define SHARE_VM_MEMORY_REFERENCETYPE_HPP
+
+#include "utilities/debug.hpp"
+
+// ReferenceType is used to distinguish between java/lang/ref/Reference subclasses
+
+enum ReferenceType {
+  REF_NONE,      // Regular class
+  REF_OTHER,     // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
+  REF_SOFT,      // Subclass of java/lang/ref/SoftReference
+  REF_WEAK,      // Subclass of java/lang/ref/WeakReference
+  REF_FINAL,     // Subclass of java/lang/ref/FinalReference
+  REF_PHANTOM    // Subclass of java/lang/ref/PhantomReference
+};
+
+#endif // SHARE_VM_MEMORY_REFERENCETYPE_HPP
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/sharedHeap.cpp
--- a/src/share/vm/memory/sharedHeap.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/memory/sharedHeap.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -45,6 +45,7 @@
   SH_PS_FlatProfiler_oops_do,
   SH_PS_Management_oops_do,
   SH_PS_SystemDictionary_oops_do,
+  SH_PS_ClassLoaderDataGraph_oops_do,
   SH_PS_jvmti_oops_do,
   SH_PS_StringTable_oops_do,
   SH_PS_CodeCache_oops_do,
@@ -173,15 +174,21 @@
   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
     if (so & SO_AllClasses) {
       SystemDictionary::oops_do(roots);
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
     } else if (so & SO_SystemClasses) {
       SystemDictionary::always_strong_oops_do(roots);
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
     } else {
       fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
     }
   }
 
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
+    if (so & SO_AllClasses) {
+      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
+    } else if (so & SO_SystemClasses) {
+      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
+    }
+  }
+
   if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
     if (so & SO_Strings) {
       StringTable::oops_do(roots);
@@ -212,20 +219,18 @@
 
 class AlwaysTrueClosure: public BoolObjectClosure {
 public:
-  void do_object(oop p) { ShouldNotReachHere(); }
   bool do_object_b(oop p) { return true; }
 };
 static AlwaysTrueClosure always_true;
 
 void SharedHeap::process_weak_roots(OopClosure* root_closure,
-                                    CodeBlobClosure* code_roots,
-                                    OopClosure* non_root_closure) {
+                                    CodeBlobClosure* code_roots) {
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, root_closure);
 
   CodeCache::blobs_do(code_roots);
-  StringTable::oops_do(root_closure);
-  }
+  StringTable::oops_do(root_closure);
+}
 
 void SharedHeap::set_barrier_set(BarrierSet* bs) {
   _barrier_set = bs;
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/sharedHeap.hpp
--- a/src/share/vm/memory/sharedHeap.hpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/memory/sharedHeap.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -249,8 +249,7 @@
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table.
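One structural point in the sharedHeap.cpp hunk above deserves a note: pulling the ClassLoaderDataGraph walk out of the SystemDictionary subtask and giving it its own SH_PS_ClassLoaderDataGraph_oops_do slot lets a different GC worker claim it, since each enumerated subtask is taken by exactly one thread. The claim is essentially an atomic test-and-set per task; a rough standalone sketch of the idea (plain std::atomic, not HotSpot's SubTasksDone):

#include <atomic>

enum Subtask {
  Task_SystemDictionary_oops_do,
  Task_ClassLoaderDataGraph_oops_do,
  Task_StringTable_oops_do,
  Task_count
};

class SubtaskClaims {
  std::atomic<bool> _claimed[Task_count];
public:
  SubtaskClaims() {
    for (int i = 0; i < Task_count; i++) _claimed[i] = false;
  }
  // Returns true if some other worker already owns this subtask.
  bool is_task_claimed(Subtask t) {
    return _claimed[t].exchange(true);
  }
};

void worker_process_roots(SubtaskClaims* claims) {
  if (!claims->is_task_claimed(Task_ClassLoaderDataGraph_oops_do)) {
    // exactly one worker reaches this point and walks the CLD graph roots
  }
}

The process_weak_roots() declaration continuing below shows the matching cleanup on the weak-root side, where the unused non_root_closure argument is dropped.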
void process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure); + CodeBlobClosure* code_roots); // The functions below are helper functions that a subclass of // "SharedHeap" can use in the implementation of its virtual diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/space.hpp --- a/src/share/vm/memory/space.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/space.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -537,9 +537,8 @@ * Occasionally, we want to ensure a full compaction, which is determined \ * by the MarkSweepAlwaysCompactCount parameter. \ */ \ - int invocations = MarkSweep::total_invocations(); \ - bool skip_dead = (MarkSweepAlwaysCompactCount < 1) \ - ||((invocations % MarkSweepAlwaysCompactCount) != 0); \ + uint invocations = MarkSweep::total_invocations(); \ + bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \ \ size_t allowed_deadspace = 0; \ if (skip_dead) { \ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/universe.cpp --- a/src/share/vm/memory/universe.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/universe.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -228,11 +228,8 @@ void Universe::check_alignment(uintx size, uintx alignment, const char* name) { if (size < alignment || size % alignment != 0) { - ResourceMark rm; - stringStream st; - st.print("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment); - char* error = st.as_string(); - vm_exit_during_initialization(error); + vm_exit_during_initialization( + err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment)); } } @@ -822,12 +819,14 @@ // keep the Universe::narrow_oop_base() set in Universe::reserve_heap() Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); if (verbose) { - tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base()); + tty->print(", %s: "PTR_FORMAT, + narrow_oop_mode_to_string(HeapBasedNarrowOop), + Universe::narrow_oop_base()); } } else { Universe::set_narrow_oop_base(0); if (verbose) { - tty->print(", zero based Compressed Oops"); + tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop)); } #ifdef _WIN64 if (!Universe::narrow_oop_use_implicit_null_checks()) { @@ -842,7 +841,7 @@ } else { Universe::set_narrow_oop_shift(0); if (verbose) { - tty->print(", 32-bits Oops"); + tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop)); } } } @@ -916,7 +915,7 @@ } if (!total_rs.is_reserved()) { - vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap %d bytes", total_reserved)); + vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K)); return total_rs; } @@ -949,6 +948,33 @@ } +const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) { + switch (mode) { + case UnscaledNarrowOop: + return "32-bits Oops"; + case ZeroBasedNarrowOop: + return "zero based Compressed Oops"; + case HeapBasedNarrowOop: + return "Compressed Oops with base"; + } + + ShouldNotReachHere(); + return ""; +} + + +Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() { + if (narrow_oop_base() != 0) { + return HeapBasedNarrowOop; + } + + if (narrow_oop_shift() != 0) { + return ZeroBasedNarrowOop; + } + + return UnscaledNarrowOop; +} + void universe2_init() { EXCEPTION_MARK; @@ -1270,7 +1296,7 @@ st->print_cr("}"); } -void Universe::verify(bool silent, VerifyOption option) { +void 
Universe::verify(VerifyOption option, const char* prefix, bool silent) { // The use of _verify_in_progress is a temporary work around for // 6320749. Don't bother with a creating a class to set and clear // it since it is only used in this method and the control flow is @@ -1287,11 +1313,12 @@ HandleMark hm; // Handles created during verification can be zapped _verify_count++; + if (!silent) gclog_or_tty->print(prefix); if (!silent) gclog_or_tty->print("[Verifying "); if (!silent) gclog_or_tty->print("threads "); Threads::verify(); + if (!silent) gclog_or_tty->print("heap "); heap()->verify(silent, option); - if (!silent) gclog_or_tty->print("syms "); SymbolTable::verify(); if (!silent) gclog_or_tty->print("strs "); @@ -1424,25 +1451,25 @@ } -void ActiveMethodOopsCache::add_previous_version(Method* const method) { +void ActiveMethodOopsCache::add_previous_version(Method* method) { assert(Thread::current()->is_VM_thread(), "only VMThread can add previous versions"); // Only append the previous method if it is executing on the stack. if (method->on_stack()) { - if (_prev_methods == NULL) { - // This is the first previous version so make some space. - // Start with 2 elements under the assumption that the class - // won't be redefined much. + if (_prev_methods == NULL) { + // This is the first previous version so make some space. + // Start with 2 elements under the assumption that the class + // won't be redefined much. _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray(2, true); - } + } - // RC_TRACE macro has an embedded ResourceMark - RC_TRACE(0x00000100, - ("add: %s(%s): adding prev version ref for cached method @%d", - method->name()->as_C_string(), method->signature()->as_C_string(), - _prev_methods->length())); + // RC_TRACE macro has an embedded ResourceMark + RC_TRACE(0x00000100, + ("add: %s(%s): adding prev version ref for cached method @%d", + method->name()->as_C_string(), method->signature()->as_C_string(), + _prev_methods->length())); _prev_methods->append(method); } @@ -1463,16 +1490,17 @@ MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method); } else { // RC_TRACE macro has an embedded ResourceMark - RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive", - method->name()->as_C_string(), method->signature()->as_C_string(), i)); + RC_TRACE(0x00000400, + ("add: %s(%s): previous cached method @%d is alive", + method->name()->as_C_string(), method->signature()->as_C_string(), i)); } } } // end add_previous_version() -bool ActiveMethodOopsCache::is_same_method(Method* const method) const { +bool ActiveMethodOopsCache::is_same_method(const Method* method) const { InstanceKlass* ik = InstanceKlass::cast(klass()); - Method* check_method = ik->method_with_idnum(method_idnum()); + const Method* check_method = ik->method_with_idnum(method_idnum()); assert(check_method != NULL, "sanity check"); if (check_method == method) { // done with the easy case diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/memory/universe.hpp --- a/src/share/vm/memory/universe.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/memory/universe.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -90,8 +90,8 @@ ActiveMethodOopsCache() { _prev_methods = NULL; } ~ActiveMethodOopsCache(); - void add_previous_version(Method* const method); - bool is_same_method(Method* const method) const; + void add_previous_version(Method* method); + bool is_same_method(const Method* method) const; }; @@ -253,19 +253,6 @@ return m; } - // Narrow Oop encoding mode: - // 0 - Use 
32-bits oops without encoding when - // NarrowOopHeapBaseMin + heap_size < 4Gb - // 1 - Use zero based compressed oops with encoding when - // NarrowOopHeapBaseMin + heap_size < 32Gb - // 2 - Use compressed oops with heap base + encoding. - enum NARROW_OOP_MODE { - UnscaledNarrowOop = 0, - ZeroBasedNarrowOop = 1, - HeapBasedNarrowOop = 2 - }; - static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode); - static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode); static void set_narrow_oop_base(address base) { assert(UseCompressedOops, "no compressed oops?"); _narrow_oop._base = base; @@ -380,6 +367,21 @@ static CollectedHeap* heap() { return _collectedHeap; } // For UseCompressedOops + // Narrow Oop encoding mode: + // 0 - Use 32-bits oops without encoding when + // NarrowOopHeapBaseMin + heap_size < 4Gb + // 1 - Use zero based compressed oops with encoding when + // NarrowOopHeapBaseMin + heap_size < 32Gb + // 2 - Use compressed oops with heap base + encoding. + enum NARROW_OOP_MODE { + UnscaledNarrowOop = 0, + ZeroBasedNarrowOop = 1, + HeapBasedNarrowOop = 2 + }; + static NARROW_OOP_MODE narrow_oop_mode(); + static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode); + static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode); + static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode); static address narrow_oop_base() { return _narrow_oop._base; } static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); } static int narrow_oop_shift() { return _narrow_oop._shift; } @@ -445,12 +447,12 @@ // Debugging static bool verify_in_progress() { return _verify_in_progress; } - static void verify(bool silent, VerifyOption option); - static void verify(bool silent) { - verify(silent, VerifyOption_Default /* option */); + static void verify(VerifyOption option, const char* prefix, bool silent = VerifySilently); + static void verify(const char* prefix, bool silent = VerifySilently) { + verify(VerifyOption_Default, prefix, silent); } - static void verify() { - verify(false /* silent */); + static void verify(bool silent = VerifySilently) { + verify("", silent); } static int verify_count() { return _verify_count; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/annotations.cpp --- a/src/share/vm/oops/annotations.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/annotations.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -33,7 +33,7 @@ // Allocate annotations in metadata area Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) { - return new (loader_data, size(), true, THREAD) Annotations(); + return new (loader_data, size(), true, MetaspaceObj::AnnotationType, THREAD) Annotations(); } // helper diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/arrayKlass.cpp --- a/src/share/vm/oops/arrayKlass.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/arrayKlass.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -94,7 +94,7 @@ ResourceMark rm(THREAD); k->initialize_supers(super_klass(), CHECK); k->vtable()->initialize_vtable(false, CHECK); - java_lang_Class::create_mirror(k, CHECK); + java_lang_Class::create_mirror(k, Handle(NULL), CHECK); } GrowableArray* ArrayKlass::compute_secondary_supers(int num_extra_slots) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/constMethod.cpp --- a/src/share/vm/oops/constMethod.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/constMethod.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -40,7 +40,7 @@ MethodType method_type, TRAPS) { int 
size = ConstMethod::size(byte_code_size, sizes);
-  return new (loader_data, size, true, THREAD) ConstMethod(
+  return new (loader_data, size, true, MetaspaceObj::ConstMethodType, THREAD) ConstMethod(
     byte_code_size, sizes, method_type, size);
 }
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/constantPool.cpp
--- a/src/share/vm/oops/constantPool.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/oops/constantPool.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -40,6 +40,7 @@
 #include "runtime/init.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/synchronizer.hpp"
 #include "runtime/vframe.hpp"
 
 ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
@@ -54,7 +55,7 @@
   // the resolved_references array, which is recreated at startup time.
   // But that could be moved to InstanceKlass (although a pain to access from
   // assembly code).  Maybe it could be moved to the cpCache which is RW.
-  return new (loader_data, size, false, THREAD) ConstantPool(tags);
+  return new (loader_data, size, false, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
 }
 
 ConstantPool::ConstantPool(Array<u1>* tags) {
@@ -69,7 +70,6 @@
 
   // only set to non-zero if constant pool is merged by RedefineClasses
   set_version(0);
-  set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
 
   // initialize tag array
   int length = tags->length();
@@ -95,9 +95,6 @@
 void ConstantPool::release_C_heap_structures() {
   // walk constant pool and decrement symbol reference counts
   unreference_symbols();
-
-  delete _lock;
-  set_lock(NULL);
 }
 
 objArrayOop ConstantPool::resolved_references() const {
@@ -154,9 +151,6 @@
       ClassLoaderData* loader_data = pool_holder()->class_loader_data();
       set_resolved_references(loader_data->add_handle(refs_handle));
     }
-
-    // Also need to recreate the mutex.  Make sure this matches the constructor
-    set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
   }
 }
 
@@ -167,7 +161,23 @@
   set_resolved_reference_length(
     resolved_references() != NULL ? resolved_references()->length() : 0);
   set_resolved_references(NULL);
-  set_lock(NULL);
+}
+
+oop ConstantPool::lock() {
+  if (_pool_holder) {
+    // We re-use the _pool_holder's init_lock to reduce footprint.
+    // Notes on deadlocks:
+    // [1] This lock is a Java oop, so it can be recursively locked by
+    //     the same thread without self-deadlocks.
+    // [2] Deadlock will happen if there is circular dependency between
+    //     the <clinit> of two Java classes. However, in this case,
+    //     the deadlock would have happened long before we reach
+    //     ConstantPool::lock(), so reusing init_lock does not
+    //     increase the possibility of deadlock.
+    return _pool_holder->init_lock();
+  } else {
+    return NULL;
+  }
 }
 
 int ConstantPool::cp_to_object_index(int cp_index) {
@@ -208,7 +218,9 @@
   Symbol* name = NULL;
   Handle  loader;
 
-  { MonitorLockerEx ml(this_oop->lock());
+  {
+    oop cplock = this_oop->lock();
+    ObjectLocker ol(cplock, THREAD, cplock != NULL);
 
     if (this_oop->tag_at(which).is_unresolved_klass()) {
       if (this_oop->tag_at(which).is_unresolved_klass_in_error()) {
@@ -255,7 +267,8 @@
       bool throw_orig_error = false;
       {
-        MonitorLockerEx ml(this_oop->lock());
+        oop cplock = this_oop->lock();
+        ObjectLocker ol(cplock, THREAD, cplock != NULL);
 
         // some other thread has beaten us and has resolved the class.
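Every MonitorLockerEx-to-ObjectLocker rewrite in this file follows the same three-step discipline: fetch the lock oop (which may be null while the pool's holder class is still unregistered, when no other thread can touch the pool), lock only if non-null, then re-check the resolution state under the lock, which is what the is_klass() test resuming just below does. The null-tolerant guard in miniature, with std::mutex standing in for the oop lock and ObjectLocker:

#include <mutex>

class MaybeLocker {           // cf. ObjectLocker ol(cplock, THREAD, cplock != NULL)
  std::mutex* _m;             // may be null: then locking is skipped entirely
public:
  explicit MaybeLocker(std::mutex* m) : _m(m) { if (_m != nullptr) _m->lock(); }
  ~MaybeLocker() { if (_m != nullptr) _m->unlock(); }
};

void resolve_entry(std::mutex* cplock, bool* resolved) {
  MaybeLocker ol(cplock);     // cplock is null before the holder class is registered
  if (*resolved) {
    return;                   // another thread beat us to it; reuse its result
  }
  *resolved = true;           // do the real resolution work under the lock
}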
if (this_oop->tag_at(which).is_klass()) { @@ -323,7 +336,8 @@ } return k(); } else { - MonitorLockerEx ml(this_oop->lock()); + oop cplock = this_oop->lock(); + ObjectLocker ol(cplock, THREAD, cplock != NULL); // Only updated constant pool - if it is resolved. do_resolve = this_oop->tag_at(which).is_unresolved_klass(); if (do_resolve) { @@ -619,7 +633,8 @@ int tag, TRAPS) { ResourceMark rm; Symbol* error = PENDING_EXCEPTION->klass()->name(); - MonitorLockerEx ml(this_oop->lock()); // lock cpool to change tag. + oop cplock = this_oop->lock(); + ObjectLocker ol(cplock, THREAD, cplock != NULL); // lock cpool to change tag. int error_tag = (tag == JVM_CONSTANT_MethodHandle) ? JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError; @@ -780,7 +795,8 @@ if (cache_index >= 0) { // Cache the oop here also. Handle result_handle(THREAD, result_oop); - MonitorLockerEx ml(this_oop->lock()); // don't know if we really need this + oop cplock = this_oop->lock(); + ObjectLocker ol(cplock, THREAD, cplock != NULL); // don't know if we really need this oop result = this_oop->resolved_references()->obj_at(cache_index); // Benign race condition: resolved_references may already be filled in while we were trying to lock. // The important thing here is that all threads pick up the same result. @@ -1043,24 +1059,14 @@ case JVM_CONSTANT_InvokeDynamic: { - int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1); - int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2); - bool match = compare_entry_to(k1, cp2, k2, CHECK_false); - if (!match) return false; - k1 = invoke_dynamic_name_and_type_ref_index_at(index1); - k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2); - match = compare_entry_to(k1, cp2, k2, CHECK_false); - if (!match) return false; - int argc = invoke_dynamic_argument_count_at(index1); - if (argc == cp2->invoke_dynamic_argument_count_at(index2)) { - for (int j = 0; j < argc; j++) { - k1 = invoke_dynamic_argument_index_at(index1, j); - k2 = cp2->invoke_dynamic_argument_index_at(index2, j); - match = compare_entry_to(k1, cp2, k2, CHECK_false); - if (!match) return false; - } - return true; // got through loop; all elements equal - } + int k1 = invoke_dynamic_name_and_type_ref_index_at(index1); + int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2); + int i1 = invoke_dynamic_bootstrap_specifier_index(index1); + int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2); + // separate statements and variables because CHECK_false is used + bool match_entry = compare_entry_to(k1, cp2, k2, CHECK_false); + bool match_operand = compare_operand_to(i1, cp2, i2, CHECK_false); + return (match_entry && match_operand); } break; case JVM_CONSTANT_String: @@ -1095,6 +1101,80 @@ } // end compare_entry_to() +// Resize the operands array with delta_len and delta_size. +// Used in RedefineClasses for CP merge. +void ConstantPool::resize_operands(int delta_len, int delta_size, TRAPS) { + int old_len = operand_array_length(operands()); + int new_len = old_len + delta_len; + int min_len = (delta_len > 0) ? old_len : new_len; + + int old_size = operands()->length(); + int new_size = old_size + delta_size; + int min_size = (delta_size > 0) ? 
old_size : new_size;
+
+  ClassLoaderData* loader_data = pool_holder()->class_loader_data();
+  Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, new_size, CHECK);
+
+  // Set index in the resized array for existing elements only
+  for (int idx = 0; idx < min_len; idx++) {
+    int offset = operand_offset_at(idx);                       // offset in original array
+    operand_offset_at_put(new_ops, idx, offset + 2*delta_len); // offset in resized array
+  }
+  // Copy the bootstrap specifiers only
+  Copy::conjoint_memory_atomic(operands()->adr_at(2*old_len),
+                               new_ops->adr_at(2*new_len),
+                               (min_size - 2*min_len) * sizeof(u2));
+  // Explicitly deallocate old operands array.
+  // Note, it is not needed for 7u backport.
+  if (operands() != NULL) {  // the safety check
+    MetadataFactory::free_array<u2>(loader_data, operands());
+  }
+  set_operands(new_ops);
+} // end resize_operands()
+
+
+// Extend the operands array with the length and size of the ext_cp operands.
+// Used in RedefineClasses for CP merge.
+void ConstantPool::extend_operands(constantPoolHandle ext_cp, TRAPS) {
+  int delta_len = operand_array_length(ext_cp->operands());
+  if (delta_len == 0) {
+    return; // nothing to do
+  }
+  int delta_size = ext_cp->operands()->length();
+
+  assert(delta_len > 0 && delta_size > 0, "extended operands array must be bigger");
+
+  if (operand_array_length(operands()) == 0) {
+    ClassLoaderData* loader_data = pool_holder()->class_loader_data();
+    Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, delta_size, CHECK);
+    // The first element index defines the offset of second part
+    operand_offset_at_put(new_ops, 0, 2*delta_len); // offset in new array
+    set_operands(new_ops);
+  } else {
+    resize_operands(delta_len, delta_size, CHECK);
+  }
+
+} // end extend_operands()
+
+
+// Shrink the operands array to a smaller array with new_len length.
+// Used in RedefineClasses for CP merge.
+void ConstantPool::shrink_operands(int new_len, TRAPS) {
+  int old_len = operand_array_length(operands());
+  if (new_len == old_len) {
+    return; // nothing to do
+  }
+  assert(new_len < old_len, "shrunken operands array must be smaller");
+
+  int free_base  = operand_next_offset_at(new_len - 1);
+  int delta_len  = new_len - old_len;
+  int delta_size = 2*delta_len + free_base - operands()->length();
+
+  resize_operands(delta_len, delta_size, CHECK);
+
+} // end shrink_operands()
+
+
 void ConstantPool::copy_operands(constantPoolHandle from_cp,
                                  constantPoolHandle to_cp, TRAPS) {
@@ -1357,6 +1437,46 @@
 } // end find_matching_entry()
 
 
+// Compare this constant pool's bootstrap specifier at idx1 to the constant pool
+// cp2's bootstrap specifier at idx2.
+bool ConstantPool::compare_operand_to(int idx1, constantPoolHandle cp2, int idx2, TRAPS) {
+  int k1 = operand_bootstrap_method_ref_index_at(idx1);
+  int k2 = cp2->operand_bootstrap_method_ref_index_at(idx2);
+  bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
+
+  if (!match) {
+    return false;
+  }
+  int argc = operand_argument_count_at(idx1);
+  if (argc == cp2->operand_argument_count_at(idx2)) {
+    for (int j = 0; j < argc; j++) {
+      k1 = operand_argument_index_at(idx1, j);
+      k2 = cp2->operand_argument_index_at(idx2, j);
+      match = compare_entry_to(k1, cp2, k2, CHECK_false);
+      if (!match) {
+        return false;
+      }
+    }
+    return true;           // got through loop; all elements equal
+  }
+  return false;
+} // end compare_operand_to()
+
+// Search constant pool search_cp for a bootstrap specifier that matches
+// this constant pool's bootstrap specifier at pattern_i index.
+// Return the index of a matching bootstrap specifier or (-1) if there is no match.
+int ConstantPool::find_matching_operand(int pattern_i,
+                    constantPoolHandle search_cp, int search_len, TRAPS) {
+  for (int i = 0; i < search_len; i++) {
+    bool found = compare_operand_to(pattern_i, search_cp, i, CHECK_(-1));
+    if (found) {
+      return i;
+    }
+  }
+  return -1;  // bootstrap specifier not found; return unused index (-1)
+} // end find_matching_operand()
+
+
 #ifndef PRODUCT
 
 const char* ConstantPool::printable_name_at(int which) {
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/constantPool.hpp
--- a/src/share/vm/oops/constantPool.hpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/oops/constantPool.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -111,7 +111,6 @@
     int                _version;
   } _saved;
 
-  Monitor*             _lock;
 
   void set_tags(Array<u1>* tags)           { _tags = tags; }
   void tag_at_put(int which, jbyte t)      { tags()->at_put(which, t); }
@@ -355,7 +354,7 @@
 
   Symbol* klass_name_at(int which);  // Returns the name, w/o resolving.
 
-  Klass* resolved_klass_at(int which) {  // Used by Compiler
+  Klass* resolved_klass_at(int which) const {  // Used by Compiler
     guarantee(tag_at(which).is_klass(), "Corrupted constant pool");
     // Must do an acquire here in case another thread resolved the klass
     // behind our back, lest we later load stale values thru the oop.
@@ -567,6 +566,47 @@
     _indy_argc_offset = 1,  // u2 argc
     _indy_argv_offset = 2   // u2 argv[argc]
   };
+
+  // These functions are used in RedefineClasses for CP merge
+
+  int operand_offset_at(int bootstrap_specifier_index) {
+    assert(0 <= bootstrap_specifier_index &&
+           bootstrap_specifier_index < operand_array_length(operands()),
+           "Corrupted CP operands");
+    return operand_offset_at(operands(), bootstrap_specifier_index);
+  }
+  int operand_bootstrap_method_ref_index_at(int bootstrap_specifier_index) {
+    int offset = operand_offset_at(bootstrap_specifier_index);
+    return operands()->at(offset + _indy_bsm_offset);
+  }
+  int operand_argument_count_at(int bootstrap_specifier_index) {
+    int offset = operand_offset_at(bootstrap_specifier_index);
+    int argc = operands()->at(offset + _indy_argc_offset);
+    return argc;
+  }
+  int operand_argument_index_at(int bootstrap_specifier_index, int j) {
+    int offset = operand_offset_at(bootstrap_specifier_index);
+    return operands()->at(offset + _indy_argv_offset + j);
+  }
+  int operand_next_offset_at(int bootstrap_specifier_index) {
+    int offset = operand_offset_at(bootstrap_specifier_index) + _indy_argv_offset
+                   + operand_argument_count_at(bootstrap_specifier_index);
+    return offset;
+  }
+  // Compare a bootstrap specifier in the operands arrays
+  bool compare_operand_to(int bootstrap_specifier_index1, constantPoolHandle cp2,
+                          int bootstrap_specifier_index2, TRAPS);
+  // Find a bootstrap specifier in the operands array
+  int find_matching_operand(int bootstrap_specifier_index, constantPoolHandle search_cp,
+                            int operands_cur_len, TRAPS);
+  // Resize the operands array with delta_len and delta_size
+  void resize_operands(int delta_len, int delta_size, TRAPS);
+  // Extend the operands array with the length and size of the ext_cp operands
+  void extend_operands(constantPoolHandle ext_cp, TRAPS);
+  // Shrink the operands array to a smaller array with new_len length
+  void shrink_operands(int new_len, TRAPS);
+
+
   int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
     assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
     int op_base = invoke_dynamic_operand_base(which);
@@ -784,8 +824,17 @@
 
   void set_resolved_reference_length(int length) {
_saved._resolved_reference_length = length; } int resolved_reference_length() const { return _saved._resolved_reference_length; } - void set_lock(Monitor* lock) { _lock = lock; } - Monitor* lock() { return _lock; } + + // lock() may return null -- constant pool updates may happen before this lock is + // initialized, because the _pool_holder has not been fully initialized and + // has not been registered into the system dictionary. In this case, no other + // thread can be modifying this constantpool, so no synchronization is + // necessary. + // + // Use cplock() like this: + // oop cplock = cp->lock(); + // ObjectLocker ol(cplock , THREAD, cplock != NULL); + oop lock(); // Decrease ref counts of symbols that are in the constant pool // when the holder class is unloaded diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/cpCache.cpp --- a/src/share/vm/oops/cpCache.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/cpCache.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -266,7 +266,8 @@ // the lock, so that when the losing writer returns, he can use the linked // cache entry. - MonitorLockerEx ml(cpool->lock()); + oop cplock = cpool->lock(); + ObjectLocker ol(cplock, Thread::current(), cplock != NULL); if (!is_f1_null()) { return; } @@ -541,7 +542,8 @@ const intStack& invokedynamic_map, TRAPS) { int size = ConstantPoolCache::size(length); - return new (loader_data, size, false, THREAD) ConstantPoolCache(length, index_map, invokedynamic_map); + return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD) + ConstantPoolCache(length, index_map, invokedynamic_map); } void ConstantPoolCache::initialize(const intArray& inverse_index_map, diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/instanceKlass.cpp --- a/src/share/vm/oops/instanceKlass.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/instanceKlass.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -54,6 +54,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/thread.inline.hpp" +#include "services/classLoadingService.hpp" #include "services/threadService.hpp" #include "utilities/dtrace.hpp" #include "utilities/macros.hpp" @@ -267,8 +268,6 @@ set_fields(NULL, 0); set_constants(NULL); set_class_loader_data(NULL); - set_protection_domain(NULL); - set_signers(NULL); set_source_file_name(NULL); set_source_debug_extension(NULL, 0); set_array_name(NULL); @@ -278,7 +277,6 @@ set_is_marked_dependent(false); set_init_state(InstanceKlass::allocated); set_init_thread(NULL); - set_init_lock(NULL); set_reference_type(rt); set_oop_map_cache(NULL); set_jni_ids(NULL); @@ -407,36 +405,11 @@ } set_inner_classes(NULL); - // Null out Java heap objects, although these won't be walked to keep - // alive once this InstanceKlass is deallocated. - set_protection_domain(NULL); - set_signers(NULL); - set_init_lock(NULL); - // We should deallocate the Annotations instance MetadataFactory::free_metadata(loader_data, annotations()); set_annotations(NULL); } -volatile oop InstanceKlass::init_lock() const { - volatile oop lock = _init_lock; // read once - assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state - "only fully initialized state can have a null lock"); - return lock; -} - -// Set the initialization lock to null so the object can be GC'ed. Any racing -// threads to get this lock will see a null lock and will not lock. -// That's okay because they all check for initialized state after getting -// the lock and return. 
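The losing-writer comment in the cpCache hunk above and the init-lock race described in the removed comment just quoted are the same idiom: a double-checked update in which the loser takes the lock, observes the winner's published value, and returns without redoing the work. Stripped to its core (plain std::mutex; the VM versions layer memory-ordering barriers and oop handling on top of this):

#include <mutex>

class CacheEntry {
  void*      _f1;             // null until some thread publishes the resolved method
  std::mutex _lock;           // stand-in for the constant pool's cplock
public:
  CacheEntry() : _f1(nullptr) {}

  void set_method(void* resolved) {
    std::lock_guard<std::mutex> ol(_lock);
    if (_f1 != nullptr) {
      return;                 // the losing writer finds the winner's entry linked
    }
    _f1 = resolved;           // the winner publishes exactly once
  }
};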
-void InstanceKlass::fence_and_clear_init_lock() { - // make sure previous stores are all done, notably the init_state. - OrderAccess::storestore(); - klass_oop_store(&_init_lock, NULL); - assert(!is_not_initialized(), "class must be initialized now"); -} - - bool InstanceKlass::should_be_initialized() const { return !is_initialized(); } @@ -469,11 +442,29 @@ } } +// JVMTI spec thinks there are signers and protection domain in the +// instanceKlass. These accessors pretend these fields are there. +// The hprof specification also thinks these fields are in InstanceKlass. +oop InstanceKlass::protection_domain() const { + // return the protection_domain from the mirror + return java_lang_Class::protection_domain(java_mirror()); +} + +// To remove these from requires an incompatible change and CCC request. +objArrayOop InstanceKlass::signers() const { + // return the signers from the mirror + return java_lang_Class::signers(java_mirror()); +} + +volatile oop InstanceKlass::init_lock() const { + // return the init lock from the mirror + return java_lang_Class::init_lock(java_mirror()); +} void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) { EXCEPTION_MARK; volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD, init_lock != NULL); + ObjectLocker ol(init_lock, THREAD); // abort if someone beat us to the initialization if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized() @@ -492,7 +483,6 @@ } else { // linking successfull, mark class as initialized this_oop->set_init_state (fully_initialized); - this_oop->fence_and_clear_init_lock(); // trace if (TraceClassInitialization) { ResourceMark rm(THREAD); @@ -619,7 +609,7 @@ // verification & rewriting { volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD, init_lock != NULL); + ObjectLocker ol(init_lock, THREAD); // rewritten will have been set if loader constraint error found // on an earlier link attempt // don't verify or rewrite if already rewritten @@ -742,7 +732,7 @@ // Step 1 { volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD, init_lock != NULL); + ObjectLocker ol(init_lock, THREAD); Thread *self = THREAD; // it's passed the current thread @@ -890,9 +880,8 @@ void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) { volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD, init_lock != NULL); + ObjectLocker ol(init_lock, THREAD); this_oop->set_init_state(state); - this_oop->fence_and_clear_init_lock(); ol.notify_all(CHECK); } @@ -1903,16 +1892,6 @@ // Garbage collection -void InstanceKlass::oops_do(OopClosure* cl) { - Klass::oops_do(cl); - - cl->do_oop(adr_protection_domain()); - cl->do_oop(adr_signers()); - cl->do_oop(adr_init_lock()); - - // Don't walk the arrays since they are walked from the ClassLoaderData objects. -} - #ifdef ASSERT template void assert_is_in(T *p) { T heap_oop = oopDesc::load_heap_oop(p); @@ -2261,9 +2240,6 @@ m->remove_unshareable_info(); } - // Need to reinstate when reading back the class. - set_init_lock(NULL); - // do array classes also. array_klasses_do(remove_unshareable_in_class); } @@ -2295,13 +2271,6 @@ ik->itable()->initialize_itable(false, CHECK); } - // Allocate a simple java object for a lock. - // This needs to be a java object because during class initialization - // it can be held across a java call. 
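The counterpart to all these deletions: the protection domain, signers, and init lock now live in the java.lang.Class mirror, and InstanceKlass keeps only the delegating accessors introduced earlier in this hunk. The shape of that delegation, with illustrative plain-C++ types in place of oop and the java_lang_Class accessors:

struct Mirror {                 // the java.lang.Class instance of a klass
  void* _init_lock;             // cf. java_lang_Class::init_lock(mirror)
  void* _protection_domain;     // cf. java_lang_Class::protection_domain(mirror)
  void* _signers;               // cf. java_lang_Class::signers(mirror)
};

class KlassLike {
  Mirror* _java_mirror;
public:
  explicit KlassLike(Mirror* m) : _java_mirror(m) {}
  void* init_lock()         const { return _java_mirror->_init_lock; }
  void* protection_domain() const { return _java_mirror->_protection_domain; }
  void* signers()           const { return _java_mirror->_signers; }
};

Since the mirror is an ordinary heap object the collector already visits, the metadata side no longer carries oops of its own, which is what allowed the InstanceKlass::oops_do() override above to be removed.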
- typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK); - Handle h(THREAD, (oop)r); - ik->set_init_lock(h()); - // restore constant pool resolved references ik->constants()->restore_unshareable_info(CHECK); @@ -2312,7 +2281,29 @@ m->clear_all_breakpoints(); } + +void InstanceKlass::notify_unload_class(InstanceKlass* ik) { + // notify the debugger + if (JvmtiExport::should_post_class_unload()) { + JvmtiExport::post_class_unload(ik); + } + + // notify ClassLoadingService of class unload + ClassLoadingService::notify_class_unloaded(ik); +} + +void InstanceKlass::release_C_heap_structures(InstanceKlass* ik) { + // Clean up C heap + ik->release_C_heap_structures(); + ik->constants()->release_C_heap_structures(); +} + void InstanceKlass::release_C_heap_structures() { + + // Can't release the constant pool here because the constant pool can be + // deallocated separately from the InstanceKlass for default methods and + // redefine classes. + // Deallocate oop map cache if (_oop_map_cache != NULL) { delete _oop_map_cache; @@ -2329,6 +2320,17 @@ FreeHeap(jmeths); } + // Deallocate MemberNameTable + { + Mutex* lock_or_null = SafepointSynchronize::is_at_safepoint() ? NULL : MemberNameTable_lock; + MutexLockerEx ml(lock_or_null, Mutex::_no_safepoint_check_flag); + MemberNameTable* mnt = member_names(); + if (mnt != NULL) { + delete mnt; + set_member_names(NULL); + } + } + int* indices = methods_cached_itable_indices_acquire(); if (indices != (int*)NULL) { release_set_methods_cached_itable_indices(NULL); @@ -2716,7 +2718,7 @@ OsrList_lock->unlock(); } -nmethod* InstanceKlass::lookup_osr_nmethod(Method* const m, int bci, int comp_level, bool match_level) const { +nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const { // This is a short non-blocking critical region, so the no safepoint check is ok. 
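The two static callbacks added above separate "announce the unload" from "free the C-heap side structures". A sketch of how an unloading path would be expected to drive them; the real call site lives in the class-unloading code (ClassLoaderData) and is not part of these hunks, and unload_one_klass is a hypothetical wrapper:

    static void unload_one_klass(InstanceKlass* ik) {
      InstanceKlass::notify_unload_class(ik);        // JVMTI + ClassLoadingService
      InstanceKlass::release_C_heap_structures(ik);  // ik's C heap, then its
                                                     // constant pool's C heap
    }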
OsrList_lock->lock_without_safepoint_check(); nmethod* osr = osr_nmethods_head(); @@ -2757,6 +2759,30 @@ return NULL; } +void InstanceKlass::add_member_name(int index, Handle mem_name) { + jweak mem_name_wref = JNIHandles::make_weak_global(mem_name); + MutexLocker ml(MemberNameTable_lock); + assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds"); + DEBUG_ONLY(No_Safepoint_Verifier nsv); + + if (_member_names == NULL) { + _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count()); + } + _member_names->add_member_name(index, mem_name_wref); +} + +oop InstanceKlass::get_member_name(int index) { + MutexLocker ml(MemberNameTable_lock); + assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds"); + DEBUG_ONLY(No_Safepoint_Verifier nsv); + + if (_member_names == NULL) { + return NULL; + } + oop mem_name = _member_names->get_member_name(index); + return mem_name; +} + // ----------------------------------------------------------------------------------------------------- // Printing @@ -2817,10 +2843,7 @@ class_loader_data()->print_value_on(st); st->cr(); } - st->print(BULLET"protection domain: "); ((InstanceKlass*)this)->protection_domain()->print_value_on(st); st->cr(); st->print(BULLET"host class: "); host_klass()->print_value_on_maybe_null(st); st->cr(); - st->print(BULLET"signers: "); signers()->print_value_on(st); st->cr(); - st->print(BULLET"init_lock: "); ((oop)_init_lock)->print_value_on(st); st->cr(); if (source_file_name() != NULL) { st->print(BULLET"source file: "); source_file_name()->print_value_on(st); @@ -3021,7 +3044,6 @@ n += (sz->_method_ordering_bytes = sz->count_array(method_ordering())); n += (sz->_local_interfaces_bytes = sz->count_array(local_interfaces())); n += (sz->_transitive_interfaces_bytes = sz->count_array(transitive_interfaces())); - n += (sz->_signers_bytes = sz->count_array(signers())); n += (sz->_fields_bytes = sz->count_array(fields())); n += (sz->_inner_classes_bytes = sz->count_array(inner_classes())); sz->_ro_bytes += n; @@ -3187,15 +3209,10 @@ guarantee(constants()->is_metadata(), "should be in metaspace"); guarantee(constants()->is_constantPool(), "should be constant pool"); } - if (protection_domain() != NULL) { - guarantee(protection_domain()->is_oop(), "should be oop"); - } - if (host_klass() != NULL) { - guarantee(host_klass()->is_metadata(), "should be in metaspace"); - guarantee(host_klass()->is_klass(), "should be klass"); - } - if (signers() != NULL) { - guarantee(signers()->is_objArray(), "should be obj array"); + const Klass* host = host_klass(); + if (host != NULL) { + guarantee(host->is_metadata(), "should be in metaspace"); + guarantee(host->is_klass(), "should be klass"); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/instanceKlass.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_OOPS_INSTANCEKLASS_HPP #include "classfile/classLoaderData.hpp" +#include "memory/referenceType.hpp" #include "oops/annotations.hpp" #include "oops/constMethod.hpp" #include "oops/fieldInfo.hpp" @@ -37,6 +38,7 @@ #include "utilities/accessFlags.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/macros.hpp" +#include "trace/traceMacros.hpp" // An InstanceKlass is the VM level representation of a Java class. // It contains all information needed for a class at execution runtime.
@@ -58,8 +60,6 @@ // [fields ] // [constants ] // [class loader ] -// [protection domain ] -// [signers ] // [source file name ] // [inner classes ] // [static field size ] @@ -90,6 +90,7 @@ class nmethodBucket; class PreviousVersionNode; class JvmtiCachedClassFieldMap; +class MemberNameTable; // This is used in iterators below. class FieldClosure: public StackObj { @@ -179,15 +180,6 @@ static volatile int _total_instanceKlass_count; protected: - // Protection domain. - oop _protection_domain; - // Class signers. - objArrayOop _signers; - // Initialization lock. Must be one per class and it has to be a VM internal - // object so java code cannot lock it (like the mirror) - // It has to be an object not a Mutex because it's held through java calls. - volatile oop _init_lock; - // Annotations for this class Annotations* _annotations; // Array classes holding elements of this class. @@ -235,7 +227,7 @@ _misc_rewritten = 1 << 0, // methods rewritten. _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops _misc_should_verify_class = 1 << 2, // allow caching of preverification - _misc_is_anonymous = 1 << 3, // has embedded _inner_classes field + _misc_is_anonymous = 1 << 3, // has embedded _host_klass field _misc_is_contended = 1 << 4, // marked with contended annotation _misc_has_default_methods = 1 << 5 // class/superclass/implemented interfaces has default methods }; @@ -246,6 +238,7 @@ int _vtable_len; // length of Java vtable (in words) int _itable_len; // length of Java itable (in words) OopMapCache* volatile _oop_map_cache; // OopMapCache for all methods in the klass (allocated lazily) + MemberNameTable* _member_names; // Member names JNIid* _jni_ids; // First JNI identifier for static fields in this class jmethodID* _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none int* _methods_cached_itable_indices; // itable_index cache for JNI invoke corresponding to methods idnum, or NULL @@ -524,8 +517,10 @@ void set_constants(ConstantPool* c) { _constants = c; } // protection domain - oop protection_domain() { return _protection_domain; } - void set_protection_domain(oop pd) { klass_oop_store(&_protection_domain, pd); } + oop protection_domain() const; + + // signers + objArrayOop signers() const; // host class Klass* host_klass() const { @@ -572,10 +567,6 @@ } } - // signers - objArrayOop signers() const { return _signers; } - void set_signers(objArrayOop s) { klass_oop_store((oop*)&_signers, s); } - // source file name Symbol* source_file_name() const { return _source_file_name; } void set_source_file_name(Symbol* n); @@ -736,7 +727,7 @@ void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; }; void add_osr_nmethod(nmethod* n); void remove_osr_nmethod(nmethod* n); - nmethod* lookup_osr_nmethod(Method* const m, int bci, int level, bool match_level) const; + nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const; // Breakpoint support (see methods on Method* for details) BreakpointInfo* breakpoints() const { return _breakpoints; }; @@ -909,8 +900,6 @@ Method* method_at_itable(Klass* holder, int index, TRAPS); // Garbage collection - virtual void oops_do(OopClosure* cl); - void oop_follow_contents(oop obj); int oop_adjust_pointers(oop obj); @@ -932,7 +921,9 @@ // referenced by handles. 
bool on_stack() const { return _constants->on_stack(); } - void release_C_heap_structures(); + // callbacks for actions during class unloading + static void notify_unload_class(InstanceKlass* ik); + static void release_C_heap_structures(InstanceKlass* ik); // Parallel Scavenge and Parallel Old PARALLEL_GC_DECLS @@ -966,6 +957,7 @@ #endif // INCLUDE_ALL_GCS u2 idnum_allocated_count() const { return _idnum_allocated_count; } + private: // initialization state #ifdef ASSERT @@ -992,14 +984,13 @@ { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); } // Lock during initialization +public: + // Lock for (1) initialization; (2) access to the ConstantPool of this class. + // Must be one per class and it has to be a VM internal object so java code + // cannot lock it (like the mirror). + // It has to be an object not a Mutex because it's held through java calls. volatile oop init_lock() const; - void set_init_lock(oop value) { klass_oop_store(&_init_lock, value); } - void fence_and_clear_init_lock(); // after fully_initialized - - // Offsets for memory management - oop* adr_protection_domain() const { return (oop*)&this->_protection_domain;} - oop* adr_signers() const { return (oop*)&this->_signers;} - oop* adr_init_lock() const { return (oop*)&this->_init_lock;} +private: // Static methods that are used to implement member methods where an exposed this pointer // is needed due to possible GCs @@ -1020,6 +1011,8 @@ // Returns the array class with this class as element type Klass* array_klass_impl(bool or_null, TRAPS); + // Free CHeap allocated fields. + void release_C_heap_structures(); public: // CDS support - remove and restore oops from metadata. Oops are not shared. virtual void remove_unshareable_info(); @@ -1028,6 +1021,12 @@ // jvm support jint compute_modifier_flags(TRAPS) const; + // JSR-292 support + MemberNameTable* member_names() { return _member_names; } + void set_member_names(MemberNameTable* member_names) { _member_names = member_names; } + void add_member_name(int index, Handle member_name); + oop get_member_name(int index); + public: // JVMTI support jint jvmti_class_status() const; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/klass.cpp --- a/src/share/vm/oops/klass.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/klass.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -37,6 +37,7 @@ #include "oops/klass.inline.hpp" #include "oops/oop.inline2.hpp" #include "runtime/atomic.hpp" +#include "trace/traceMacros.hpp" #include "utilities/stack.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS @@ -50,7 +51,7 @@ if (_name != NULL) _name->increment_refcount(); } -bool Klass::is_subclass_of(Klass* k) const { +bool Klass::is_subclass_of(const Klass* k) const { // Run up the super chain and check if (this == k) return true; @@ -140,7 +141,7 @@ void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) { return Metaspace::allocate(loader_data, word_size, /*read_only*/false, - Metaspace::ClassType, CHECK_NULL); + MetaspaceObj::ClassType, CHECK_NULL); } Klass::Klass() { @@ -168,7 +169,7 @@ set_next_sibling(NULL); set_next_link(NULL); set_alloc_count(0); - TRACE_SET_KLASS_TRACE_ID(this, 0); + TRACE_INIT_ID(this); set_prototype_header(markOopDesc::prototype()); set_biased_lock_revocation_count(0); @@ -511,8 +512,9 @@ // (same order as class file parsing) loader_data->add_class(this); - // Recreate the class mirror - java_lang_Class::create_mirror(this, CHECK); + // Recreate the class mirror. 
The protection_domain is always null for + // the boot loader, for now. + java_lang_Class::create_mirror(this, Handle(NULL), CHECK); } Klass* Klass::array_klass_or_null(int rank) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/klass.hpp --- a/src/share/vm/oops/klass.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/klass.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -398,9 +398,10 @@ // vtables virtual klassVtable* vtable() const { return NULL; } + virtual int vtable_length() const { return 0; } // subclass check - bool is_subclass_of(Klass* k) const; + bool is_subclass_of(const Klass* k) const; // subtype check: true if is_subclass_of, or if k is interface and receiver implements it bool is_subtype_of(Klass* k) const { juint off = k->super_check_offset(); @@ -449,7 +450,7 @@ Klass* array_klass_or_null(int rank); Klass* array_klass_or_null(); - virtual oop protection_domain() { return NULL; } + virtual oop protection_domain() const = 0; oop class_loader() const; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/klassVtable.cpp --- a/src/share/vm/oops/klassVtable.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/klassVtable.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -519,6 +519,9 @@ // check if a method is a miranda method, given a class's methods table and its super // the caller must make sure that the method belongs to an interface implemented by the class bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) { + if (m->is_static()) { + return false; + } Symbol* name = m->name(); Symbol* signature = m->signature(); if (InstanceKlass::find_method(class_methods, name, signature) == NULL) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/method.cpp --- a/src/share/vm/oops/method.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/method.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -74,7 +74,7 @@ int size = Method::size(access_flags.is_native()); - return new (loader_data, size, false, THREAD) Method(cm, access_flags, size); + return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags, size); } Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) { @@ -91,7 +91,7 @@ set_hidden(false); set_dont_inline(false); set_method_data(NULL); - set_interpreter_throwout_count(0); + set_method_counters(NULL); set_vtable_index(Method::garbage_vtable_index); // Fix and bury in Method* @@ -105,21 +105,6 @@ } NOT_PRODUCT(set_compiled_invocation_count(0);) - set_interpreter_invocation_count(0); - invocation_counter()->init(); - backedge_counter()->init(); - clear_number_of_breakpoints(); - -#ifdef GRAAL - set_graal_invocation_time(0L); - set_graal_priority(0); -#endif - -#ifdef TIERED - set_rate(0); - set_prev_event_count(0); - set_prev_time(0); -#endif } // Release Method*. The nmethod will be gone when we get here because @@ -129,6 +114,8 @@ set_constMethod(NULL); MetadataFactory::free_metadata(loader_data, method_data()); set_method_data(NULL); + MetadataFactory::free_metadata(loader_data, method_counters()); + set_method_counters(NULL); // The nmethod will be gone when we get here.
if (code() != NULL) _code = NULL; } @@ -328,7 +315,10 @@ // compiler does not bump invocation counter of compiled methods return true; } - else if (_invocation_counter.carry() || (method_data() != NULL && method_data()->invocation_counter()->carry())) { + else if ((method_counters() != NULL && + method_counters()->invocation_counter()->carry()) || + (method_data() != NULL && + method_data()->invocation_counter()->carry())) { // The carry bit is set when the counter overflows and causes // a compilation to occur. We don't know how many times // the counter has been reset, so we simply assume it has @@ -392,6 +382,18 @@ } } +MethodCounters* Method::build_method_counters(Method* m, TRAPS) { + methodHandle mh(m); + ClassLoaderData* loader_data = mh->method_holder()->class_loader_data(); + MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL); + if (mh->method_counters() == NULL) { + mh->set_method_counters(counters); + } else { + MetadataFactory::free_metadata(loader_data, counters); + } + return mh->method_counters(); +} + void Method::cleanup_inline_caches() { // The current system doesn't use inline caches in the interpreter // => nothing to do (keep this method around for future use) @@ -799,8 +801,6 @@ set_signature_handler(NULL); } NOT_PRODUCT(set_compiled_invocation_count(0);) - invocation_counter()->reset(); - backedge_counter()->reset(); _adapter = NULL; _from_compiled_entry = NULL; @@ -813,8 +813,7 @@ assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?"); set_method_data(NULL); - set_interpreter_throwout_count(0); - set_interpreter_invocation_count(0); + set_method_counters(NULL); } // Called when the method_holder is getting linked. Setup entrypoints so the method @@ -833,7 +832,9 @@ assert(entry != NULL, "interpreter entry must be non-null"); // Sets both _i2i_entry and _from_interpreted_entry set_interpreter_entry(entry); - if (is_native() && !is_method_handle_intrinsic()) { + + // Don't overwrite already registered native entries. + if (is_native() && !has_native_function()) { set_native_function( SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), !native_bind_event_is_interesting); @@ -886,7 +887,7 @@ debug_only(No_Safepoint_Verifier nsv;) nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code); if (code == NULL && UseCodeCacheFlushing) { - nmethod *saved_code = CodeCache::find_and_remove_saved_code(this); + nmethod *saved_code = CodeCache::reanimate_saved_code(this); if (saved_code != NULL) { methodHandle method(this); assert( ! saved_code->is_osr_method(), "should not get here for osr" ); @@ -1558,33 +1559,39 @@ int Method::invocation_count() { + MethodCounters *mcs = method_counters(); if (TieredCompilation) { MethodData* const mdo = method_data(); - if (invocation_counter()->carry() || ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) { + if (((mcs != NULL) ? mcs->invocation_counter()->carry() : false) || + ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) { return InvocationCounter::count_limit; } else { - return invocation_counter()->count() + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0); + return ((mcs != NULL) ? mcs->invocation_counter()->count() : 0) + + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0); } } else { - return invocation_counter()->count(); + return (mcs == NULL) ? 
0 : mcs->invocation_counter()->count(); } } int Method::backedge_count() { + MethodCounters *mcs = method_counters(); if (TieredCompilation) { MethodData* const mdo = method_data(); - if (backedge_counter()->carry() || ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) { + if (((mcs != NULL) ? mcs->backedge_counter()->carry() : false) || + ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) { return InvocationCounter::count_limit; } else { - return backedge_counter()->count() + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0); + return ((mcs != NULL) ? mcs->backedge_counter()->count() : 0) + + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0); } } else { - return backedge_counter()->count(); + return (mcs == NULL) ? 0 : mcs->backedge_counter()->count(); } } int Method::highest_comp_level() const { - MethodData* mdo = method_data(); + const MethodData* mdo = method_data(); if (mdo != NULL) { return mdo->highest_comp_level(); } else { @@ -1593,7 +1600,7 @@ } int Method::highest_osr_comp_level() const { - MethodData* mdo = method_data(); + const MethodData* mdo = method_data(); if (mdo != NULL) { return mdo->highest_osr_comp_level(); } else { @@ -1634,12 +1641,12 @@ assert(orig_bytecode() == code, "original bytecode must be the same"); } #endif + Thread *thread = Thread::current(); *method->bcp_from(_bci) = Bytecodes::_breakpoint; - method->incr_number_of_breakpoints(); + method->incr_number_of_breakpoints(thread); SystemDictionary::notice_modification(); { // Deoptimize all dependents on this method - Thread *thread = Thread::current(); HandleMark hm(thread); methodHandle mh(thread, method); Universe::flush_dependents_on_method(mh); @@ -1649,7 +1656,7 @@ void BreakpointInfo::clear(Method* method) { *method->bcp_from(_bci) = orig_bytecode(); assert(method->number_of_breakpoints() > 0, "must not go negative"); - method->decr_number_of_breakpoints(); + method->decr_number_of_breakpoints(Thread::current()); } // jmethodID handling @@ -1982,14 +1989,4 @@ md->is_methodData(), "should be method data"); } -#ifdef GRAAL -void Method::reset_counters() { - invocation_counter()->reset(); - backedge_counter()->reset(); - _interpreter_invocation_count = 0; - _interpreter_throwout_count = 0; -#ifndef PRODUCT - _compiled_invocation_count = 0; #endif -} -#endif diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/method.hpp --- a/src/share/vm/oops/method.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/method.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -31,6 +31,7 @@ #include "interpreter/invocationCounter.hpp" #include "oops/annotations.hpp" #include "oops/constantPool.hpp" +#include "oops/methodCounters.hpp" #include "oops/instanceKlass.hpp" #include "oops/oop.hpp" #include "oops/typeArrayOop.hpp" @@ -66,7 +67,7 @@ // | ConstMethod* (oop) | // |------------------------------------------------------| // | methodData (oop) | -// | interp_invocation_count | +// | methodCounters | // |------------------------------------------------------| // | access_flags | // | vtable_index | @@ -75,16 +76,6 @@ // |------------------------------------------------------| // | method_size | intrinsic_id| flags | // |------------------------------------------------------| -// | throwout_count | num_breakpoints | -// |------------------------------------------------------| -// | invocation_counter | -// | backedge_counter | -// |------------------------------------------------------| -// | prev_time (tiered only, 64 bit wide) | -// | | -// 
|------------------------------------------------------| -// | rate (tiered) | -// |------------------------------------------------------| // | code (pointer) | // | i2i (pointer) | // | adapter (pointer) | @@ -100,6 +91,7 @@ class LocalVariableTableElement; class AdapterHandlerEntry; class MethodData; +class MethodCounters; class ConstMethod; class InlineTableSizes; class KlassSizeStats; @@ -109,7 +101,7 @@ private: ConstMethod* _constMethod; // Method read-only data. MethodData* _method_data; - int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered) + MethodCounters* _method_counters; AccessFlags _access_flags; // Access flags int _vtable_index; // vtable index of this method (see VtableIndexFlag) // note: can have vtables with >2**16 elements (because of inheritance) @@ -124,19 +116,6 @@ _hidden : 1, _dont_inline : 1, : 3; - u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting - u2 _number_of_breakpoints; // fullspeed debugging support - InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations - InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequencey-based optimizations - -#ifdef GRAAL - jlong _graal_invocation_time; - int _graal_priority; -#endif -#ifdef TIERED - float _rate; // Events (invocation and backedge counter increments) per millisecond - jlong _prev_time; // Previous time the rate was acquired -#endif #ifndef PRODUCT int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging) @@ -251,11 +230,31 @@ void clear_all_breakpoints(); // Tracking number of breakpoints, for fullspeed debugging. // Only mutated by VM thread. 
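Method's counters now live behind a lazily created MethodCounters object, so mutators funnel through a TRAPS-taking getter that may allocate, as the breakpoint and throwout accessors just below show. A condensed caller-side sketch; count_invocation_sketch is hypothetical, and the increment() call is an assumption about InvocationCounter's API rather than something these hunks add:

    static void count_invocation_sketch(Method* m, TRAPS) {
      MethodCounters* mcs = m->method_counters();  // NULL until first use
      if (mcs == NULL) {
        // Racy-safe: a losing allocator frees its copy and adopts the
        // winner's (see Method::build_method_counters above).
        mcs = Method::build_method_counters(m, CHECK);
      }
      if (mcs != NULL) {
        mcs->invocation_counter()->increment();    // assumed InvocationCounter API
      }
    }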
- u2 number_of_breakpoints() const { return _number_of_breakpoints; } - void incr_number_of_breakpoints() { ++_number_of_breakpoints; } - void decr_number_of_breakpoints() { --_number_of_breakpoints; } + u2 number_of_breakpoints() const { + if (method_counters() == NULL) { + return 0; + } else { + return method_counters()->number_of_breakpoints(); + } + } + void incr_number_of_breakpoints(TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->incr_number_of_breakpoints(); + } + } + void decr_number_of_breakpoints(TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->decr_number_of_breakpoints(); + } + } // Initialization only - void clear_number_of_breakpoints() { _number_of_breakpoints = 0; } + void clear_number_of_breakpoints() { + if (method_counters() != NULL) { + method_counters()->clear_number_of_breakpoints(); + } + } // index into InstanceKlass methods() array // note: also used by jfr @@ -292,14 +291,20 @@ void set_highest_osr_comp_level(int level); // Count of times method was exited via exception while interpreting - void interpreter_throwout_increment() { - if (_interpreter_throwout_count < 65534) { - _interpreter_throwout_count++; + void interpreter_throwout_increment(TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->interpreter_throwout_increment(); } } - int interpreter_throwout_count() const { return _interpreter_throwout_count; } - void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; } + int interpreter_throwout_count() const { + if (method_counters() == NULL) { + return 0; + } else { + return method_counters()->interpreter_throwout_count(); + } + } // size of parameters int size_of_parameters() const { return constMethod()->size_of_parameters(); } @@ -343,51 +348,75 @@ MethodData* method_data() const { return _method_data; } + void set_method_data(MethodData* data) { _method_data = data; } - // invocation counter - InvocationCounter* invocation_counter() { return &_invocation_counter; } - InvocationCounter* backedge_counter() { return &_backedge_counter; } + MethodCounters* method_counters() const { + return _method_counters; + } + + + void set_method_counters(MethodCounters* counters) { + _method_counters = counters; + } #ifdef TIERED // We are reusing interpreter_invocation_count as a holder for the previous event count! // We can do that since interpreter_invocation_count is not used in tiered. - int prev_event_count() const { return _interpreter_invocation_count; } - void set_prev_event_count(int count) { _interpreter_invocation_count = count; } - jlong prev_time() const { return _prev_time; } - void set_prev_time(jlong time) { _prev_time = time; } - float rate() const { return _rate; } - void set_rate(float rate) { _rate = rate; } + int prev_event_count() const { + if (method_counters() == NULL) { + return 0; + } else { + return method_counters()->interpreter_invocation_count(); + } + } + void set_prev_event_count(int count, TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->set_interpreter_invocation_count(count); + } + } + jlong prev_time() const { + return method_counters() == NULL ? 0 : method_counters()->prev_time(); + } + void set_prev_time(jlong time, TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->set_prev_time(time); + } + } + float rate() const { + return method_counters() == NULL ? 
0 : method_counters()->rate(); + } + void set_rate(float rate, TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->set_rate(rate); + } + } #endif int invocation_count(); int backedge_count(); -#ifdef GRAAL - void set_graal_invocation_time(jlong time) { _graal_invocation_time = time; } - jlong graal_invocation_time() { return _graal_invocation_time; } - - void set_graal_priority(int prio) { _graal_priority = prio; } - int graal_priority() { return _graal_priority; } - - void reset_counters(); -#endif // GRAAL - bool was_executed_more_than(int n); bool was_never_executed() { return !was_executed_more_than(0); } static void build_interpreter_method_data(methodHandle method, TRAPS); + static MethodCounters* build_method_counters(Method* m, TRAPS); + int interpreter_invocation_count() { if (TieredCompilation) return invocation_count(); - else return _interpreter_invocation_count; + else return (method_counters() == NULL) ? 0 : + method_counters()->interpreter_invocation_count(); } - void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; } - int increment_interpreter_invocation_count() { + int increment_interpreter_invocation_count(TRAPS) { if (TieredCompilation) ShouldNotReachHere(); - return ++_interpreter_invocation_count; + MethodCounters* mcs = get_method_counters(CHECK_0); + return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count(); } #ifndef PRODUCT @@ -596,16 +625,12 @@ #endif /* CC_INTERP */ static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); } static ByteSize code_offset() { return byte_offset_of(Method, _code); } - static ByteSize invocation_counter_offset() { return byte_offset_of(Method, _invocation_counter); } - static ByteSize backedge_counter_offset() { return byte_offset_of(Method, _backedge_counter); } static ByteSize method_data_offset() { return byte_offset_of(Method, _method_data); } - static ByteSize interpreter_invocation_counter_offset() { return byte_offset_of(Method, _interpreter_invocation_count); } -#ifdef GRAAL - static ByteSize graal_invocation_time_offset() { return byte_offset_of(Method, _graal_invocation_time); } - static ByteSize graal_priority_offset() { return byte_offset_of(Method, _graal_priority); } -#endif + static ByteSize method_counters_offset() { + return byte_offset_of(Method, _method_counters); + } #ifndef PRODUCT static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); } #endif // not PRODUCT @@ -616,8 +641,6 @@ // for code generation static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); } - static int interpreter_invocation_counter_offset_in_bytes() - { return offset_of(Method, _interpreter_invocation_count); } static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); } static int intrinsic_id_size_in_bytes() { return sizeof(u1); } @@ -648,13 +671,15 @@ Symbol* signature, //anything at all TRAPS); static Klass* check_non_bcp_klass(Klass* klass); - // these operate only on invoke methods: + + // How many extra stack entries for invokedynamic when it's enabled + static const int extra_stack_entries_for_jsr292 = 1; + + // this operates only on invoke methods: // presize interpreter frames for extra interpreter stack entries, if needed - // method handles want to be able to push a few extra values (e.g., a bound receiver), and - // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist, - 
// all without checking for a stack overflow - static int extra_stack_entries() { return EnableInvokeDynamic ? 2 : 0; } - static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize() + // Account for the extra appendix argument for invokehandle/invokedynamic + static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; } + static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize // RedefineClasses() support: bool is_old() const { return access_flags().is_old(); } @@ -775,6 +800,13 @@ private: void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason); + MethodCounters* get_method_counters(TRAPS) { + if (_method_counters == NULL) { + build_method_counters(this, CHECK_AND_CLEAR_NULL); + } + return _method_counters; + } + public: bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); } void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); } @@ -956,7 +988,7 @@ u2 _length; public: - ExceptionTable(Method* m) { + ExceptionTable(const Method* m) { if (m->has_exception_handler()) { _table = m->exception_table_start(); _length = m->exception_table_length(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/methodCounters.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/oops/methodCounters.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" +#include "oops/methodCounters.hpp" +#include "runtime/thread.inline.hpp" + +MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) { + return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters(); +} + +void MethodCounters::clear_counters() { + invocation_counter()->reset(); + backedge_counter()->reset(); + set_interpreter_throwout_count(0); + set_interpreter_invocation_count(0); +} diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/methodCounters.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/oops/methodCounters.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_METHODCOUNTERS_HPP +#define SHARE_VM_OOPS_METHODCOUNTERS_HPP + +#include "oops/metadata.hpp" +#include "interpreter/invocationCounter.hpp" + +class MethodCounters: public MetaspaceObj { + friend class VMStructs; + private: + int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered) + u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting + u2 _number_of_breakpoints; // fullspeed debugging support + InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations + InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations + +#ifdef GRAAL + jlong _graal_invocation_time; + int _graal_priority; +#endif +#ifdef TIERED + float _rate; // Events (invocation and backedge counter increments) per millisecond + jlong _prev_time; // Previous time the rate was acquired +#endif + + MethodCounters() : _interpreter_invocation_count(0), + _interpreter_throwout_count(0), + _number_of_breakpoints(0) +#ifdef TIERED + , _rate(0), + _prev_time(0) +#endif + { + invocation_counter()->init(); + backedge_counter()->init(); + } + + public: + static MethodCounters* allocate(ClassLoaderData* loader_data, TRAPS); + + void deallocate_contents(ClassLoaderData* loader_data) {} + DEBUG_ONLY(bool on_stack() { return false; }) // for template + + static int size() { return sizeof(MethodCounters) / wordSize; } + + bool is_klass() const { return false; } + + void clear_counters(); + + int interpreter_invocation_count() { + return _interpreter_invocation_count; + } + void set_interpreter_invocation_count(int count) { + _interpreter_invocation_count = count; + } + int increment_interpreter_invocation_count() { + return ++_interpreter_invocation_count; + } + + void interpreter_throwout_increment() { + if (_interpreter_throwout_count < 65534) { + _interpreter_throwout_count++; + } + } + int interpreter_throwout_count() const { + return _interpreter_throwout_count; + } + void set_interpreter_throwout_count(int count) { + _interpreter_throwout_count = count; + } + + u2 number_of_breakpoints() const { return _number_of_breakpoints; } + void incr_number_of_breakpoints() { ++_number_of_breakpoints; } + void decr_number_of_breakpoints() { --_number_of_breakpoints; } + void clear_number_of_breakpoints() { _number_of_breakpoints = 0; } + +#ifdef GRAAL + void set_graal_invocation_time(jlong time) { _graal_invocation_time = time; } + jlong graal_invocation_time() { return
_graal_invocation_time; } + + void set_graal_priority(int prio) { _graal_priority = prio; } + int graal_priority() { return _graal_priority; } +#endif // GRAAL + + +#ifdef TIERED + jlong prev_time() const { return _prev_time; } + void set_prev_time(jlong time) { _prev_time = time; } + float rate() const { return _rate; } + void set_rate(float rate) { _rate = rate; } +#endif + + // invocation counter + InvocationCounter* invocation_counter() { return &_invocation_counter; } + InvocationCounter* backedge_counter() { return &_backedge_counter; } + + static ByteSize interpreter_invocation_counter_offset() { + return byte_offset_of(MethodCounters, _interpreter_invocation_count); + } + + static ByteSize invocation_counter_offset() { + return byte_offset_of(MethodCounters, _invocation_counter); + } + + static ByteSize backedge_counter_offset() { + return byte_offset_of(MethodCounters, _backedge_counter); + } + + static int interpreter_invocation_counter_offset_in_bytes() { + return offset_of(MethodCounters, _interpreter_invocation_count); + } + +}; +#endif //SHARE_VM_OOPS_METHODCOUNTERS_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/methodData.cpp --- a/src/share/vm/oops/methodData.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/methodData.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -400,7 +400,8 @@ MethodData* MethodData::allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS) { int size = MethodData::compute_allocation_size_in_words(method); - return new (loader_data, size, false, THREAD) MethodData(method(), size, CHECK_NULL); + return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD) + MethodData(method(), size, CHECK_NULL); } int MethodData::bytecode_cell_count(Bytecodes::Code code) { @@ -774,14 +775,17 @@ } else { int iic = method->interpreter_invocation_count(); if (mileage < iic) mileage = iic; - InvocationCounter* ic = method->invocation_counter(); - InvocationCounter* bc = method->backedge_counter(); - int icval = ic->count(); - if (ic->carry()) icval += CompileThreshold; - if (mileage < icval) mileage = icval; - int bcval = bc->count(); - if (bc->carry()) bcval += CompileThreshold; - if (mileage < bcval) mileage = bcval; + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + InvocationCounter* ic = mcs->invocation_counter(); + InvocationCounter* bc = mcs->backedge_counter(); + int icval = ic->count(); + if (ic->carry()) icval += CompileThreshold; + if (mileage < icval) mileage = icval; + int bcval = bc->count(); + if (bc->carry()) bcval += CompileThreshold; + if (mileage < bcval) mileage = bcval; + } } return mileage; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/methodData.hpp --- a/src/share/vm/oops/methodData.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/methodData.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1424,9 +1424,9 @@ void set_would_profile(bool p) { _would_profile = p; } bool would_profile() const { return _would_profile; } - int highest_comp_level() { return _highest_comp_level; } + int highest_comp_level() const { return _highest_comp_level; } void set_highest_comp_level(int level) { _highest_comp_level = level; } - int highest_osr_comp_level() { return _highest_osr_comp_level; } + int highest_osr_comp_level() const { return _highest_osr_comp_level; } void set_highest_osr_comp_level(int level) { _highest_osr_comp_level = level; } int num_loops() const { return _num_loops; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/objArrayKlass.hpp --- a/src/share/vm/oops/objArrayKlass.hpp 
Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/objArrayKlass.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -75,7 +75,7 @@ void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS); // Compute protection domain - oop protection_domain() { return bottom_klass()->protection_domain(); } + oop protection_domain() const { return bottom_klass()->protection_domain(); } private: // Either oop or narrowOop depending on UseCompressedOops. diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/oop.cpp --- a/src/share/vm/oops/oop.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/oop.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -103,11 +103,17 @@ // When String table needs to rehash unsigned int oopDesc::new_hash(jint seed) { + EXCEPTION_MARK; ResourceMark rm; int length; - jchar* chars = java_lang_String::as_unicode_string(this, length); - // Use alternate hashing algorithm on the string - return AltHashing::murmur3_32(seed, chars, length); + jchar* chars = java_lang_String::as_unicode_string(this, length, THREAD); + if (chars != NULL) { + // Use alternate hashing algorithm on the string + return AltHashing::murmur3_32(seed, chars, length); + } else { + vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "unable to create Unicode strings for String table rehash"); + return 0; + } } VerifyOopClosure VerifyOopClosure::verify_oop; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/symbol.cpp --- a/src/share/vm/oops/symbol.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/symbol.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -55,7 +55,7 @@ address res; int alloc_size = size(len)*HeapWordSize; res = (address) Metaspace::allocate(loader_data, size(len), true, - Metaspace::NonClassType, CHECK_NULL); + MetaspaceObj::SymbolType, CHECK_NULL); return res; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/oops/typeArrayKlass.hpp --- a/src/share/vm/oops/typeArrayKlass.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/oops/typeArrayKlass.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -67,6 +67,8 @@ typeArrayOop allocate(int length, TRAPS) { return allocate_common(length, true, THREAD); } oop multi_allocate(int rank, jint* sizes, TRAPS); + oop protection_domain() const { return NULL; } + // Copying void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/bytecodeInfo.cpp --- a/src/share/vm/opto/bytecodeInfo.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/bytecodeInfo.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -85,16 +85,35 @@ assert(!UseOldInlining, "do not use for old stuff"); } +/** + * Return true when EA is ON and a java constructor is called or + * a super constructor is called from an inlined java constructor. + * Also return true for boxing methods. + */ static bool is_init_with_ea(ciMethod* callee_method, ciMethod* caller_method, Compile* C) { - // True when EA is ON and a java constructor is called or - // a super constructor is called from an inlined java constructor. 
- return C->do_escape_analysis() && EliminateAllocations && - ( callee_method->is_initializer() || - (caller_method->is_initializer() && - caller_method != C->method() && - caller_method->holder()->is_subclass_of(callee_method->holder())) - ); + if (!C->do_escape_analysis() || !EliminateAllocations) { + return false; // EA is off + } + if (callee_method->is_initializer()) { + return true; // constructor + } + if (caller_method->is_initializer() && + caller_method != C->method() && + caller_method->holder()->is_subclass_of(callee_method->holder())) { + return true; // super constructor is called from inlined constructor + } + if (C->eliminate_boxing() && callee_method->is_boxing_method()) { + return true; + } + return false; +} + +/** + * Force inlining of unboxing accessors. + */ +static bool is_unboxing_method(ciMethod* callee_method, Compile* C) { + return C->eliminate_boxing() && callee_method->is_unboxing_method(); } // positive filter: should callee be inlined? @@ -144,6 +163,7 @@ // bump the max size if the call is frequent if ((freq >= InlineFrequencyRatio) || (call_site_count >= InlineFrequencyCount) || + is_unboxing_method(callee_method, C) || is_init_with_ea(callee_method, caller_method, C)) { max_inline_size = C->freq_inline_size(); @@ -241,8 +261,25 @@ return false; } + if (callee_method->should_not_inline()) { + set_msg("disallowed by CompilerOracle"); + return true; + } + +#ifndef PRODUCT + if (ciReplay::should_not_inline(callee_method)) { + set_msg("disallowed by ciReplay"); + return true; + } +#endif + // Now perform checks which are heuristic + if (is_unboxing_method(callee_method, C)) { + // Inline unboxing methods. + return false; + } + if (!callee_method->force_inline()) { if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode) { @@ -264,18 +301,6 @@ } } - if (callee_method->should_not_inline()) { - set_msg("disallowed by CompilerOracle"); - return true; - } - -#ifndef PRODUCT - if (ciReplay::should_not_inline(callee_method)) { - set_msg("disallowed by ciReplay"); - return true; - } -#endif - if (UseStringCache) { // Do not inline StringCache::profile() method used only at the beginning.
if (callee_method->name() == ciSymbol::profile_name() && @@ -300,9 +325,8 @@ } if (is_init_with_ea(callee_method, caller_method, C)) { - // Escape Analysis: inline all executed constructors - + return false; } else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) { set_msg("executed < MinInliningThreshold times"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/c2_globals.hpp --- a/src/share/vm/opto/c2_globals.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/c2_globals.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -442,12 +442,15 @@ notproduct(bool, PrintEliminateLocks, false, \ "Print out when locks are eliminated") \ \ - diagnostic(bool, EliminateAutoBox, false, \ - "Private flag to control optimizations for autobox elimination") \ + product(bool, EliminateAutoBox, true, \ + "Control optimizations for autobox elimination") \ \ product(intx, AutoBoxCacheMax, 128, \ "Sets max value cached by the java.lang.Integer autobox cache") \ \ + experimental(bool, AggressiveUnboxing, false, \ + "Control optimizations for aggressive boxing elimination") \ + \ product(bool, DoEscapeAnalysis, true, \ "Perform escape analysis") \ \ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/c2compiler.cpp --- a/src/share/vm/opto/c2compiler.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/c2compiler.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -125,9 +125,10 @@ bool subsume_loads = SubsumeLoads; bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables(); + bool eliminate_boxing = EliminateAutoBox; while (!env->failing()) { // Attempt to compile while subsuming loads into machine instructions. - Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis); + Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis, eliminate_boxing); // Check result and retry if appropriate. @@ -142,6 +143,12 @@ do_escape_analysis = false; continue; // retry } + if (C.has_boxed_value()) { + // Recompile without boxing elimination, regardless of the failure reason. + assert(eliminate_boxing, "must make progress"); + eliminate_boxing = false; + continue; // retry + } // Pass any other failure reason up to the ciEnv. // Note that serious, irreversible failures are already logged // on the ciEnv via env->record_method_not_compilable(). diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/callGenerator.cpp --- a/src/share/vm/opto/callGenerator.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/callGenerator.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -134,7 +134,7 @@ kit.C->log()->elem("direct_call bci='%d'", jvms->bci()); } - CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(tf(), target, method(), kit.bci()); + CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci()); _call_node = call; // Save the call node in case we need it later if (!is_static) { // Make an explicit receiver null_check as part of this call.
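With the c2_globals.hpp hunk above, EliminateAutoBox is promoted from a diagnostic flag defaulting to false to a product flag defaulting to true, and AggressiveUnboxing arrives as an experimental flag. For reference, standard HotSpot flag handling (not specific to this change) means they are exercised like this:

    java -XX:-EliminateAutoBox ...                                     # opt out of autobox elimination
    java -XX:+UnlockExperimentalVMOptions -XX:+AggressiveUnboxing ...  # opt in to aggressive unboxing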
@@ -304,29 +304,34 @@ void LateInlineCallGenerator::do_late_inline() { // Can't inline it - if (call_node() == NULL || call_node()->outcnt() == 0 || - call_node()->in(0) == NULL || call_node()->in(0)->is_top()) { + CallStaticJavaNode* call = call_node(); + if (call == NULL || call->outcnt() == 0 || + call->in(0) == NULL || call->in(0)->is_top()) { return; } - const TypeTuple *r = call_node()->tf()->domain(); + const TypeTuple *r = call->tf()->domain(); for (int i1 = 0; i1 < method()->arg_size(); i1++) { - if (call_node()->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) { + if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) { assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); return; } } - if (call_node()->in(TypeFunc::Memory)->is_top()) { + if (call->in(TypeFunc::Memory)->is_top()) { assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); return; } - CallStaticJavaNode* call = call_node(); + Compile* C = Compile::current(); + // Remove inlined methods from Compiler's lists. + if (call->is_macro()) { + C->remove_macro_node(call); + } // Make a clone of the JVMState that is appropriate to use for driving a parse - Compile* C = Compile::current(); - JVMState* jvms = call->jvms()->clone_shallow(C); + JVMState* old_jvms = call->jvms(); + JVMState* jvms = old_jvms->clone_shallow(C); uint size = call->req(); SafePointNode* map = new (C) SafePointNode(size, jvms); for (uint i1 = 0; i1 < size; i1++) { @@ -340,16 +345,23 @@ map->set_req(TypeFunc::Memory, mem); } - // Make enough space for the expression stack and transfer the incoming arguments - int nargs = method()->arg_size(); + uint nargs = method()->arg_size(); + // blow away old call arguments + Node* top = C->top(); + for (uint i1 = 0; i1 < nargs; i1++) { + map->set_req(TypeFunc::Parms + i1, top); + } jvms->set_map(map); + + // Make enough space in the expression stack to transfer + // the incoming arguments and return value. map->ensure_stack(jvms, jvms->method()->max_stack()); - if (nargs > 0) { - for (int i1 = 0; i1 < nargs; i1++) { - map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1)); - } + for (uint i1 = 0; i1 < nargs; i1++) { + map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1)); } + // This check is done here because the for_method_handle_inline() method + // needs jvms for the inlined state. if (!do_late_inline_check(jvms)) { map->disconnect_inputs(NULL, C); return; @@ -480,6 +492,26 @@ return new LateInlineStringCallGenerator(method, inline_cg); } +class LateInlineBoxingCallGenerator : public LateInlineCallGenerator { + + public: + LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) : + LateInlineCallGenerator(method, inline_cg) {} + + virtual JVMState* generate(JVMState* jvms) { + Compile *C = Compile::current(); + C->print_inlining_skip(this); + + C->add_boxing_late_inline(this); + + JVMState* new_jvms = DirectCallGenerator::generate(jvms); + return new_jvms; + } +}; + +CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) { + return new LateInlineBoxingCallGenerator(method, inline_cg); +} //---------------------------WarmCallGenerator-------------------------------- // Internal class which handles initial deferral of inlining decisions.
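The new LateInlineBoxingCallGenerator defers the inlining decision: its generate() emits an ordinary direct call and queues the generator on the compile's boxing-late-inline list, so escape analysis still sees a recognizable boxing call that it may eliminate before the body is ever inlined. A sketch of the wiring this implies at the call-generator selection site (the actual site is in Compile/ParseGenerator code outside these hunks):

    // When parsing a call to a boxing method such as Integer.valueOf:
    CallGenerator* cg = CallGenerator::for_boxing_late_inline(method, inline_cg);
    JVMState* new_jvms = cg->generate(jvms);  // direct call now, enqueue for later
    // After escape analysis, queued generators are revisited via
    // do_late_inline() and the surviving calls are inlined.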
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/callGenerator.hpp --- a/src/share/vm/opto/callGenerator.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/callGenerator.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -125,6 +125,7 @@ static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg); static CallGenerator* for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const); static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg); + static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg); // How to make a call but defer the decision whether to inline or not. static CallGenerator* for_warm_call(WarmCallInfo* ci, diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/callnode.cpp --- a/src/share/vm/opto/callnode.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/callnode.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -523,7 +523,9 @@ void JVMState::dump_on(outputStream* st) const { - if (_map && !((uintptr_t)_map & 1)) { + bool print_map = _map && !((uintptr_t)_map & 1) && + ((caller() == NULL) || (caller()->map() != _map)); + if (print_map) { if (_map->len() > _map->req()) { // _map->has_exceptions() Node* ex = _map->in(_map->req()); // _map->next_exception() // skip the first one; it's already being printed @@ -532,7 +534,10 @@ ex->dump(1); } } - _map->dump(2); + _map->dump(Verbose ? 2 : 1); + } + if (caller() != NULL) { + caller()->dump_on(st); } st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); @@ -546,9 +551,6 @@ _method->print_codes_on(bci(), bci()+1, st); } } - if (caller() != NULL) { - caller()->dump_on(st); - } } // Extra way to dump a jvms from the debugger, @@ -584,6 +586,15 @@ return n; } +/** + * Reset map for all callers + */ +void JVMState::set_map_deep(SafePointNode* map) { + for (JVMState* p = this; p->_caller != NULL; p = p->_caller) { + p->set_map(map); + } +} + //============================================================================= uint CallNode::cmp( const Node &n ) const { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; } @@ -663,17 +674,49 @@ // Determine whether the call could modify the field of the specified // instance at the specified offset. // -bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) { - const TypeOopPtr *adrInst_t = addr_t->isa_oopptr(); - - // If not an OopPtr or not an instance type, assume the worst. - // Note: currently this method is called only for instance types. - if (adrInst_t == NULL || !adrInst_t->is_known_instance()) { - return true; +bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { + assert((t_oop != NULL), "sanity"); + if (t_oop->is_known_instance()) { + // The instance_id is set only for scalar-replaceable allocations which + // are not passed as arguments according to Escape Analysis. + return false; } - // The instance_id is set only for scalar-replaceable allocations which - // are not passed as arguments according to Escape Analysis. - return false; + if (t_oop->is_ptr_to_boxed_value()) { + ciKlass* boxing_klass = t_oop->klass(); + if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) { + // Skip unrelated boxing methods. 
+ Node* proj = proj_out(TypeFunc::Parms); + if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) { + return false; + } + } + if (is_CallJava() && as_CallJava()->method() != NULL) { + ciMethod* meth = as_CallJava()->method(); + if (meth->is_accessor()) { + return false; + } + // May modify (by reflection) if a boxing object is passed + // as argument or returned. + if (returns_pointer() && (proj_out(TypeFunc::Parms) != NULL)) { + Node* proj = proj_out(TypeFunc::Parms); + const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr(); + if ((inst_t != NULL) && (!inst_t->klass_is_exact() || + (inst_t->klass() == boxing_klass))) { + return true; + } + } + const TypeTuple* d = tf()->domain(); + for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { + const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr(); + if ((inst_t != NULL) && (!inst_t->klass_is_exact() || + (inst_t->klass() == boxing_klass))) { + return true; + } + } + return false; + } + } + return true; } // Does this call have a direct reference to n other than debug information? @@ -1020,6 +1063,7 @@ int scloff = jvms->scloff(); int endoff = jvms->endoff(); assert(endoff == (int)req(), "no other states or debug info after me"); + assert(jvms->scl_size() == 0, "parsed code should not have scalar objects"); Node* top = Compile::current()->top(); for (uint i = 0; i < grow_by; i++) { ins_req(monoff, top); @@ -1035,6 +1079,7 @@ const int MonitorEdges = 2; assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges"); assert(req() == jvms()->endoff(), "correct sizing"); + assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects"); int nextmon = jvms()->scloff(); if (GenerateSynchronizationCode) { add_req(lock->box_node()); @@ -1050,6 +1095,7 @@ void SafePointNode::pop_monitor() { // Delete last monitor from debug info + assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects"); debug_only(int num_before_pop = jvms()->nof_monitors()); const int MonitorEdges = (1<<JVMState::logMonitorEdges); int scloff = jvms()->scloff(); @@ -1154,6 +1200,7 @@ init_class_id(Class_Allocate); init_flags(Flag_is_macro); _is_scalar_replaceable = false; + _is_non_escaping = false; Node *topnode = C->top(); init_req( TypeFunc::Control , ctrl ); @@ -1169,8 +1216,6 @@ } //============================================================================= -uint AllocateArrayNode::size_of() const { return sizeof(*this); } - Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (remove_dead_region(phase, can_reshape)) return this; // Don't bother trying to transform a dead node @@ -1235,6 +1280,8 @@ // - the narrow_length is 0 // - the narrow_length is not wider than length assert(narrow_length_type == TypeInt::ZERO || + length_type->is_con() && narrow_length_type->is_con() && + (narrow_length_type->_hi <= length_type->_lo) || (narrow_length_type->_hi <= length_type->_hi && narrow_length_type->_lo >= length_type->_lo), "narrow type must be narrower than length type"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/callnode.hpp --- a/src/share/vm/opto/callnode.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/callnode.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -49,6 +49,7 @@ class CallLeafNoFPNode; class AllocateNode; class AllocateArrayNode; +class BoxLockNode; class LockNode; class UnlockNode; class JVMState; @@ -235,7 +236,6 @@ int loc_size() const { return stkoff() - locoff(); } int stk_size() const { return monoff() - stkoff(); } - int arg_size() const { return monoff() - argoff(); } int mon_size()
const { return scloff() - monoff(); } int scl_size() const { return endoff() - scloff(); } @@ -298,6 +298,7 @@ // Miscellaneous utility functions JVMState* clone_deep(Compile* C) const; // recursively clones caller chain JVMState* clone_shallow(Compile* C) const; // retains uncloned caller + void set_map_deep(SafePointNode *map);// reset map for all callers #ifndef PRODUCT void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const; @@ -439,7 +440,7 @@ static bool needs_polling_address_input(); #ifndef PRODUCT - virtual void dump_spec(outputStream *st) const; + virtual void dump_spec(outputStream *st) const; #endif }; @@ -554,10 +555,10 @@ virtual bool guaranteed_safepoint() { return true; } // For macro nodes, the JVMState gets modified during expansion, so when cloning // the node the JVMState must be cloned. - virtual void clone_jvms() { } // default is not to clone + virtual void clone_jvms(Compile* C) { } // default is not to clone // Returns true if the call may modify n - virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase); + virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase); // Does this node have a use of n other than in debug information? bool has_non_debug_use(Node *n); // Returns the unique CheckCastPP of a call @@ -630,9 +631,15 @@ virtual uint cmp( const Node &n ) const; virtual uint size_of() const; // Size is bigger public: - CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci) + CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci) : CallJavaNode(tf, addr, method, bci), _name(NULL) { init_class_id(Class_CallStaticJava); + if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) { + init_flags(Flag_is_macro); + C->add_macro_node(this); + } + _is_scalar_replaceable = false; + _is_non_escaping = false; } CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci, const TypePtr* adr_type) @@ -640,13 +647,31 @@ init_class_id(Class_CallStaticJava); // This node calls a runtime stub, which often has narrow memory effects. _adr_type = adr_type; + _is_scalar_replaceable = false; + _is_non_escaping = false; } - const char *_name; // Runtime wrapper name + const char *_name; // Runtime wrapper name + + // Result of Escape Analysis + bool _is_scalar_replaceable; + bool _is_non_escaping; // If this is an uncommon trap, return the request code, else zero. int uncommon_trap_request() const; static int extract_uncommon_trap_request(const Node* call); + bool is_boxing_method() const { + return is_macro() && (method() != NULL) && method()->is_boxing_method(); + } + // Later inlining modifies the JVMState, so we need to clone it + // when the call node is cloned (because it is macro node). 
+ virtual void clone_jvms(Compile* C) { + if ((jvms() != NULL) && is_boxing_method()) { + set_jvms(jvms()->clone_deep(C)); + jvms()->set_map_deep(this); + } + } + virtual int Opcode() const; #ifndef PRODUCT virtual void dump_spec(outputStream *st) const; @@ -748,12 +773,12 @@ ParmLimit }; - static const TypeFunc* alloc_type() { + static const TypeFunc* alloc_type(const Type* t) { const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms); fields[AllocSize] = TypeInt::POS; fields[KlassNode] = TypeInstPtr::NOTNULL; fields[InitialTest] = TypeInt::BOOL; - fields[ALength] = TypeInt::INT; // length (can be a bad length) + fields[ALength] = t; // length (can be a bad length) const TypeTuple *domain = TypeTuple::make(ParmLimit, fields); @@ -766,21 +791,26 @@ return TypeFunc::make(domain, range); } - bool _is_scalar_replaceable; // Result of Escape Analysis + // Result of Escape Analysis + bool _is_scalar_replaceable; + bool _is_non_escaping; virtual uint size_of() const; // Size is bigger AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, Node *size, Node *klass_node, Node *initial_test); // Expansion modifies the JVMState, so we need to clone it - virtual void clone_jvms() { - set_jvms(jvms()->clone_deep(Compile::current())); + virtual void clone_jvms(Compile* C) { + if (jvms() != NULL) { + set_jvms(jvms()->clone_deep(C)); + jvms()->set_map_deep(this); + } } virtual int Opcode() const; virtual uint ideal_reg() const { return Op_RegP; } virtual bool guaranteed_safepoint() { return false; } // allocations do not modify their arguments - virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false;} + virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;} // Pattern-match a possible usage of AllocateNode. // Return null if no allocation is recognized. @@ -815,10 +845,6 @@ // are defined in graphKit.cpp, which sets up the bidirectional relation.) InitializeNode* initialization(); - // Return the corresponding storestore barrier (or null if none). - // Walks out edges to find it... - MemBarStoreStoreNode* storestore(); - // Convenience for initialization->maybe_set_complete(phase) bool maybe_set_complete(PhaseGVN* phase); }; @@ -840,7 +866,6 @@ set_req(AllocateNode::ALength, count_val); } virtual int Opcode() const; - virtual uint size_of() const; // Size is bigger virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); // Dig the length operand out of a array allocation site. @@ -918,7 +943,7 @@ void set_nested() { _kind = Nested; set_eliminated_lock_counter(); } // locking does not modify its arguments - virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase){ return false;} + virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;} #ifndef PRODUCT void create_lock_counter(JVMState* s); @@ -965,8 +990,11 @@ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); // Expansion modifies the JVMState, so we need to clone it - virtual void clone_jvms() { - set_jvms(jvms()->clone_deep(Compile::current())); + virtual void clone_jvms(Compile* C) { + if (jvms() != NULL) { + set_jvms(jvms()->clone_deep(C)); + jvms()->set_map_deep(this); + } } bool is_nested_lock_region(); // Is this Lock nested? 
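The clone_jvms(Compile*) overrides above all share one pattern: when a macro node is cloned, its whole JVMState caller chain is deep-cloned, and every level of the clone is then re-pointed at the new node via set_map_deep. A toy model of that pattern (State and its members are stand-ins, and the loop here walks the entire chain, which simplifies the original):

// Toy model of an inline-chain debug state: each level records its caller
// and the safepoint map it belongs to.
struct State {
  State* caller;
  void*  map;

  // Recursively clone the whole caller chain (mirrors clone_deep above).
  State* clone_deep() const {
    State* c = new State(*this);
    if (c->caller != nullptr) {
      c->caller = c->caller->clone_deep();
    }
    return c;
  }

  // Re-point every level at the new owning node (mirrors set_map_deep
  // above); without this, the clone would still reference the original
  // node's map and the two nodes would share debug state.
  void set_map_deep(void* m) {
    for (State* p = this; p != nullptr; p = p->caller) {
      p->map = m;
    }
  }
};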
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/cfgnode.cpp --- a/src/share/vm/opto/cfgnode.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/cfgnode.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -806,7 +806,7 @@ Node *in = ophi->in(i); if (in == NULL || igvn->type(in) == Type::TOP) continue; - Node *opt = MemNode::optimize_simple_memory_chain(in, at, igvn); + Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, NULL, igvn); PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL; if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) { opt = node_map[optphi->_idx]; @@ -1921,7 +1921,7 @@ const TypePtr* at = adr_type(); for( uint i=1; i= _max_lrg_id) { + return lrg; + } + + uint next = _uf_map[lrg]; + while (next != lrg) { // Scan chain of equivalences + assert(next < lrg, "always union smaller"); + lrg = next; // until find a fixed-point + next = _uf_map[lrg]; + } + return next; +} + //------------------------------Chaitin---------------------------------------- PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) : PhaseRegAlloc(unique, cfg, matcher, @@ -153,13 +219,13 @@ #else NULL #endif - ), - _names(unique), _uf_map(unique), - _maxlrg(0), _live(0), - _spilled_once(Thread::current()->resource_area()), - _spilled_twice(Thread::current()->resource_area()), - _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0), - _oldphi(unique) + ) + , _lrg_map(unique) + , _live(0) + , _spilled_once(Thread::current()->resource_area()) + , _spilled_twice(Thread::current()->resource_area()) + , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0) + , _oldphi(unique) #ifndef PRODUCT , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling")) #endif @@ -168,7 +234,6 @@ _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq); - uint i,j; // Build a list of basic blocks, sorted by frequency _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks ); // Experiment with sorting strategies to speed compilation @@ -176,30 +241,30 @@ Block **buckets[NUMBUCKS]; // Array of buckets uint buckcnt[NUMBUCKS]; // Array of bucket counters double buckval[NUMBUCKS]; // Array of bucket value cutoffs - for( i = 0; i < NUMBUCKS; i++ ) { - buckets[i] = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks ); + for (uint i = 0; i < NUMBUCKS; i++) { + buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks); buckcnt[i] = 0; // Bump by three orders of magnitude each time cutoff *= 0.001; buckval[i] = cutoff; - for( j = 0; j < _cfg._num_blocks; j++ ) { + for (uint j = 0; j < _cfg._num_blocks; j++) { buckets[i][j] = NULL; } } // Sort blocks into buckets - for( i = 0; i < _cfg._num_blocks; i++ ) { - for( j = 0; j < NUMBUCKS; j++ ) { - if( (j == NUMBUCKS-1) || (_cfg._blocks[i]->_freq > buckval[j]) ) { + for (uint i = 0; i < _cfg._num_blocks; i++) { + for (uint j = 0; j < NUMBUCKS; j++) { + if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) { // Assign block to end of list for appropriate bucket buckets[j][buckcnt[j]++] = _cfg._blocks[i]; - break; // kick out of inner loop + break; // kick out of inner loop } } } // Dump buckets into final block array uint blkcnt = 0; - for( i = 0; i < NUMBUCKS; i++ ) { - for( j = 0; j < buckcnt[i]; j++ ) { + for (uint i = 0; i < NUMBUCKS; i++) { + for (uint j = 0; j < buckcnt[i]; j++) { _blks[blkcnt++] = buckets[i][j]; } } @@ -207,6 +272,77 @@ assert(blkcnt == _cfg._num_blocks, "Block array not totally filled"); } +//------------------------------Union------------------------------------------ +// 
union 2 sets together. +void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) { + uint src = _lrg_map.find(src_n); + uint dst = _lrg_map.find(dst_n); + assert(src, ""); + assert(dst, ""); + assert(src < _lrg_map.max_lrg_id(), "oob"); + assert(dst < _lrg_map.max_lrg_id(), "oob"); + assert(src < dst, "always union smaller"); + _lrg_map.uf_map(dst, src); +} + +//------------------------------new_lrg---------------------------------------- +void PhaseChaitin::new_lrg(const Node *x, uint lrg) { + // Make the Node->LRG mapping + _lrg_map.extend(x->_idx,lrg); + // Make the Union-Find mapping an identity function + _lrg_map.uf_extend(lrg, lrg); +} + + +bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) { + Block *bcon = _cfg._bbs[con->_idx]; + uint cindex = bcon->find_node(con); + Node *con_next = bcon->_nodes[cindex+1]; + if (con_next->in(0) != con || !con_next->is_MachProj()) { + return false; // No MachProj's follow + } + + // Copy kills after the cloned constant + Node *kills = con_next->clone(); + kills->set_req(0, copy); + b->_nodes.insert(idx, kills); + _cfg._bbs.map(kills->_idx, b); + new_lrg(kills, max_lrg_id); + return true; +} + +//------------------------------compact---------------------------------------- +// Renumber the live ranges to compact them. Makes the IFG smaller. +void PhaseChaitin::compact() { + // Currently the _uf_map contains a series of short chains which are headed + // by a self-cycle. All the chains run from big numbers to little numbers. + // The Find() call chases the chains & shortens them for the next Find call. + // We are going to change this structure slightly. Numbers above a moving + // wave 'i' are unchanged. Numbers below 'j' point directly to their + // compacted live range with no further chaining. There are no chains or + // cycles below 'i', so the Find call no longer works. + uint j=1; + uint i; + for (i = 1; i < _lrg_map.max_lrg_id(); i++) { + uint lr = _lrg_map.uf_live_range_id(i); + // Ignore unallocated live ranges + if (!lr) { + continue; + } + assert(lr <= i, ""); + _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr)); + } + // Now change the Node->LR mapping to reflect the compacted names + uint unique = _lrg_map.size(); + for (i = 0; i < unique; i++) { + uint lrg_id = _lrg_map.live_range_id(i); + _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id)); + } + + // Reset the Union-Find mapping + _lrg_map.reset_uf_map(j); +} + void PhaseChaitin::Register_Allocate() { // Above the OLD FP (and in registers) are the incoming arguments. Stack @@ -231,14 +367,12 @@ // all copy-related live ranges low and then using the max copy-related // live range as a cut-off for LIVE and the IFG. In other words, I can // build a subset of LIVE and IFG just for copies. - PhaseLive live(_cfg,_names,&live_arena); + PhaseLive live(_cfg, _lrg_map.names(), &live_arena); // Need IFG for coalescing and coloring - PhaseIFG ifg( &live_arena ); + PhaseIFG ifg(&live_arena); _ifg = &ifg; - if (C->unique() > _names.Size()) _names.extend(C->unique()-1, 0); - // Come out of SSA world to the Named world. Assign (virtual) registers to // Nodes. Use the same register for all inputs and the output of PhiNodes // - effectively ending SSA form.
This requires either coalescing live @@ -258,9 +392,9 @@ _live = NULL; // Mark live as being not available rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); // Empty IFG + ifg.init(_lrg_map.max_lrg_id()); // Empty IFG gather_lrg_masks( false ); // Collect LRG masks - live.compute( _maxlrg ); // Compute liveness + live.compute(_lrg_map.max_lrg_id()); // Compute liveness _live = &live; // Mark LIVE as being available } @@ -270,19 +404,19 @@ // across any GC point where the derived value is live. So this code looks // at all the GC points, and "stretches" the live range of any base pointer // to the GC point. - if( stretch_base_pointer_live_ranges(&live_arena) ) { - NOT_PRODUCT( Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler); ) + if (stretch_base_pointer_live_ranges(&live_arena)) { + NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);) // Since some live range stretched, I need to recompute live _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); - gather_lrg_masks( false ); - live.compute( _maxlrg ); + ifg.init(_lrg_map.max_lrg_id()); + gather_lrg_masks(false); + live.compute(_lrg_map.max_lrg_id()); _live = &live; } // Create the interference graph using virtual copies - build_ifg_virtual( ); // Include stack slots this time + build_ifg_virtual(); // Include stack slots this time // Aggressive (but pessimistic) copy coalescing. // This pass works on virtual copies. Any virtual copies which are not @@ -296,8 +430,8 @@ // given Node and search them for an instance, i.e., time O(#MaxLRG)). _ifg->SquareUp(); - PhaseAggressiveCoalesce coalesce( *this ); - coalesce.coalesce_driver( ); + PhaseAggressiveCoalesce coalesce(*this); + coalesce.coalesce_driver(); // Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do // not match the Phi itself, insert a copy. 
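The LiveRangeMap machinery introduced above (find_compress, Union, compact) is a textbook union-find with path compression, kept under the invariant that every chain runs from larger live range ids to smaller ones. A generic, self-contained version of the same structure (not HotSpot code):

#include <cassert>
#include <vector>

// Minimal union-find with the same "always union smaller" invariant the
// allocator relies on: chains always run from big ids to small ids.
struct UnionFind {
  std::vector<unsigned> parent;

  explicit UnionFind(unsigned n) : parent(n) {
    for (unsigned i = 0; i < n; i++) parent[i] = i; // identity map
  }

  // Find with path compression (the find_compress above): after the walk,
  // every id on the chain points straight at the root.
  unsigned find(unsigned id) {
    unsigned root = id;
    while (parent[root] != root) {
      assert(parent[root] < root && "always union smaller");
      root = parent[root];
    }
    while (id != root) {          // second pass: compress the chain
      unsigned next = parent[id];
      parent[id] = root;
      id = next;
    }
    return root;
  }

  // Union keeps the smaller id as the representative, preserving the
  // big-to-small chain invariant that find() asserts.
  void join(unsigned a, unsigned b) {
    unsigned ra = find(a), rb = find(b);
    if (ra == rb) return;
    if (ra < rb) parent[rb] = ra; else parent[ra] = rb;
  }
};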
coalesce.insert_copies(_matcher); @@ -310,28 +444,36 @@ _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); + ifg.init(_lrg_map.max_lrg_id()); gather_lrg_masks( true ); - live.compute( _maxlrg ); + live.compute(_lrg_map.max_lrg_id()); _live = &live; } // Build physical interference graph uint must_spill = 0; - must_spill = build_ifg_physical( &live_arena ); + must_spill = build_ifg_physical(&live_arena); // If we have a guaranteed spill, might as well spill now - if( must_spill ) { - if( !_maxlrg ) return; + if (must_spill) { + if(!_lrg_map.max_lrg_id()) { + return; + } // Bail out if unique gets too large (ie - unique > MaxNodeLimit) C->check_node_count(10*must_spill, "out of nodes before split"); - if (C->failing()) return; - _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere + if (C->failing()) { + return; + } + + uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere + _lrg_map.set_max_lrg_id(new_max_lrg_id); // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor) // or we failed to split C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split"); - if (C->failing()) return; + if (C->failing()) { + return; + } - NOT_PRODUCT( C->verify_graph_edges(); ) + NOT_PRODUCT(C->verify_graph_edges();) compact(); // Compact LRGs; return new lower max lrg @@ -340,23 +482,23 @@ _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); // Build a new interference graph + ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph gather_lrg_masks( true ); // Collect intersect mask - live.compute( _maxlrg ); // Compute LIVE + live.compute(_lrg_map.max_lrg_id()); // Compute LIVE _live = &live; } - build_ifg_physical( &live_arena ); + build_ifg_physical(&live_arena); _ifg->SquareUp(); _ifg->Compute_Effective_Degree(); // Only do conservative coalescing if requested - if( OptoCoalesce ) { + if (OptoCoalesce) { // Conservative (and pessimistic) copy coalescing of those spills - PhaseConservativeCoalesce coalesce( *this ); + PhaseConservativeCoalesce coalesce(*this); // If max live ranges greater than cutoff, don't color the stack. // This cutoff can be larger than below since it is only done once. 
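Register_Allocate above repeats one recipe after every structural change: discard liveness and the interference graph, then rebuild both for the new max live range id before trying to color again. Its control flow, reduced to a schematic in which every function is a hypothetical stand-in:

#include <cstdio>

// Hypothetical stand-ins for the allocator phases; only the control flow
// of the rebuild loop reflects the code above.
static void rebuild_live_and_ifg(unsigned max_lrg_id) { (void)max_lrg_id; /* recompute LIVE, rebuild IFG */ }
static unsigned color_and_count_spills() { return 0; /* Simplify + Select */ }
static unsigned split_spilled_lrgs(unsigned max_lrg_id) { return max_lrg_id; /* Split() grows the id space */ }
static void compact_lrgs() { /* renumber live ranges, shrinking the id space */ }

void register_allocate_outline(unsigned max_lrg_id) {
  rebuild_live_and_ifg(max_lrg_id);
  while (color_and_count_spills() > 0) {
    max_lrg_id = split_spilled_lrgs(max_lrg_id); // splitting creates new live ranges
    compact_lrgs();                              // renumber ids to keep the IFG small
    rebuild_live_and_ifg(max_lrg_id);            // stale LIVE/IFG must be rebuilt
  }
  std::printf("colored with %u live ranges\n", max_lrg_id);
}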
- coalesce.coalesce_driver( ); + coalesce.coalesce_driver(); } - compress_uf_map_for_nodes(); + _lrg_map.compress_uf_map_for_nodes(); #ifdef ASSERT verify(&live_arena, true); @@ -390,13 +532,18 @@ } } - if( !_maxlrg ) return; - _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere + if (!_lrg_map.max_lrg_id()) { + return; + } + uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere + _lrg_map.set_max_lrg_id(new_max_lrg_id); // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor) - C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after split"); - if (C->failing()) return; + C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split"); + if (C->failing()) { + return; + } - compact(); // Compact LRGs; return new lower max lrg + compact(); // Compact LRGs; return new lower max lrg // Nuke the live-ness and interference graph and LiveRanGe info { @@ -404,26 +551,26 @@ _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); + ifg.init(_lrg_map.max_lrg_id()); // Create LiveRanGe array. // Intersect register masks for all USEs and DEFs - gather_lrg_masks( true ); - live.compute( _maxlrg ); + gather_lrg_masks(true); + live.compute(_lrg_map.max_lrg_id()); _live = &live; } - must_spill = build_ifg_physical( &live_arena ); + must_spill = build_ifg_physical(&live_arena); _ifg->SquareUp(); _ifg->Compute_Effective_Degree(); // Only do conservative coalescing if requested - if( OptoCoalesce ) { + if (OptoCoalesce) { // Conservative (and pessimistic) copy coalescing - PhaseConservativeCoalesce coalesce( *this ); + PhaseConservativeCoalesce coalesce(*this); // Check for few live ranges determines how aggressive coalesce is. - coalesce.coalesce_driver( ); + coalesce.coalesce_driver(); } - compress_uf_map_for_nodes(); + _lrg_map.compress_uf_map_for_nodes(); #ifdef ASSERT verify(&live_arena, true); #endif @@ -435,7 +582,7 @@ // Select colors by re-inserting LRGs back into the IFG in reverse order. // Return whether or not something spills. - spills = Select( ); + spills = Select(); } // Count number of Simplify-Select trips per coloring success. @@ -452,9 +599,12 @@ // max_reg is past the largest *register* used. // Convert that to a frame_slot number. - if( _max_reg <= _matcher._new_SP ) + if (_max_reg <= _matcher._new_SP) { _framesize = C->out_preserve_stack_slots(); - else _framesize = _max_reg -_matcher._new_SP; + } + else { + _framesize = _max_reg -_matcher._new_SP; + } assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough"); // This frame must preserve the required fp alignment @@ -462,8 +612,9 @@ assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" ); #ifndef PRODUCT _total_framesize += _framesize; - if( (int)_framesize > _max_framesize ) + if ((int)_framesize > _max_framesize) { _max_framesize = _framesize; + } #endif // Convert CISC spills @@ -475,15 +626,17 @@ log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing()); } - if (C->failing()) return; + if (C->failing()) { + return; + } - NOT_PRODUCT( C->verify_graph_edges(); ) + NOT_PRODUCT(C->verify_graph_edges();) // Move important info out of the live_arena to longer lasting storage. - alloc_node_regs(_names.Size()); - for (uint i=0; i < _names.Size(); i++) { - if (_names[i]) { // Live range associated with Node? 
- LRG &lrg = lrgs(_names[i]); + alloc_node_regs(_lrg_map.size()); + for (uint i=0; i < _lrg_map.size(); i++) { + if (_lrg_map.live_range_id(i)) { // Live range associated with Node? + LRG &lrg = lrgs(_lrg_map.live_range_id(i)); if (!lrg.alive()) { set_bad(i); } else if (lrg.num_regs() == 1) { @@ -537,11 +690,11 @@ Node *n = b->_nodes[j]; // Pre-color to the zero live range, or pick virtual register const RegMask &rm = n->out_RegMask(); - _names.map( n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0 ); + _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0); } } // Reset the Union-Find mapping to be identity - reset_uf_map(lr_counter); + _lrg_map.reset_uf_map(lr_counter); } @@ -551,7 +704,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { // Nail down the frame pointer live range - uint fp_lrg = n2lidx(_cfg._root->in(1)->in(TypeFunc::FramePtr)); + uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr)); lrgs(fp_lrg)._cost += 1e12; // Cost is infinite // For all blocks @@ -566,14 +719,14 @@ uint idx = n->is_Copy(); // Get virtual register number, same as LiveRanGe index - uint vreg = n2lidx(n); + uint vreg = _lrg_map.live_range_id(n); LRG &lrg = lrgs(vreg); if( vreg ) { // No vreg means un-allocable (e.g. memory) // Collect has-copy bit if( idx ) { lrg._has_copy = 1; - uint clidx = n2lidx(n->in(idx)); + uint clidx = _lrg_map.live_range_id(n->in(idx)); LRG &copy_src = lrgs(clidx); copy_src._has_copy = 1; } @@ -773,8 +926,10 @@ } // Prepare register mask for each input for( uint k = input_edge_start; k < cnt; k++ ) { - uint vreg = n2lidx(n->in(k)); - if( !vreg ) continue; + uint vreg = _lrg_map.live_range_id(n->in(k)); + if (!vreg) { + continue; + } // If this instruction is CISC Spillable, add the flags // bit to its appropriate input @@ -857,7 +1012,7 @@ } // end for all blocks // Final per-liverange setup - for (uint i2=0; i2<_maxlrg; i2++) { + for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) { LRG &lrg = lrgs(i2); assert(!lrg._is_vector || !lrg._fat_proj, "sanity"); if (lrg.num_regs() > 1 && !lrg._fat_proj) { @@ -879,7 +1034,7 @@ // The bit is checked in Simplify. void PhaseChaitin::set_was_low() { #ifdef ASSERT - for( uint i = 1; i < _maxlrg; i++ ) { + for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) { int size = lrgs(i).num_regs(); uint old_was_lo = lrgs(i)._was_lo; lrgs(i)._was_lo = 0; @@ -913,7 +1068,7 @@ // Compute cost/area ratio, in case we spill. Build the lo-degree list. void PhaseChaitin::cache_lrg_info( ) { - for( uint i = 1; i < _maxlrg; i++ ) { + for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) { LRG &lrg = lrgs(i); // Check for being of low degree: means we can be trivially colored.
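The low-degree test referenced above is the heart of Chaitin-style simplification: a live range whose neighbors can never occupy all of the registers available to it is guaranteed a color, so it can be pulled out of the graph early and colored last. In generic form (the register counts are stand-in numbers, not HotSpot masks):

#include <vector>

// Classic "trivially colorable" test: a live range competing for k
// registers always gets one if its neighbors can block fewer than k.
bool is_lo_degree(const std::vector<unsigned>& neighbor_reg_counts,
                  unsigned k /* registers available to this live range */) {
  unsigned worst_case = 0;
  for (unsigned n : neighbor_reg_counts) {
    worst_case += n;        // each neighbor may occupy this many registers
  }
  return worst_case < k;    // neighbors can never exhaust all k registers
}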
@@ -949,10 +1104,10 @@ // Warm up the lo-degree no-copy list int lo_no_copy = 0; - for( uint i = 1; i < _maxlrg; i++ ) { - if( (lrgs(i).lo_degree() && !lrgs(i)._has_copy) || + for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) { + if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) || !lrgs(i).alive() || - lrgs(i)._must_spill ) { + lrgs(i)._must_spill) { lrgs(i)._next = lo_no_copy; lo_no_copy = i; } @@ -1163,7 +1318,7 @@ OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) { // Check for "at_risk" LRG's - uint risk_lrg = Find(lrg._risk_bias); + uint risk_lrg = _lrg_map.find(lrg._risk_bias); if( risk_lrg != 0 ) { // Walk the colored neighbors of the "at_risk" candidate // Choose a color which is both legal and already taken by a neighbor @@ -1179,7 +1334,7 @@ } } - uint copy_lrg = Find(lrg._copy_bias); + uint copy_lrg = _lrg_map.find(lrg._copy_bias); if( copy_lrg != 0 ) { // If he has a color, if( !(*(_ifg->_yanked))[copy_lrg] ) { @@ -1423,10 +1578,10 @@ void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) { if( _spilled_once.test(src->_idx) ) { _spilled_once.set(dst->_idx); - lrgs(Find(dst))._was_spilled1 = 1; + lrgs(_lrg_map.find(dst))._was_spilled1 = 1; if( _spilled_twice.test(src->_idx) ) { _spilled_twice.set(dst->_idx); - lrgs(Find(dst))._was_spilled2 = 1; + lrgs(_lrg_map.find(dst))._was_spilled2 = 1; } } } @@ -1471,7 +1626,7 @@ MachNode *mach = n->as_Mach(); inp = mach->operand_index(inp); Node *src = n->in(inp); // Value to load or store - LRG &lrg_cisc = lrgs( Find_const(src) ); + LRG &lrg_cisc = lrgs(_lrg_map.find_const(src)); OptoReg::Name src_reg = lrg_cisc.reg(); // Doubles record the HIGH register of an adjacent pair. src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs()); @@ -1554,9 +1709,9 @@ Block *startb = _cfg._bbs[C->top()->_idx]; startb->_nodes.insert(startb->find_node(C->top()), base ); _cfg._bbs.map( base->_idx, startb ); - assert (n2lidx(base) == 0, "should not have LRG yet"); + assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet"); } - if (n2lidx(base) == 0) { + if (_lrg_map.live_range_id(base) == 0) { new_lrg(base, maxlrg++); } assert(base->in(0) == _cfg._root && @@ -1566,7 +1721,7 @@ } // Check for AddP-related opcodes - if( !derived->is_Phi() ) { + if (!derived->is_Phi()) { assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name())); Node *base = derived->in(AddPNode::Base); derived_base_map[derived->_idx] = base; @@ -1629,9 +1784,9 @@ // base pointer that is live across the Safepoint for oopmap building. The // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the // required edge set. -bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { +bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) { int must_recompute_live = false; - uint maxlrg = _maxlrg; + uint maxlrg = _lrg_map.max_lrg_id(); Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique()); memset( derived_base_map, 0, sizeof(Node*)*C->unique() ); @@ -1669,15 +1824,18 @@ } // Get value being defined - uint lidx = n2lidx(n); - if( lidx && lidx < _maxlrg /* Ignore the occasional brand-new live range */) { + uint lidx = _lrg_map.live_range_id(n); + // Ignore the occasional brand-new live range + if (lidx && lidx < _lrg_map.max_lrg_id()) { // Remove from live-out set liveout.remove(lidx); // Copies do not define a new value and so do not interfere. // Remove the copies source from the liveout set before interfering. 
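stretch_base_pointer_live_ranges, shown in the surrounding hunks, enforces one rule: wherever a derived pointer is live across a GC point, its base must be recorded live as well, as a (derived, base) input pair on the safepoint. A simplified standalone model of that bookkeeping (Safepoint and the integer ids are illustrative, not HotSpot's types):

#include <set>
#include <utility>
#include <vector>

struct Safepoint {
  std::vector<std::pair<int, int>> debug_pairs; // (derived, base) inputs
};

// Record the pair and, if the base was not already live across this GC
// point, stretch its live range to cover it. Returns true when global
// liveness has become stale and must be recomputed.
bool stretch_base(Safepoint& sp, int derived_id, int base_id,
                  std::set<int>& liveout) {
  sp.debug_pairs.emplace_back(derived_id, base_id);
  if (liveout.count(base_id) == 0) { // base not currently live here
    liveout.insert(base_id);         // stretch it to the GC point
    return true;                     // liveness info is now incorrect
  }
  return false;
}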
uint idx = n->is_Copy(); - if( idx ) liveout.remove( n2lidx(n->in(idx)) ); + if (idx) { + liveout.remove(_lrg_map.live_range_id(n->in(idx))); + } } // Found a safepoint? @@ -1695,21 +1853,21 @@ derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity"); // If its an OOP with a non-zero offset, then it is derived. if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) { - Node *base = find_base_for_derived( derived_base_map, derived, maxlrg ); - assert( base->_idx < _names.Size(), "" ); + Node *base = find_base_for_derived(derived_base_map, derived, maxlrg); + assert(base->_idx < _lrg_map.size(), ""); // Add reaching DEFs of derived pointer and base pointer as a // pair of inputs - n->add_req( derived ); - n->add_req( base ); + n->add_req(derived); + n->add_req(base); // See if the base pointer is already live to this point. // Since I'm working on the SSA form, live-ness amounts to // reaching def's. So if I find the base's live range then // I know the base's def reaches here. - if( (n2lidx(base) >= _maxlrg ||// (Brand new base (hence not live) or - !liveout.member( n2lidx(base) ) ) && // not live) AND - (n2lidx(base) > 0) && // not a constant - _cfg._bbs[base->_idx] != b ) { // base not def'd in blk) + if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or + !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND + (_lrg_map.live_range_id(base) > 0) && // not a constant + _cfg._bbs[base->_idx] != b) { // base not def'd in blk) // Base pointer is not currently live. Since I stretched // the base pointer to here and it crosses basic-block // boundaries, the global live info is now incorrect. @@ -1721,11 +1879,12 @@ } // End of if found a GC point // Make all inputs live - if( !n->is_Phi() ) { // Phi function uses come from prior block - for( uint k = 1; k < n->req(); k++ ) { - uint lidx = n2lidx(n->in(k)); - if( lidx < _maxlrg ) - liveout.insert( lidx ); + if (!n->is_Phi()) { // Phi function uses come from prior block + for (uint k = 1; k < n->req(); k++) { + uint lidx = _lrg_map.live_range_id(n->in(k)); + if (lidx < _lrg_map.max_lrg_id()) { + liveout.insert(lidx); + } } } @@ -1733,11 +1892,12 @@ liveout.clear(); // Free the memory used by liveout. } // End of forall blocks - _maxlrg = maxlrg; + _lrg_map.set_max_lrg_id(maxlrg); // If I created a new live range I need to recompute live - if( maxlrg != _ifg->_maxlrg ) + if (maxlrg != _ifg->_maxlrg) { must_recompute_live = true; + } return must_recompute_live != 0; } @@ -1745,16 +1905,17 @@ //------------------------------add_reference---------------------------------- // Extend the node to LRG mapping -void PhaseChaitin::add_reference( const Node *node, const Node *old_node ) { - _names.extend( node->_idx, n2lidx(old_node) ); + +void PhaseChaitin::add_reference(const Node *node, const Node *old_node) { + _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node)); } //------------------------------dump------------------------------------------- #ifndef PRODUCT -void PhaseChaitin::dump( const Node *n ) const { - uint r = (n->_idx < _names.Size() ) ? Find_const(n) : 0; +void PhaseChaitin::dump(const Node *n) const { + uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0; tty->print("L%d",r); - if( r && n->Opcode() != Op_Phi ) { + if (r && n->Opcode() != Op_Phi) { if( _node_regs ) { // Got a post-allocation copy of allocation? 
tty->print("["); OptoReg::Name second = get_reg_second(n); @@ -1775,11 +1936,13 @@ tty->print("/N%d\t",n->_idx); tty->print("%s === ", n->Name()); uint k; - for( k = 0; k < n->req(); k++) { + for (k = 0; k < n->req(); k++) { Node *m = n->in(k); - if( !m ) tty->print("_ "); + if (!m) { + tty->print("_ "); + } else { - uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0; + uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0; tty->print("L%d",r); // Data MultiNode's can have projections with no real registers. // Don't die while dumping them. @@ -1810,8 +1973,10 @@ if( k < n->len() && n->in(k) ) tty->print("| "); for( ; k < n->len(); k++ ) { Node *m = n->in(k); - if( !m ) break; - uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0; + if(!m) { + break; + } + uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0; tty->print("L%d",r); tty->print("/N%d ",m->_idx); } @@ -1839,7 +2004,7 @@ tty->print("{"); uint i; while ((i = elements.next()) != 0) { - tty->print("L%d ", Find_const(i)); + tty->print("L%d ", _lrg_map.find_const(i)); } tty->print_cr("}"); } @@ -1863,10 +2028,14 @@ // Dump LRG array tty->print("--- Live RanGe Array ---\n"); - for(uint i2 = 1; i2 < _maxlrg; i2++ ) { + for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) { tty->print("L%d: ",i2); - if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( ); - else tty->print_cr("new LRG"); + if (i2 < _ifg->_maxlrg) { + lrgs(i2).dump(); + } + else { + tty->print_cr("new LRG"); + } } tty->print_cr(""); @@ -1939,7 +2108,7 @@ // Post allocation, use direct mappings, no LRG info available print_reg( get_reg_first(n), this, buf ); } else { - uint lidx = Find_const(n); // Grab LRG number + uint lidx = _lrg_map.find_const(n); // Grab LRG number if( !_ifg ) { sprintf(buf,"L%d",lidx); // No register binding yet } else if( !lidx ) { // Special, not allocated value @@ -1968,7 +2137,7 @@ if( WizardMode && (PrintCompilation || PrintOpto) ) { // Display which live ranges need to be split and the allocator's state tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt); - for( uint bidx = 1; bidx < _maxlrg; bidx++ ) { + for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) { if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) { tty->print("L%d: ", bidx); lrgs(bidx).dump(); @@ -2099,14 +2268,17 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const { tty->print_cr("---dump of L%d---",lidx); - if( _ifg ) { - if( lidx >= _maxlrg ) { + if (_ifg) { + if (lidx >= _lrg_map.max_lrg_id()) { tty->print("Attempt to print live range index beyond max live range.\n"); return; } tty->print("L%d: ",lidx); - if( lidx < _ifg->_maxlrg ) lrgs(lidx).dump( ); - else tty->print_cr("new LRG"); + if (lidx < _ifg->_maxlrg) { + lrgs(lidx).dump(); + } else { + tty->print_cr("new LRG"); + } } if( _ifg && lidx < _ifg->_maxlrg) { tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx)); @@ -2121,8 +2293,8 @@ // For all instructions for( uint j = 0; j < b->_nodes.size(); j++ ) { Node *n = b->_nodes[j]; - if( Find_const(n) == lidx ) { - if( !dump_once++ ) { + if (_lrg_map.find_const(n) == lidx) { + if (!dump_once++) { tty->cr(); b->dump_head( &_cfg._bbs ); } @@ -2133,11 +2305,13 @@ uint cnt = n->req(); for( uint k = 1; k < cnt; k++ ) { Node *m = n->in(k); - if (!m) continue; // be robust in the dumper - if( Find_const(m) == lidx ) { - if( !dump_once++ ) { + if (!m) { + continue; // be robust in the dumper + } + if (_lrg_map.find_const(m) == lidx) { + if (!dump_once++) { tty->cr(); - b->dump_head( 
&_cfg._bbs ); + b->dump_head(&_cfg._bbs); } dump(n); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/chaitin.hpp --- a/src/share/vm/opto/chaitin.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/chaitin.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -265,18 +265,118 @@ int effective_degree( uint lidx ) const; }; -// TEMPORARILY REPLACED WITH COMMAND LINE FLAG +// The LiveRangeMap class is responsible for storing node to live range id mapping. +// Each node is mapped to a live range id (a virtual register). Nodes that are +// not considered for register allocation are given live range id 0. +class LiveRangeMap VALUE_OBJ_CLASS_SPEC { + +private: + + uint _max_lrg_id; + + // Union-find map. Declared as a short for speed. + // Indexed by live-range number, it returns the compacted live-range number + LRG_List _uf_map; + + // Map from Nodes to live ranges + LRG_List _names; + + // Straight out of Tarjan's union-find algorithm + uint find_compress(const Node *node) { + uint lrg_id = find_compress(_names[node->_idx]); + _names.map(node->_idx, lrg_id); + return lrg_id; + } + + uint find_compress(uint lrg); + +public: + + const LRG_List& names() { + return _names; + } + + uint max_lrg_id() const { + return _max_lrg_id; + } + + void set_max_lrg_id(uint max_lrg_id) { + _max_lrg_id = max_lrg_id; + } + + uint size() const { + return _names.Size(); + } + + uint live_range_id(uint idx) const { + return _names[idx]; + } + + uint live_range_id(const Node *node) const { + return _names[node->_idx]; + } + + uint uf_live_range_id(uint lrg_id) const { + return _uf_map[lrg_id]; + } -//// !!!!! Magic Constants need to move into ad file -#ifdef SPARC -//#define FLOAT_PRESSURE 30 /* SFLT_REG_mask.Size() - 1 */ -//#define INT_PRESSURE 23 /* NOTEMP_I_REG_mask.Size() - 1 */ -#define FLOAT_INCREMENT(regs) regs -#else -//#define FLOAT_PRESSURE 6 -//#define INT_PRESSURE 6 -#define FLOAT_INCREMENT(regs) 1 -#endif + void map(uint idx, uint lrg_id) { + _names.map(idx, lrg_id); + } + + void uf_map(uint dst_lrg_id, uint src_lrg_id) { + _uf_map.map(dst_lrg_id, src_lrg_id); + } + + void extend(uint idx, uint lrg_id) { + _names.extend(idx, lrg_id); + } + + void uf_extend(uint dst_lrg_id, uint src_lrg_id) { + _uf_map.extend(dst_lrg_id, src_lrg_id); + } + + LiveRangeMap(uint unique) + : _names(unique) + , _uf_map(unique) + , _max_lrg_id(0) {} + + uint find_id( const Node *n ) { + uint retval = live_range_id(n); + assert(retval == find(n),"Invalid node to lidx mapping"); + return retval; + } + + // Reset the Union-Find map to identity + void reset_uf_map(uint max_lrg_id); + + // Make all Nodes map directly to their final live range; no need for + // the Union-Find mapping after this call. + void compress_uf_map_for_nodes(); + + uint find(uint lidx) { + uint uf_lidx = _uf_map[lidx]; + return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx); + } + + // Convert a Node into a Live Range Index - a lidx + uint find(const Node *node) { + uint lidx = live_range_id(node); + uint uf_lidx = _uf_map[lidx]; + return (uf_lidx == lidx) ? 
uf_lidx : find_compress(node); + } + + // Like Find above, but no path compress, so bad asymptotic behavior + uint find_const(uint lrg) const; + + // Like Find above, but no path compress, so bad asymptotic behavior + uint find_const(const Node *node) const { + if(node->_idx >= _names.Size()) { + return 0; // not mapped, usual for debug dump + } + return find_const(_names[node->_idx]); + } +}; //------------------------------Chaitin---------------------------------------- // Briggs-Chaitin style allocation, mostly. @@ -286,7 +386,6 @@ int _trip_cnt; int _alternate; - uint _maxlrg; // Max live range number LRG &lrgs(uint idx) const { return _ifg->lrgs(idx); } PhaseLive *_live; // Liveness, used in the interference graph PhaseIFG *_ifg; // Interference graph (for original chunk) @@ -294,16 +393,6 @@ VectorSet _spilled_once; // Nodes that have been spilled VectorSet _spilled_twice; // Nodes that have been spilled twice - LRG_List _names; // Map from Nodes to Live RanGes - - // Union-find map. Declared as a short for speed. - // Indexed by live-range number, it returns the compacted live-range number - LRG_List _uf_map; - // Reset the Union-Find map to identity - void reset_uf_map( uint maxlrg ); - // Remove the need for the Union-Find mapping - void compress_uf_map_for_nodes( ); - // Combine the Live Range Indices for these 2 Nodes into a single live // range. Future requests for any Node in either live range will // return the live range index for the combined live range. @@ -322,7 +411,34 @@ // Helper functions for Split() uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray splits, int slidx ); uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray splits, int slidx ); - int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ); + + bool clone_projs(Block *b, uint idx, Node *con, Node *copy, LiveRangeMap &lrg_map) { + bool found_projs = clone_projs_shared(b, idx, con, copy, lrg_map.max_lrg_id()); + + if(found_projs) { + uint max_lrg_id = lrg_map.max_lrg_id(); + lrg_map.set_max_lrg_id(max_lrg_id + 1); + } + + return found_projs; + } + + //------------------------------clone_projs------------------------------------ + // After cloning some rematerialized instruction, clone any MachProj's that + // follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants + // use G3 as an address temp. + bool clone_projs(Block *b, uint idx, Node *con, Node *copy, uint &max_lrg_id) { + bool found_projs = clone_projs_shared(b, idx, con, copy, max_lrg_id); + + if(found_projs) { + max_lrg_id++; + } + + return found_projs; + } + + bool clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id); + Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru); // True if lidx is used before any real register is def'd in the block @@ -349,20 +465,11 @@ PhaseChaitin( uint unique, PhaseCFG &cfg, Matcher &matcher ); ~PhaseChaitin() {} - // Convert a Node into a Live Range Index - a lidx - uint Find( const Node *n ) { - uint lidx = n2lidx(n); - uint uf_lidx = _uf_map[lidx]; - return (uf_lidx == lidx) ? 
uf_lidx : Find_compress(n); - } - uint Find_const( uint lrg ) const; - uint Find_const( const Node *n ) const; + LiveRangeMap _lrg_map; // Do all the real work of allocate void Register_Allocate(); - uint n2lidx( const Node *n ) const { return _names[n->_idx]; } - float high_frequency_lrg() const { return _high_frequency_lrg; } #ifndef PRODUCT @@ -374,18 +481,6 @@ // all inputs to a PhiNode, effectively coalescing live ranges. Insert // copies as needed. void de_ssa(); - uint Find_compress( const Node *n ); - uint Find( uint lidx ) { - uint uf_lidx = _uf_map[lidx]; - return (uf_lidx == lidx) ? uf_lidx : Find_compress(lidx); - } - uint Find_compress( uint lidx ); - - uint Find_id( const Node *n ) { - uint retval = n2lidx(n); - assert(retval == Find(n),"Invalid node to lidx mapping"); - return retval; - } // Add edge between reg and everything in the vector. // Same as _ifg->add_vector(reg,live) EXCEPT use the RegMask diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/coalesce.cpp --- a/src/share/vm/opto/coalesce.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/coalesce.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -35,159 +35,11 @@ #include "opto/regmask.hpp" //============================================================================= -//------------------------------reset_uf_map----------------------------------- -void PhaseChaitin::reset_uf_map( uint maxlrg ) { - _maxlrg = maxlrg; - // Force the Union-Find mapping to be at least this large - _uf_map.extend(_maxlrg,0); - // Initialize it to be the ID mapping. - for( uint i=0; i<_maxlrg; i++ ) - _uf_map.map(i,i); -} - -//------------------------------compress_uf_map-------------------------------- -// Make all Nodes map directly to their final live range; no need for -// the Union-Find mapping after this call. -void PhaseChaitin::compress_uf_map_for_nodes( ) { - // For all Nodes, compress mapping - uint unique = _names.Size(); - for( uint i=0; i_idx]); - _names.map(n->_idx,lrg); - return lrg; -} - -//------------------------------Find_const------------------------------------- -// Like Find above, but no path compress, so bad asymptotic behavior -uint PhaseChaitin::Find_const( uint lrg ) const { - if( !lrg ) return lrg; // Ignore the zero LRG - // Off the end? This happens during debugging dumps when you got - // brand new live ranges but have not told the allocator yet. - if( lrg >= _maxlrg ) return lrg; - uint next = _uf_map[lrg]; - while( next != lrg ) { // Scan chain of equivalences - assert( next < lrg, "always union smaller" ); - lrg = next; // until find a fixed-point - next = _uf_map[lrg]; - } - return next; -} - -//------------------------------Find------------------------------------------- -// Like Find above, but no path compress, so bad asymptotic behavior -uint PhaseChaitin::Find_const( const Node *n ) const { - if( n->_idx >= _names.Size() ) return 0; // not mapped, usual for debug dump - return Find_const( _names[n->_idx] ); -} - -//------------------------------Union------------------------------------------ -// union 2 sets together. 
-void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) { - uint src = Find(src_n); - uint dst = Find(dst_n); - assert( src, "" ); - assert( dst, "" ); - assert( src < _maxlrg, "oob" ); - assert( dst < _maxlrg, "oob" ); - assert( src < dst, "always union smaller" ); - _uf_map.map(dst,src); -} - -//------------------------------new_lrg---------------------------------------- -void PhaseChaitin::new_lrg( const Node *x, uint lrg ) { - // Make the Node->LRG mapping - _names.extend(x->_idx,lrg); - // Make the Union-Find mapping an identity function - _uf_map.extend(lrg,lrg); -} - -//------------------------------clone_projs------------------------------------ -// After cloning some rematerialized instruction, clone any MachProj's that -// follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants -// use G3 as an address temp. -int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) { - Block *bcon = _cfg._bbs[con->_idx]; - uint cindex = bcon->find_node(con); - Node *con_next = bcon->_nodes[cindex+1]; - if( con_next->in(0) != con || !con_next->is_MachProj() ) - return false; // No MachProj's follow - - // Copy kills after the cloned constant - Node *kills = con_next->clone(); - kills->set_req( 0, copy ); - b->_nodes.insert( idx, kills ); - _cfg._bbs.map( kills->_idx, b ); - new_lrg( kills, maxlrg++ ); - return true; -} - -//------------------------------compact---------------------------------------- -// Renumber the live ranges to compact them. Makes the IFG smaller. -void PhaseChaitin::compact() { - // Current the _uf_map contains a series of short chains which are headed - // by a self-cycle. All the chains run from big numbers to little numbers. - // The Find() call chases the chains & shortens them for the next Find call. - // We are going to change this structure slightly. Numbers above a moving - // wave 'i' are unchanged. Numbers below 'j' point directly to their - // compacted live range with no further chaining. There are no chains or - // cycles below 'i', so the Find call no longer works. - uint j=1; - uint i; - for( i=1; i < _maxlrg; i++ ) { - uint lr = _uf_map[i]; - // Ignore unallocated live ranges - if( !lr ) continue; - assert( lr <= i, "" ); - _uf_map.map(i, ( lr == i ) ? j++ : _uf_map[lr]); - } - if( false ) // PrintOptoCompactLiveRanges - printf("Compacted %d LRs from %d\n",i-j,i); - // Now change the Node->LR mapping to reflect the compacted names - uint unique = _names.Size(); - for( i=0; iprint("L%d/N%d ",r,n->_idx); } @@ -235,9 +87,9 @@ //------------------------------combine_these_two------------------------------ // Combine the live ranges def'd by these 2 Nodes. N2 is an input to N1. -void PhaseCoalesce::combine_these_two( Node *n1, Node *n2 ) { - uint lr1 = _phc.Find(n1); - uint lr2 = _phc.Find(n2); +void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) { + uint lr1 = _phc._lrg_map.find(n1); + uint lr2 = _phc._lrg_map.find(n2); if( lr1 != lr2 && // Different live ranges already AND !_phc._ifg->test_edge_sq( lr1, lr2 ) ) { // Do not interfere LRG *lrg1 = &_phc.lrgs(lr1); @@ -306,14 +158,18 @@ // I am about to clobber the dst_name, so the copy must be inserted // after the last use. Last use is really first-use on a backwards scan. uint i = b->end_idx()-1; - while( 1 ) { + while(1) { Node *n = b->_nodes[i]; // Check for end of virtual copies; this is also the end of the // parallel renaming effort. 
- if( n->_idx < _unique ) break; + if (n->_idx < _unique) { + break; + } uint idx = n->is_Copy(); assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" ); - if( idx && _phc.Find(n->in(idx)) == dst_name ) break; + if (idx && _phc._lrg_map.find(n->in(idx)) == dst_name) { + break; + } i--; } uint last_use_idx = i; @@ -324,24 +180,29 @@ // There can be only 1 kill that exits any block and that is // the last kill. Thus it is the first kill on a backwards scan. i = b->end_idx()-1; - while( 1 ) { + while (1) { Node *n = b->_nodes[i]; // Check for end of virtual copies; this is also the end of the // parallel renaming effort. - if( n->_idx < _unique ) break; + if (n->_idx < _unique) { + break; + } assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" ); - if( _phc.Find(n) == src_name ) { + if (_phc._lrg_map.find(n) == src_name) { kill_src_idx = i; break; } i--; } // Need a temp? Last use of dst comes after the kill of src? - if( last_use_idx >= kill_src_idx ) { + if (last_use_idx >= kill_src_idx) { // Need to break a cycle with a temp uint idx = copy->is_Copy(); Node *tmp = copy->clone(); - _phc.new_lrg(tmp,_phc._maxlrg++); + uint max_lrg_id = _phc._lrg_map.max_lrg_id(); + _phc.new_lrg(tmp, max_lrg_id); + _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1); + // Insert new temp between copy and source tmp ->set_req(idx,copy->in(idx)); copy->set_req(idx,tmp); @@ -359,14 +220,14 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { // We do LRGs compressing and fix a liveout data only here since the other // place in Split() is guarded by the assert which we never hit. - _phc.compress_uf_map_for_nodes(); + _phc._lrg_map.compress_uf_map_for_nodes(); // Fix block's liveout data for compressed live ranges. 
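The temp-insertion logic above handles parallel-copy cycles: scanning the renaming region backwards, if the destination's last use falls at or after the kill of the source, copying in place would clobber a value that is still needed, so a temporary is inserted to break the cycle. The test in schematic form (ParallelCopy and the name ids are illustrative only):

#include <vector>

// One virtual copy in the parallel-renaming region at a block end.
struct ParallelCopy { int dst; int src; };

bool needs_temp(const std::vector<ParallelCopy>& region,
                int dst_name, int src_name) {
  int last_use_idx = -1, kill_src_idx = -1;
  for (int i = (int)region.size() - 1; i >= 0; i--) {
    if (last_use_idx < 0 && region[i].src == dst_name) last_use_idx = i; // last read of dst
    if (kill_src_idx < 0 && region[i].dst == src_name) kill_src_idx = i; // overwrite of src
  }
  // dst is still read after src has been clobbered: break the cycle
  return last_use_idx >= 0 && kill_src_idx >= 0 && last_use_idx >= kill_src_idx;
}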
- for(uint lrg = 1; lrg < _phc._maxlrg; lrg++ ) { - uint compressed_lrg = _phc.Find(lrg); - if( lrg != compressed_lrg ) { - for( uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++ ) { + for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) { + uint compressed_lrg = _phc._lrg_map.find(lrg); + if (lrg != compressed_lrg) { + for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) { IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]); - if( liveout->member(lrg) ) { + if (liveout->member(lrg)) { liveout->remove(lrg); liveout->insert(compressed_lrg); } @@ -392,8 +253,9 @@ uint cidx = copy->is_Copy(); if( cidx ) { Node *def = copy->in(cidx); - if( _phc.Find(copy) == _phc.Find(def) ) - n->set_req(k,def); + if (_phc._lrg_map.find(copy) == _phc._lrg_map.find(def)) { + n->set_req(k, def); + } } } @@ -401,7 +263,7 @@ uint cidx = n->is_Copy(); if( cidx ) { Node *def = n->in(cidx); - if( _phc.Find(n) == _phc.Find(def) ) { + if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) { n->replace_by(def); n->set_req(cidx,NULL); b->_nodes.remove(l); @@ -410,16 +272,18 @@ } } - if( n->is_Phi() ) { + if (n->is_Phi()) { // Get the chosen name for the Phi - uint phi_name = _phc.Find( n ); + uint phi_name = _phc._lrg_map.find(n); // Ignore the pre-allocated specials - if( !phi_name ) continue; + if (!phi_name) { + continue; + } // Check for mismatch inputs to Phi - for( uint j = 1; jin(j); - uint src_name = _phc.Find(m); - if( src_name != phi_name ) { + uint src_name = _phc._lrg_map.find(m); + if (src_name != phi_name) { Block *pred = _phc._cfg._bbs[b->pred(j)->_idx]; Node *copy; assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); @@ -430,18 +294,18 @@ // Insert the copy in the predecessor basic block pred->add_inst(copy); // Copy any flags as well - _phc.clone_projs( pred, pred->end_idx(), m, copy, _phc._maxlrg ); + _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map); } else { const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()]; - copy = new (C) MachSpillCopyNode(m,*rm,*rm); + copy = new (C) MachSpillCopyNode(m, *rm, *rm); // Find a good place to insert. Kinda tricky, use a subroutine insert_copy_with_overlap(pred,copy,phi_name,src_name); } // Insert the copy in the use-def chain - n->set_req( j, copy ); + n->set_req(j, copy); _phc._cfg._bbs.map( copy->_idx, pred ); // Extend ("register allocate") the names array for the copy. - _phc._names.extend( copy->_idx, phi_name ); + _phc._lrg_map.extend(copy->_idx, phi_name); } // End of if Phi names do not match } // End of for all inputs to Phi } else { // End of if Phi @@ -450,39 +314,40 @@ uint idx; if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) { // Get the chosen name for the Node - uint name = _phc.Find( n ); - assert( name, "no 2-address specials" ); + uint name = _phc._lrg_map.find(n); + assert (name, "no 2-address specials"); // Check for name mis-match on the 2-address input Node *m = n->in(idx); - if( _phc.Find(m) != name ) { + if (_phc._lrg_map.find(m) != name) { Node *copy; assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); // At this point it is unsafe to extend live ranges (6550579). // Rematerialize only constants as we do for Phi above. 
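The repair rule applied above for Phi inputs, and below for two-address inputs, comes down to one choice per mismatched input: re-create the value in place when it is a cheap rematerializable constant, otherwise fall back to a register-to-register spill copy, since extending live ranges at this point is unsafe (the 6550579 note). In compact form, with stand-in types:

// Stand-in for the node being examined; HotSpot tests is_Mach/is_Con and
// rematerialize() instead of a single flag.
struct InputNode { bool is_cheap_constant; };

enum class FixKind { Rematerialize, SpillCopy };

FixKind choose_input_fix(const InputNode& input) {
  // Only constants are duplicated; everything else gets a copy so that
  // no existing live range is stretched.
  return input.is_cheap_constant ? FixKind::Rematerialize
                                 : FixKind::SpillCopy;
}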
- if( m->is_Mach() && m->as_Mach()->is_Con() && - m->as_Mach()->rematerialize() ) { + if(m->is_Mach() && m->as_Mach()->is_Con() && + m->as_Mach()->rematerialize()) { copy = m->clone(); // Insert the copy in the basic block, just before us - b->_nodes.insert( l++, copy ); - if( _phc.clone_projs( b, l, m, copy, _phc._maxlrg ) ) + b->_nodes.insert(l++, copy); + if(_phc.clone_projs(b, l, m, copy, _phc._lrg_map)) { l++; + } } else { const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()]; - copy = new (C) MachSpillCopyNode( m, *rm, *rm ); + copy = new (C) MachSpillCopyNode(m, *rm, *rm); // Insert the copy in the basic block, just before us - b->_nodes.insert( l++, copy ); + b->_nodes.insert(l++, copy); } // Insert the copy in the use-def chain - n->set_req(idx, copy ); + n->set_req(idx, copy); // Extend ("register allocate") the names array for the copy. - _phc._names.extend( copy->_idx, name ); + _phc._lrg_map.extend(copy->_idx, name); _phc._cfg._bbs.map( copy->_idx, b ); } } // End of is two-adr // Insert a copy at a debug use for a lrg which has high frequency - if( b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs) ) { + if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) { // Walk the debug inputs to the node and check for lrg freq JVMState* jvms = n->jvms(); uint debug_start = jvms ? jvms->debug_start() : 999999; @@ -490,9 +355,11 @@ for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) { // Do not split monitors; they are only needed for debug table // entries and need no code. - if( jvms->is_monitor_use(inpidx) ) continue; + if (jvms->is_monitor_use(inpidx)) { + continue; + } Node *inp = n->in(inpidx); - uint nidx = _phc.n2lidx(inp); + uint nidx = _phc._lrg_map.live_range_id(inp); LRG &lrg = lrgs(nidx); // If this lrg has a high frequency use/def @@ -519,8 +386,10 @@ // Insert the copy in the basic block, just before us b->_nodes.insert( l++, copy ); // Extend ("register allocate") the names array for the copy. 
- _phc.new_lrg( copy, _phc._maxlrg++ ); - _phc._cfg._bbs.map( copy->_idx, b ); + uint max_lrg_id = _phc._lrg_map.max_lrg_id(); + _phc.new_lrg(copy, max_lrg_id); + _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1); + _phc._cfg._bbs.map(copy->_idx, b); //tty->print_cr("Split a debug use in Aggressive Coalesce"); } // End of if high frequency use/def } // End of for all debug inputs @@ -583,17 +452,17 @@ uint idx; // 2-address instructions have a virtual Copy matching their input // to their output - if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) { + if (n->is_Mach() && (idx = n->as_Mach()->two_adr())) { MachNode *mach = n->as_Mach(); - combine_these_two( mach, mach->in(idx) ); + combine_these_two(mach, mach->in(idx)); } } // End of for all instructions in block } //============================================================================= //------------------------------PhaseConservativeCoalesce---------------------- -PhaseConservativeCoalesce::PhaseConservativeCoalesce( PhaseChaitin &chaitin ) : PhaseCoalesce(chaitin) { - _ulr.initialize(_phc._maxlrg); +PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) { + _ulr.initialize(_phc._lrg_map.max_lrg_id()); } //------------------------------verify----------------------------------------- @@ -673,10 +542,14 @@ // Else work back one in copy chain prev_copy = prev_copy->in(prev_copy->is_Copy()); } else { // Else collect interferences - uint lidx = _phc.Find(x); + uint lidx = _phc._lrg_map.find(x); // Found another def of live-range being stretched? - if( lidx == lr1 ) return max_juint; - if( lidx == lr2 ) return max_juint; + if(lidx == lr1) { + return max_juint; + } + if(lidx == lr2) { + return max_juint; + } // If we attempt to coalesce across a bound def if( lrgs(lidx).is_bound() ) { @@ -751,33 +624,43 @@ // See if I can coalesce a series of multiple copies together. I need the // final dest copy and the original src copy. They can be the same Node. // Compute the compatible register masks. -bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block *b, uint bindex ) { +bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block *b, uint bindex) { - if( !dst_copy->is_SpillCopy() ) return false; - if( !src_copy->is_SpillCopy() ) return false; + if (!dst_copy->is_SpillCopy()) { + return false; + } + if (!src_copy->is_SpillCopy()) { + return false; + } Node *src_def = src_copy->in(src_copy->is_Copy()); - uint lr1 = _phc.Find(dst_copy); - uint lr2 = _phc.Find(src_def ); + uint lr1 = _phc._lrg_map.find(dst_copy); + uint lr2 = _phc._lrg_map.find(src_def); // Same live ranges already? - if( lr1 == lr2 ) return false; + if (lr1 == lr2) { + return false; + } // Interfere? - if( _phc._ifg->test_edge_sq( lr1, lr2 ) ) return false; + if (_phc._ifg->test_edge_sq(lr1, lr2)) { + return false; + } // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK. - if( !lrgs(lr1)._is_oop && lrgs(lr2)._is_oop ) // not an oop->int cast + if (!lrgs(lr1)._is_oop && lrgs(lr2)._is_oop) { // not an oop->int cast return false; + } // Coalescing between an aligned live range and a mis-aligned live range? // No, no! Alignment changes how we count degree. 
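copy_copy, whose guards continue below, screens each candidate pair with cheap early-outs before attempting the expensive interference and register-mask work. The screen in outline (LR and its flags are stand-ins for the live-range record):

// Stand-in for the per-live-range record consulted by the guards.
struct LR { bool is_oop; bool fat_proj; };

bool may_coalesce(unsigned lr1, unsigned lr2,
                  const LR& a, const LR& b, bool interferes) {
  if (lr1 == lr2) return false;               // already one live range
  if (interferes) return false;               // IFG edge: must stay apart
  if (!a.is_oop && b.is_oop) return false;    // would turn an oop into an int
  if (a.fat_proj != b.fat_proj) return false; // alignment changes degree counting
  return true;                                // worth trying the full check
}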
- if( lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj ) + if (lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj) { return false; + } // Sort; use smaller live-range number Node *lr1_node = dst_copy; Node *lr2_node = src_def; - if( lr1 > lr2 ) { + if (lr1 > lr2) { uint tmp = lr1; lr1 = lr2; lr2 = tmp; lr1_node = src_def; lr2_node = dst_copy; } @@ -916,17 +799,5 @@ PhaseChaitin::_conserv_coalesce++; // Collect stats on success continue; } - - /* do not attempt pairs. About 1/2 of all pairs can be removed by - post-alloc. The other set are too few to bother. - Node *copy2 = copy1->in(idx1); - uint idx2 = copy2->is_Copy(); - if( !idx2 ) continue; - if( copy_copy(copy1,copy2,b,i) ) { - i--; // Retry, same location in block - PhaseChaitin::_conserv_coalesce_pair++; // Collect stats on success - continue; - } - */ } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/coalesce.hpp --- a/src/share/vm/opto/coalesce.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/coalesce.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -41,23 +41,25 @@ public: // Coalesce copies - PhaseCoalesce( PhaseChaitin &chaitin ) : Phase(Coalesce), _phc(chaitin) { } + PhaseCoalesce(PhaseChaitin &phc) + : Phase(Coalesce) + , _phc(phc) {} virtual void verify() = 0; // Coalesce copies - void coalesce_driver( ); + void coalesce_driver(); // Coalesce copies in this block - virtual void coalesce( Block *b ) = 0; + virtual void coalesce(Block *b) = 0; // Attempt to coalesce live ranges defined by these 2 - void combine_these_two( Node *n1, Node *n2 ); + void combine_these_two(Node *n1, Node *n2); - LRG &lrgs( uint lidx ) { return _phc.lrgs(lidx); } + LRG &lrgs(uint lidx) { return _phc.lrgs(lidx); } #ifndef PRODUCT // Dump internal name - void dump( Node *n ) const; + void dump(Node *n) const; // Dump whole shebang void dump() const; #endif diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/compile.cpp --- a/src/share/vm/opto/compile.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/compile.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,6 +63,7 @@ #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/timer.hpp" +#include "trace/tracing.hpp" #include "utilities/copy.hpp" #ifdef TARGET_ARCH_MODEL_x86_32 # include "adfiles/ad_x86_32.hpp" @@ -418,6 +419,7 @@ } // clean up the late inline lists remove_useless_late_inlines(&_string_late_inlines, useful); + remove_useless_late_inlines(&_boxing_late_inlines, useful); remove_useless_late_inlines(&_late_inlines, useful); debug_only(verify_graph_edges(true/*check for no_dead_code*/);) } @@ -485,6 +487,12 @@ tty->print_cr("** Bailout: Recompile without escape analysis **"); tty->print_cr("*********************************************************"); } + if (_eliminate_boxing != EliminateAutoBox && PrintOpto) { + // Recompiling without boxing elimination + tty->print_cr("*********************************************************"); + tty->print_cr("** Bailout: Recompile without boxing elimination **"); + tty->print_cr("*********************************************************"); + } if (env()->break_at_compile()) { // Open the debugger when compiling this method. 
tty->print("### Breaking when compiling: "); @@ -601,7 +609,8 @@ // the continuation bci for on stack replacement. -Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis ) +Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, + bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing ) : Phase(Compiler), _env(ci_env), _log(ci_env->log()), @@ -617,6 +626,7 @@ _warm_calls(NULL), _subsume_loads(subsume_loads), _do_escape_analysis(do_escape_analysis), + _eliminate_boxing(eliminate_boxing), _failure_reason(NULL), _code_buffer("Compile::Fill_buffer"), _orig_pc_slot(0), @@ -638,6 +648,7 @@ _congraph(NULL), _late_inlines(comp_arena(), 2, 0, NULL), _string_late_inlines(comp_arena(), 2, 0, NULL), + _boxing_late_inlines(comp_arena(), 2, 0, NULL), _late_inlines_pos(0), _number_of_mh_late_inlines(0), _inlining_progress(false), @@ -776,7 +787,7 @@ if (failing()) return; - print_method("Before RemoveUseless", 3); + print_method(PHASE_BEFORE_REMOVEUSELESS, 3); // Remove clutter produced by parsing. if (!failing()) { @@ -906,6 +917,7 @@ _orig_pc_slot_offset_in_bytes(0), _subsume_loads(true), _do_escape_analysis(false), + _eliminate_boxing(false), _failure_reason(NULL), _code_buffer("Compile::Fill_buffer"), _has_method_handle_invokes(false), @@ -1016,6 +1028,7 @@ set_has_split_ifs(false); set_has_loops(has_method() && method()->has_loops()); // first approximation set_has_stringbuilder(false); + set_has_boxed_value(false); _trap_can_recompile = false; // no traps emitted yet _major_progress = true; // start out assuming good things will happen set_has_unsafe_access(false); @@ -1789,9 +1802,9 @@ { ResourceMark rm; - print_method("Before StringOpts", 3); + print_method(PHASE_BEFORE_STRINGOPTS, 3); PhaseStringOpts pso(initial_gvn(), for_igvn()); - print_method("After StringOpts", 3); + print_method(PHASE_AFTER_STRINGOPTS, 3); } // now inline anything that we skipped the first time around @@ -1807,6 +1820,38 @@ _string_late_inlines.trunc_to(0); } +// Late inlining of boxing methods +void Compile::inline_boxing_calls(PhaseIterGVN& igvn) { + if (_boxing_late_inlines.length() > 0) { + assert(has_boxed_value(), "inconsistent"); + + PhaseGVN* gvn = initial_gvn(); + set_inlining_incrementally(true); + + assert( igvn._worklist.size() == 0, "should be done with igvn" ); + for_igvn()->clear(); + gvn->replace_with(&igvn); + + while (_boxing_late_inlines.length() > 0) { + CallGenerator* cg = _boxing_late_inlines.pop(); + cg->do_late_inline(); + if (failing()) return; + } + _boxing_late_inlines.trunc_to(0); + + { + ResourceMark rm; + PhaseRemoveUseless pru(gvn, for_igvn()); + } + + igvn = PhaseIterGVN(gvn); + igvn.optimize(); + + set_inlining_progress(false); + set_inlining_incrementally(false); + } +} + void Compile::inline_incrementally_one(PhaseIterGVN& igvn) { assert(IncrementalInline, "incremental inlining should be on"); PhaseGVN* gvn = initial_gvn(); @@ -1831,7 +1876,7 @@ { ResourceMark rm; - PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn()); + PhaseRemoveUseless pru(gvn, for_igvn()); } igvn = PhaseIterGVN(gvn); @@ -1914,7 +1959,7 @@ NOT_PRODUCT( verify_graph_edges(); ) - print_method("After Parsing"); + print_method(PHASE_AFTER_PARSING); { // Iterative Global Value Numbering, including ideal transforms @@ -1925,16 +1970,29 @@ igvn.optimize(); } - print_method("Iter GVN 1", 2); + print_method(PHASE_ITER_GVN1, 2); if (failing()) return; - inline_incrementally(igvn); - - 
print_method("Incremental Inline", 2); + { + NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); ) + inline_incrementally(igvn); + } + + print_method(PHASE_INCREMENTAL_INLINE, 2); if (failing()) return; + if (eliminate_boxing()) { + NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); ) + // Inline valueOf() methods now. + inline_boxing_calls(igvn); + + print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2); + + if (failing()) return; + } + // No more new expensive nodes will be added to the list from here // so keep only the actual candidates for optimizations. cleanup_expensive_nodes(igvn); @@ -1945,7 +2003,7 @@ // Cleanup graph (remove dead nodes). TracePhase t2("idealLoop", &_t_idealLoop, true); PhaseIdealLoop ideal_loop( igvn, false, true ); - if (major_progress()) print_method("PhaseIdealLoop before EA", 2); + if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2); if (failing()) return; } ConnectionGraph::do_analysis(this, &igvn); @@ -1954,7 +2012,7 @@ // Optimize out fields loads from scalar replaceable allocations. igvn.optimize(); - print_method("Iter GVN after EA", 2); + print_method(PHASE_ITER_GVN_AFTER_EA, 2); if (failing()) return; @@ -1965,7 +2023,7 @@ igvn.set_delay_transform(false); igvn.optimize(); - print_method("Iter GVN after eliminating allocations and locks", 2); + print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2); if (failing()) return; } @@ -1981,7 +2039,7 @@ TracePhase t2("idealLoop", &_t_idealLoop, true); PhaseIdealLoop ideal_loop( igvn, true ); loop_opts_cnt--; - if (major_progress()) print_method("PhaseIdealLoop 1", 2); + if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2); if (failing()) return; } // Loop opts pass if partial peeling occurred in previous pass @@ -1989,7 +2047,7 @@ TracePhase t3("idealLoop", &_t_idealLoop, true); PhaseIdealLoop ideal_loop( igvn, false ); loop_opts_cnt--; - if (major_progress()) print_method("PhaseIdealLoop 2", 2); + if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2); if (failing()) return; } // Loop opts pass for loop-unrolling before CCP @@ -1997,7 +2055,7 @@ TracePhase t4("idealLoop", &_t_idealLoop, true); PhaseIdealLoop ideal_loop( igvn, false ); loop_opts_cnt--; - if (major_progress()) print_method("PhaseIdealLoop 3", 2); + if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2); } if (!failing()) { // Verify that last round of loop opts produced a valid graph @@ -2014,7 +2072,7 @@ TracePhase t2("ccp", &_t_ccp, true); ccp.do_transform(); } - print_method("PhaseCPP 1", 2); + print_method(PHASE_CPP1, 2); assert( true, "Break here to ccp.dump_old2new_map()"); @@ -2025,7 +2083,7 @@ igvn.optimize(); } - print_method("Iter GVN 2", 2); + print_method(PHASE_ITER_GVN2, 2); if (failing()) return; @@ -2038,7 +2096,7 @@ assert( cnt++ < 40, "infinite cycle in loop optimization" ); PhaseIdealLoop ideal_loop( igvn, true); loop_opts_cnt--; - if (major_progress()) print_method("PhaseIdealLoop iterations", 2); + if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2); if (failing()) return; } } @@ -2071,7 +2129,7 @@ } } - print_method("Optimize finished", 2); + print_method(PHASE_OPTIMIZE_FINISHED, 2); } @@ -2119,7 +2177,7 @@ cfg.GlobalCodeMotion(m,unique(),proj_list); if (failing()) return; - print_method("Global code motion", 2); + print_method(PHASE_GLOBAL_CODE_MOTION, 2); NOT_PRODUCT( verify_graph_edges(); ) @@ -2127,22 +2185,19 @@ } NOT_PRODUCT( verify_graph_edges(); ) - PhaseChaitin regalloc(unique(),cfg,m); + PhaseChaitin 
regalloc(unique(), cfg, m); _regalloc = ®alloc; { TracePhase t2("regalloc", &_t_registerAllocation, true); - // Perform any platform dependent preallocation actions. This is used, - // for example, to avoid taking an implicit null pointer exception - // using the frame pointer on win95. - _regalloc->pd_preallocate_hook(); - // Perform register allocation. After Chaitin, use-def chains are // no longer accurate (at spill code) and so must be ignored. // Node->LRG->reg mappings are still accurate. _regalloc->Register_Allocate(); // Bail out if the allocator builds too many nodes - if (failing()) return; + if (failing()) { + return; + } } // Prior to register allocation we kept empty basic blocks in case the @@ -2160,9 +2215,6 @@ cfg.fixup_flow(); } - // Perform any platform dependent postallocation verifications. - debug_only( _regalloc->pd_postallocate_verify_hook(); ) - // Apply peephole optimizations if( OptoPeephole ) { NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); ) @@ -2178,7 +2230,7 @@ Output(); } - print_method("Final Code"); + print_method(PHASE_FINAL_CODE); // He's dead, Jim. _cfg = (PhaseCFG*)0xdeadbeef; @@ -2902,6 +2954,7 @@ } break; case Op_MemBarStoreStore: + case Op_MemBarRelease: // Break the link with AllocateNode: it is no longer useful and // confuses register allocation. if (n->req() > MemBarNode::Precedent) { @@ -3264,8 +3317,16 @@ // Record the first failure reason. _failure_reason = reason; } + + EventCompilerFailure event; + if (event.should_commit()) { + event.set_compileID(Compile::compile_id()); + event.set_failure(reason); + event.commit(); + } + if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { - C->print_method(_failure_reason); + C->print_method(PHASE_FAILURE); } _root = NULL; // flush the graph, too } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/compile.hpp --- a/src/share/vm/opto/compile.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/compile.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,10 +36,12 @@ #include "libadt/vectset.hpp" #include "memory/resourceArea.hpp" #include "opto/idealGraphPrinter.hpp" +#include "opto/phasetype.hpp" #include "opto/phase.hpp" #include "opto/regmask.hpp" #include "runtime/deoptimization.hpp" #include "runtime/vmThread.hpp" +#include "trace/tracing.hpp" class Block; class Bundle; @@ -262,6 +264,7 @@ const bool _save_argument_registers; // save/restore arg regs for trampolines const bool _subsume_loads; // Load can be matched as part of a larger op. const bool _do_escape_analysis; // Do escape analysis. + const bool _eliminate_boxing; // Do boxing elimination. ciMethod* _method; // The method being compiled. int _entry_bci; // entry bci for osr methods. const TypeFunc* _tf; // My kind of signature @@ -287,6 +290,7 @@ bool _has_split_ifs; // True if the method _may_ have some split-if bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores. 
bool _has_stringbuilder; // True if StringBuffers or StringBuilders are allocated + bool _has_boxed_value; // True if a boxed object is allocated int _max_vector_size; // Maximum size of generated vectors uint _trap_hist[trapHistLength]; // Cumulative traps bool _trap_can_recompile; // Have we emitted a recompiling trap? @@ -320,6 +324,7 @@ IdealGraphPrinter* _printer; #endif + // Node management uint _unique; // Counter for unique Node indices VectorSet _dead_node_list; // Set of dead nodes @@ -375,6 +380,8 @@ // main parsing has finished. GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations + GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations + int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining) uint _number_of_mh_late_inlines; // number of method handle late inlining still pending @@ -486,8 +493,12 @@ // instructions that subsume a load may result in an unschedulable // instruction sequence. bool subsume_loads() const { return _subsume_loads; } - // Do escape analysis. + /** Do escape analysis. */ bool do_escape_analysis() const { return _do_escape_analysis; } + /** Do boxing elimination. */ + bool eliminate_boxing() const { return _eliminate_boxing; } + /** Do aggressive boxing elimination. */ + bool aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; } bool save_argument_registers() const { return _save_argument_registers; } @@ -527,6 +538,8 @@ void set_has_unsafe_access(bool z) { _has_unsafe_access = z; } bool has_stringbuilder() const { return _has_stringbuilder; } void set_has_stringbuilder(bool z) { _has_stringbuilder = z; } + bool has_boxed_value() const { return _has_boxed_value; } + void set_has_boxed_value(bool z) { _has_boxed_value = z; } int max_vector_size() const { return _max_vector_size; } void set_max_vector_size(int s) { _max_vector_size = s; } void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; } @@ -563,28 +576,54 @@ bool has_method_handle_invokes() const { return _has_method_handle_invokes; } void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; } + jlong _latest_stage_start_counter; + void begin_method() { #ifndef PRODUCT if (_printer) _printer->begin_method(this); #endif + C->_latest_stage_start_counter = os::elapsed_counter(); } - void print_method(const char * name, int level = 1) { + + void print_method(CompilerPhaseType cpt, int level = 1) { + EventCompilerPhase event(UNTIMED); + if (event.should_commit()) { + event.set_starttime(C->_latest_stage_start_counter); + event.set_endtime(os::elapsed_counter()); + event.set_phase((u1) cpt); + event.set_compileID(C->_compile_id); + event.set_phaseLevel(level); + event.commit(); + } + + #ifndef PRODUCT - if (_printer) _printer->print_method(this, name, level); + if (_printer) _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level); #endif + C->_latest_stage_start_counter = os::elapsed_counter(); } - void end_method() { + + void end_method(int level = 1) { + EventCompilerPhase event(UNTIMED); + if (event.should_commit()) { + event.set_starttime(C->_latest_stage_start_counter); + event.set_endtime(os::elapsed_counter()); + event.set_phase((u1) PHASE_END); + event.set_compileID(C->_compile_id); + event.set_phaseLevel(level); + event.commit(); + } #ifndef PRODUCT if (_printer) _printer->end_method(); #endif } - int macro_count() { return _macro_nodes->length(); } - int predicate_count() { return 
_predicate_opaqs->length();} - int expensive_count() { return _expensive_nodes->length(); } - Node* macro_node(int idx) { return _macro_nodes->at(idx); } - Node* predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);} - Node* expensive_node(int idx) { return _expensive_nodes->at(idx); } + int macro_count() const { return _macro_nodes->length(); } + int predicate_count() const { return _predicate_opaqs->length();} + int expensive_count() const { return _expensive_nodes->length(); } + Node* macro_node(int idx) const { return _macro_nodes->at(idx); } + Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);} + Node* expensive_node(int idx) const { return _expensive_nodes->at(idx); } ConnectionGraph* congraph() { return _congraph;} void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;} void add_macro_node(Node * n) { @@ -766,7 +805,12 @@ // Decide how to build a call. // The profile factor is a discount to apply to this site's interp. profile. CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false); - bool should_delay_inlining(ciMethod* call_method, JVMState* jvms); + bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) { + return should_delay_string_inlining(call_method, jvms) || + should_delay_boxing_inlining(call_method, jvms); + } + bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms); + bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms); // Helper functions to identify inlining potential at call-site ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass, @@ -822,6 +866,10 @@ _string_late_inlines.push(cg); } + void add_boxing_late_inline(CallGenerator* cg) { + _boxing_late_inlines.push(cg); + } + void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful); void dump_inlining(); @@ -841,6 +889,7 @@ void inline_incrementally_one(PhaseIterGVN& igvn); void inline_incrementally(PhaseIterGVN& igvn); void inline_string_calls(bool parse_time); + void inline_boxing_calls(PhaseIterGVN& igvn); // Matching, CFG layout, allocation, code generation PhaseCFG* cfg() { return _cfg; } @@ -913,7 +962,8 @@ // replacement, entry_bci indicates the bytecode for which to compile a // continuation. Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, - int entry_bci, bool subsume_loads, bool do_escape_analysis); + int entry_bci, bool subsume_loads, bool do_escape_analysis, + bool eliminate_boxing); // Second major entry point. From the TypeFunc signature, generate code // to pass arguments from the Java calling convention to the C calling diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/doCall.cpp --- a/src/share/vm/opto/doCall.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/doCall.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -176,9 +176,12 @@ // Delay the inlining of this method to give us the // opportunity to perform some high level optimizations // first. 
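// (Deferring the inline keeps a box factory such as Integer.valueOf(x)
// visible as a single CallStaticJava node until escape analysis has run; a
// box that is found not to escape can then be eliminated outright instead of
// being expanded into its cache-lookup-plus-allocation form too early.)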
- if (should_delay_inlining(callee, jvms)) { + if (should_delay_string_inlining(callee, jvms)) { assert(!delayed_forbidden, "strange"); return CallGenerator::for_string_late_inline(callee, cg); + } else if (should_delay_boxing_inlining(callee, jvms)) { + assert(!delayed_forbidden, "strange"); + return CallGenerator::for_boxing_late_inline(callee, cg); } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) { return CallGenerator::for_late_inline(callee, cg); } @@ -276,7 +279,7 @@ // Return true for methods that shouldn't be inlined early so that // they are easier to analyze and optimize as intrinsics. -bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) { +bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) { if (has_stringbuilder()) { if ((call_method->holder() == C->env()->StringBuilder_klass() || @@ -327,6 +330,13 @@ return false; } +bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) { + if (eliminate_boxing() && call_method->is_boxing_method()) { + set_has_boxed_value(true); + return true; + } + return false; +} // uncommon-trap call-sites where callee is unloaded, uninitialized or will not link bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/escape.cpp --- a/src/share/vm/opto/escape.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/escape.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,15 +63,19 @@ // EA brings benefits only when the code has allocations and/or locks which // are represented by ideal Macro nodes. int cnt = C->macro_count(); - for( int i=0; i < cnt; i++ ) { + for (int i = 0; i < cnt; i++) { Node *n = C->macro_node(i); - if ( n->is_Allocate() ) + if (n->is_Allocate()) return true; - if( n->is_Lock() ) { + if (n->is_Lock()) { Node* obj = n->as_Lock()->obj_node()->uncast(); - if( !(obj->is_Parm() || obj->is_Con()) ) + if (!(obj->is_Parm() || obj->is_Con())) return true; } + if (n->is_CallStaticJava() && + n->as_CallStaticJava()->is_boxing_method()) { + return true; + } } return false; } @@ -115,7 +119,7 @@ { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true); // 1. Populate Connection Graph (CG) with PointsTo nodes. - ideal_nodes.map(C->unique(), NULL); // preallocate space + ideal_nodes.map(C->live_nodes(), NULL); // preallocate space // Initialize worklist if (C->root() != NULL) { ideal_nodes.push(C->root()); @@ -152,8 +156,11 @@ // escape status of the associated Allocate node some of them // may be eliminated. storestore_worklist.append(n); + } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) && + (n->req() > MemBarNode::Precedent)) { + record_for_optimizer(n); #ifdef ASSERT - } else if(n->is_AddP()) { + } else if (n->is_AddP()) { // Collect address nodes for graph verification. 
addp_worklist.append(n); #endif @@ -206,8 +213,15 @@ int non_escaped_length = non_escaped_worklist.length(); for (int next = 0; next < non_escaped_length; next++) { JavaObjectNode* ptn = non_escaped_worklist.at(next); - if (ptn->escape_state() == PointsToNode::NoEscape && - ptn->scalar_replaceable()) { + bool noescape = (ptn->escape_state() == PointsToNode::NoEscape); + Node* n = ptn->ideal_node(); + if (n->is_Allocate()) { + n->as_Allocate()->_is_non_escaping = noescape; + } + if (n->is_CallStaticJava()) { + n->as_CallStaticJava()->_is_non_escaping = noescape; + } + if (noescape && ptn->scalar_replaceable()) { adjust_scalar_replaceable_state(ptn); if (ptn->scalar_replaceable()) { alloc_worklist.append(ptn->ideal_node()); @@ -263,7 +277,7 @@ // scalar replaceable objects. split_unique_types(alloc_worklist); if (C->failing()) return false; - C->print_method("After Escape Analysis", 2); + C->print_method(PHASE_AFTER_EA, 2); #ifdef ASSERT } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { @@ -330,8 +344,10 @@ // Don't mark as processed since call's arguments have to be processed. delayed_worklist->push(n); // Check if a call returns an object. - if (n->as_Call()->returns_pointer() && - n->as_Call()->proj_out(TypeFunc::Parms) != NULL) { + if ((n->as_Call()->returns_pointer() && + n->as_Call()->proj_out(TypeFunc::Parms) != NULL) || + (n->is_CallStaticJava() && + n->as_CallStaticJava()->is_boxing_method())) { add_call_node(n->as_Call()); } } @@ -387,8 +403,8 @@ case Op_ConNKlass: { // assume all oop constants globally escape except for null PointsToNode::EscapeState es; - if (igvn->type(n) == TypePtr::NULL_PTR || - igvn->type(n) == TypeNarrowOop::NULL_PTR) { + const Type* t = igvn->type(n); + if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) { es = PointsToNode::NoEscape; } else { es = PointsToNode::GlobalEscape; @@ -468,6 +484,9 @@ Node* adr = n->in(MemNode::Address); const Type *adr_type = igvn->type(adr); adr_type = adr_type->make_ptr(); + if (adr_type == NULL) { + break; // skip dead nodes + } if (adr_type->isa_oopptr() || (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && (adr_type == TypeRawPtr::NOTNULL && @@ -660,14 +679,18 @@ case Op_GetAndSetP: case Op_GetAndSetN: { Node* adr = n->in(MemNode::Address); - if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) { - const Type* t = _igvn->type(n); - if (t->make_ptr() != NULL) { - add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); - } - } const Type *adr_type = _igvn->type(adr); adr_type = adr_type->make_ptr(); +#ifdef ASSERT + if (adr_type == NULL) { + n->dump(1); + assert(adr_type != NULL, "dead node should not be on list"); + break; + } +#endif + if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) { + add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); + } if (adr_type->isa_oopptr() || (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && (adr_type == TypeRawPtr::NOTNULL && @@ -797,6 +820,18 @@ // Returns a newly allocated unescaped object. add_java_object(call, PointsToNode::NoEscape); ptnode_adr(call_idx)->set_scalar_replaceable(false); + } else if (meth->is_boxing_method()) { + // Returns boxing object + PointsToNode::EscapeState es; + vmIntrinsics::ID intr = meth->intrinsic_id(); + if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) { + // It does not escape if object is always allocated. + es = PointsToNode::NoEscape; + } else { + // It escapes globally if object could be loaded from cache. 
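// (Concretely: Integer.valueOf() may return a preallocated instance from the
// Integer cache for values in [-128, 127], so its result can be reachable
// from outside the compiled method; Float.valueOf() and Double.valueOf() have
// no such cache and always allocate, which is why only those two intrinsics
// were given the NoEscape state above.)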
+ es = PointsToNode::GlobalEscape; + } + add_java_object(call, es); } else { BCEscapeAnalyzer* call_analyzer = meth->get_bcea(); call_analyzer->copy_dependencies(_compile->dependencies()); @@ -943,6 +978,9 @@ assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only"); #endif ciMethod* meth = call->as_CallJava()->method(); + if ((meth != NULL) && meth->is_boxing_method()) { + break; // Boxing methods do not modify any oops. + } BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; // fall-through if not a Java method or no analyzer information if (call_analyzer != NULL) { @@ -1791,9 +1829,8 @@ jobj2->ideal_node()->is_Con()) { // Klass or String constants compare. Need to be careful with // compressed pointers - compare types of ConN and ConP instead of nodes. - const Type* t1 = jobj1->ideal_node()->bottom_type()->make_ptr(); - const Type* t2 = jobj2->ideal_node()->bottom_type()->make_ptr(); - assert(t1 != NULL && t2 != NULL, "sanity"); + const Type* t1 = jobj1->ideal_node()->get_ptr_type(); + const Type* t2 = jobj2->ideal_node()->get_ptr_type(); if (t1->make_ptr() == t2->make_ptr()) { return _pcmp_eq; } else { @@ -2165,7 +2202,7 @@ int opcode = uncast_base->Opcode(); assert(opcode == Op_ConP || opcode == Op_ThreadLocal || opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || - (uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) || + (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity"); } return base; @@ -2744,6 +2781,11 @@ // so it could be eliminated if it has no uses. alloc->as_Allocate()->_is_scalar_replaceable = true; } + if (alloc->is_CallStaticJava()) { + // Set the scalar_replaceable flag for boxing method + // so it could be eliminated if it has no uses. + alloc->as_CallStaticJava()->_is_scalar_replaceable = true; + } continue; } if (!n->is_CheckCastPP()) { // not unique CheckCastPP. @@ -2782,6 +2824,11 @@ // so it could be eliminated. alloc->as_Allocate()->_is_scalar_replaceable = true; } + if (alloc->is_CallStaticJava()) { + // Set the scalar_replaceable flag for boxing method + // so it could be eliminated. 
+ alloc->as_CallStaticJava()->_is_scalar_replaceable = true; + } set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state // in order for an object to be scalar-replaceable, it must be: // - a direct allocation (not a call returning an object) @@ -2911,7 +2958,9 @@ // Load/store to instance's field memnode_worklist.append_if_missing(use); } else if (use->is_MemBar()) { - memnode_worklist.append_if_missing(use); + if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge + memnode_worklist.append_if_missing(use); + } } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes Node* addp2 = find_second_addp(use, n); if (addp2 != NULL) { @@ -3028,7 +3077,9 @@ continue; memnode_worklist.append_if_missing(use); } else if (use->is_MemBar()) { - memnode_worklist.append_if_missing(use); + if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge + memnode_worklist.append_if_missing(use); + } #ifdef ASSERT } else if(use->is_Mem()) { assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); @@ -3264,7 +3315,12 @@ if (ptn == NULL || !ptn->is_JavaObject()) continue; PointsToNode::EscapeState es = ptn->escape_state(); - if (ptn->ideal_node()->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) { + if ((es != PointsToNode::NoEscape) && !Verbose) { + continue; + } + Node* n = ptn->ideal_node(); + if (n->is_Allocate() || (n->is_CallStaticJava() && + n->as_CallStaticJava()->is_boxing_method())) { if (first) { tty->cr(); tty->print("======== Connection graph for "); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/graphKit.cpp --- a/src/share/vm/opto/graphKit.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/graphKit.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -333,6 +333,7 @@ assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals"); assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes"); assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS"); + assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects"); assert(ex_map->req() == phi_map->req(), "matching maps"); uint tos = ex_jvms->stkoff() + ex_jvms->sp(); Node* hidden_merge_mark = root(); @@ -409,7 +410,7 @@ while (dst->req() > orig_width) dst->del_req(dst->req()-1); } else { assert(dst->is_Phi(), "nobody else uses a hidden region"); - phi = (PhiNode*)dst; + phi = dst->as_Phi(); } if (add_multiple && src->in(0) == ex_control) { // Both are phis. @@ -1438,7 +1439,12 @@ } else { ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt); } - return _gvn.transform(ld); + ld = _gvn.transform(ld); + if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) { + // Improve graph before escape analysis and boxing elimination. 
+ record_for_igvn(ld); + } + return ld; } Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, @@ -3144,7 +3150,7 @@ set_all_memory(mem); // Create new memory state AllocateNode* alloc - = new (C) AllocateNode(C, AllocateNode::alloc_type(), + = new (C) AllocateNode(C, AllocateNode::alloc_type(Type::TOP), control(), mem, i_o(), size, klass_node, initial_slow_test); @@ -3285,7 +3291,7 @@ // Create the AllocateArrayNode and its result projections AllocateArrayNode* alloc - = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(), + = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT), control(), mem, i_o(), size, klass_node, initial_slow_test, @@ -3326,10 +3332,9 @@ if (ptr == NULL) { // reduce dumb test in callers return NULL; } - if (ptr->is_CheckCastPP()) { // strip a raw-to-oop cast - ptr = ptr->in(1); - if (ptr == NULL) return NULL; - } + ptr = ptr->uncast(); // strip a raw-to-oop cast + if (ptr == NULL) return NULL; + if (ptr->is_Proj()) { Node* allo = ptr->in(0); if (allo != NULL && allo->is_Allocate()) { @@ -3374,19 +3379,6 @@ return NULL; } -// Trace Allocate -> Proj[Parm] -> MemBarStoreStore -MemBarStoreStoreNode* AllocateNode::storestore() { - ProjNode* rawoop = proj_out(AllocateNode::RawAddress); - if (rawoop == NULL) return NULL; - for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) { - Node* storestore = rawoop->fast_out(i); - if (storestore->is_MemBarStoreStore()) { - return storestore->as_MemBarStoreStore(); - } - } - return NULL; -} - //----------------------------- loop predicates --------------------------- //------------------------------add_predicate_impl---------------------------- @@ -3564,7 +3556,8 @@ Node* no_ctrl = NULL; Node* no_base = __ top(); - Node* zero = __ ConI(0); + Node* zero = __ ConI(0); + Node* zeroX = __ ConX(0); float likely = PROB_LIKELY(0.999); float unlikely = PROB_UNLIKELY(0.999); @@ -3590,7 +3583,9 @@ // if (!marking) __ if_then(marking, BoolTest::ne, zero); { - Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw); + BasicType index_bt = TypeX_X->basic_type(); + assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size."); + Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw); if (do_load) { // load original value @@ -3603,22 +3598,16 @@ Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); // is the queue for this thread full? 
- __ if_then(index, BoolTest::ne, zero, likely); { + __ if_then(index, BoolTest::ne, zeroX, likely); { // decrement the index - Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t))); - Node* next_indexX = next_index; -#ifdef _LP64 - // We could refine the type for what it's worth - // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue); - next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) ); -#endif + Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); // Now get the buffer location we will log the previous value into and store it - Node *log_addr = __ AddP(no_base, buffer, next_indexX); + Node *log_addr = __ AddP(no_base, buffer, next_index); __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw); // update the index - __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw); + __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw); } __ else_(); { @@ -3645,26 +3634,21 @@ Node* buffer, const TypeFunc* tf) { - Node* zero = __ ConI(0); + Node* zero = __ ConI(0); + Node* zeroX = __ ConX(0); Node* no_base = __ top(); BasicType card_bt = T_BYTE; // Smash zero into card. MUST BE ORDERED WRT TO STORE __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw); // Now do the queue work - __ if_then(index, BoolTest::ne, zero); { - - Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t))); - Node* next_indexX = next_index; -#ifdef _LP64 - // We could refine the type for what it's worth - // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue); - next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) ); -#endif // _LP64 - Node* log_addr = __ AddP(no_base, buffer, next_indexX); + __ if_then(index, BoolTest::ne, zeroX); { + + Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); + Node* log_addr = __ AddP(no_base, buffer, next_index); __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw); - __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw); + __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw); } __ else_(); { __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread()); @@ -3725,7 +3709,7 @@ // Now some values // Use ctrl to avoid hoisting these values past a safepoint, which could // potentially reset these fields in the JavaThread. 
- Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw); + Node* index = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw); Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); // Convert the store obj pointer to an int prior to doing math on it diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/idealGraphPrinter.cpp --- a/src/share/vm/opto/idealGraphPrinter.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/idealGraphPrinter.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -616,7 +616,7 @@ buffer[0] = 0; _chaitin->dump_register(node, buffer); print_prop("reg", buffer); - print_prop("lrg", _chaitin->n2lidx(node)); + print_prop("lrg", _chaitin->_lrg_map.live_range_id(node)); } node->_in_dump_cnt--; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/idealGraphPrinter.hpp --- a/src/share/vm/opto/idealGraphPrinter.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/idealGraphPrinter.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,9 +41,8 @@ class InlineTree; class ciMethod; -class IdealGraphPrinter -{ -private: +class IdealGraphPrinter : public CHeapObj<mtCompiler> { + private: static const char *INDENT; static const char *TOP_ELEMENT; @@ -121,7 +120,7 @@ IdealGraphPrinter(); ~IdealGraphPrinter(); -public: + public: static void clean_up(); static IdealGraphPrinter *printer(); @@ -135,8 +134,6 @@ void print_method(Compile* compile, const char *name, int level=1, bool clear_nodes = false); void print(Compile* compile, const char *name, Node *root, int level=1, bool clear_nodes = false); void print_xml(const char *name); - - }; #endif diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/ifg.cpp --- a/src/share/vm/opto/ifg.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/ifg.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -286,15 +286,14 @@ uint idx; uint last = 0; while ((idx = elements.next()) != 0) { - assert( idx != i, "Must have empty diagonal"); - assert( pc->Find_const(idx) == idx, "Must not need Find" ); - assert( _adjs[idx].member(i), "IFG not square" ); - assert( !(*_yanked)[idx], "No yanked neighbors" ); - assert( last < idx, "not sorted increasing"); + assert(idx != i, "Must have empty diagonal"); + assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find"); + assert(_adjs[idx].member(i), "IFG not square"); + assert(!(*_yanked)[idx], "No yanked neighbors"); + assert(last < idx, "not sorted increasing"); last = idx; } - assert( !lrgs(i)._degree_valid || - effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong" ); + assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong"); } } #endif @@ -342,10 +341,10 @@ Node *n = b->_nodes[j-1]; // Get value being defined - uint r = n2lidx(n); + uint r = _lrg_map.live_range_id(n); // Some special values do not allocate - if( r ) { + if (r) { // Remove from live-out set liveout->remove(r); // Copies do not define a new value and so do not interfere. // Remove the copy's source from the liveout set before interfering. 
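// (Why the copy's source is dropped first: the destination of a copy may
// share a register with its source, so leaving the source in the live-out
// set here would add a spurious interference edge and needlessly block
// coalescing.)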
uint idx = n->is_Copy(); - if( idx ) liveout->remove( n2lidx(n->in(idx)) ); + if (idx) { + liveout->remove(_lrg_map.live_range_id(n->in(idx))); + } // Interfere with everything live - interfere_with_live( r, liveout ); + interfere_with_live(r, liveout); } // Make all inputs live - if( !n->is_Phi() ) { // Phi function uses come from prior block - for( uint k = 1; k < n->req(); k++ ) - liveout->insert( n2lidx(n->in(k)) ); + if (!n->is_Phi()) { // Phi function uses come from prior block + for (uint k = 1; k < n->req(); k++) { + liveout->insert(_lrg_map.live_range_id(n->in(k))); + } } // 2-address instructions always have the defined value live @@ -394,11 +396,12 @@ n->set_req( 2, tmp ); } // Defined value interferes with all inputs - uint lidx = n2lidx(n->in(idx)); - for( uint k = 1; k < n->req(); k++ ) { - uint kidx = n2lidx(n->in(k)); - if( kidx != lidx ) - _ifg->add_edge( r, kidx ); + uint lidx = _lrg_map.live_range_id(n->in(idx)); + for (uint k = 1; k < n->req(); k++) { + uint kidx = _lrg_map.live_range_id(n->in(k)); + if (kidx != lidx) { + _ifg->add_edge(r, kidx); + } } } } // End of forall instructions in block @@ -542,10 +545,10 @@ Node *n = b->_nodes[j - 1]; // Get value being defined - uint r = n2lidx(n); + uint r = _lrg_map.live_range_id(n); // Some special values do not allocate - if( r ) { + if (r) { // A DEF normally costs block frequency; rematerialized values are // removed from the DEF sight, so LOWER costs here. lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq; @@ -556,9 +559,11 @@ Node *def = n->in(0); if( !n->is_Proj() || // Could also be a flags-projection of a dead ADD or such. - (n2lidx(def) && !liveout.member(n2lidx(def)) ) ) { + (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) { b->_nodes.remove(j - 1); - if( lrgs(r)._def == n ) lrgs(r)._def = 0; + if (lrgs(r)._def == n) { + lrgs(r)._def = 0; + } n->disconnect_inputs(NULL, C); _cfg._bbs.map(n->_idx,NULL); n->replace_by(C->top()); @@ -570,7 +575,7 @@ // Fat-projections kill many registers which cannot be used to // hold live ranges. - if( lrgs(r)._fat_proj ) { + if (lrgs(r)._fat_proj) { // Count the int-only registers RegMask itmp = lrgs(r).mask(); itmp.AND(*Matcher::idealreg2regmask[Op_RegI]); @@ -636,12 +641,12 @@ // Copies do not define a new value and so do not interfere. // Remove the copy's source from the liveout set before interfering. uint idx = n->is_Copy(); - if( idx ) { - uint x = n2lidx(n->in(idx)); - if( liveout.remove( x ) ) { + if (idx) { + uint x = _lrg_map.live_range_id(n->in(idx)); + if (liveout.remove(x)) { lrgs(x)._area -= cost; // Adjust register pressure. - lower_pressure( &lrgs(x), j-1, b, pressure, hrp_index ); + lower_pressure(&lrgs(x), j-1, b, pressure, hrp_index); assert( pressure[0] == count_int_pressure (&liveout), "" ); assert( pressure[1] == count_float_pressure(&liveout), "" ); } @@ -727,18 +732,21 @@ // the flags and assumes it's dead. This keeps the (useless) // flag-setting behavior alive while also keeping the (useful) // memory update effect. - for( uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++ ) { + for (uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++) { Node *def = n->in(k); - uint x = n2lidx(def); - if( !x ) continue; + uint x = _lrg_map.live_range_id(def); + if (!x) { + continue; + } LRG &lrg = lrgs(x); // No use-side cost for spilling debug info - if( k < debug_start ) + if (k < debug_start) { // A USE costs twice block frequency (once for the Load, once // for a Load-delay). Rematerialized uses only cost once. 
lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq)); + } // It is live now - if( liveout.insert( x ) ) { + if (liveout.insert(x)) { // Newly live things assumed live from here to top of block lrg._area += cost; // Adjust register pressure diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/ifnode.cpp --- a/src/share/vm/opto/ifnode.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/ifnode.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -673,7 +673,7 @@ // / Region // Node* IfNode::fold_compares(PhaseGVN* phase) { - if (!EliminateAutoBox || Opcode() != Op_If) return NULL; + if (!phase->C->eliminate_boxing() || Opcode() != Op_If) return NULL; Node* this_cmp = in(1)->in(1); if (this_cmp != NULL && this_cmp->Opcode() == Op_CmpI && diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/lcm.cpp --- a/src/share/vm/opto/lcm.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/lcm.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -219,9 +219,10 @@ // cannot reason about it; is probably not implicit null exception } else { const TypePtr* tptr; - if (UseCompressedOops && Universe::narrow_oop_shift() == 0) { + if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 || + Universe::narrow_klass_shift() == 0)) { // 32-bits narrow oop can be the base of address expressions - tptr = base->bottom_type()->make_ptr(); + tptr = base->get_ptr_type(); } else { // only regular oops are expected here tptr = base->bottom_type()->is_ptr(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/library_call.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,6 +38,7 @@ #include "opto/subnode.hpp" #include "prims/nativeLookup.hpp" #include "runtime/sharedRuntime.hpp" +#include "trace/traceMacros.hpp" class LibraryIntrinsic : public InlineCallGenerator { // Extend the set of intrinsics known to the runtime: @@ -2783,7 +2784,7 @@ #ifdef _LP64 if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) { - load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->bottom_type()->make_ptr())); + load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type())); } #endif @@ -3703,7 +3704,7 @@ CallJavaNode* slow_call; if (is_static) { assert(!is_virtual, ""); - slow_call = new(C) CallStaticJavaNode(tf, + slow_call = new(C) CallStaticJavaNode(C, tf, SharedRuntime::get_resolve_static_call_stub(), method, bci()); } else if (is_virtual) { @@ -3722,7 +3723,7 @@ method, vtable_index, bci()); } else { // neither virtual nor static: opt_virtual null_check_receiver(); - slow_call = new(C) CallStaticJavaNode(tf, + slow_call = new(C) CallStaticJavaNode(C, tf, SharedRuntime::get_resolve_opt_virtual_call_stub(), method, bci()); slow_call->set_optimized_virtual(true); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/live.cpp --- a/src/share/vm/opto/live.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/live.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -44,7 +44,7 @@ // block is put on the worklist. // The locally live-in stuff is computed once and added to predecessor // live-out sets. 
This separate compilation is done in the outer loop below. -PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) { +PhaseLive::PhaseLive( const PhaseCFG &cfg, const LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) { } void PhaseLive::compute(uint maxlrg) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/live.hpp --- a/src/share/vm/opto/live.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/live.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -80,7 +80,7 @@ Block_List *_worklist; // Worklist for iterative solution const PhaseCFG &_cfg; // Basic blocks - LRG_List &_names; // Mapping from Nodes to live ranges + const LRG_List &_names; // Mapping from Nodes to live ranges uint _maxlrg; // Largest live-range number Arena *_arena; @@ -91,7 +91,7 @@ void add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ); public: - PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ); + PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena); ~PhaseLive() {} // Compute liveness info void compute(uint maxlrg); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/loopPredicate.cpp --- a/src/share/vm/opto/loopPredicate.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/loopPredicate.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -821,8 +821,8 @@ loop->dump_head(); } #endif - } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) { - assert(proj->_con == predicate_proj->_con, "must match"); + } else if ((cl != NULL) && (proj->_con == predicate_proj->_con) && + loop->is_range_check_if(iff, this, invar)) { // Range check for counted loops const Node* cmp = bol->in(1)->as_Cmp(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/loopnode.cpp --- a/src/share/vm/opto/loopnode.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/loopnode.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -440,7 +440,7 @@ // ---- SUCCESS! Found A Trip-Counted Loop! ----- // assert(x->Opcode() == Op_Loop, "regular loops only"); - C->print_method("Before CountedLoop", 3); + C->print_method(PHASE_BEFORE_CLOOPS, 3); Node *hook = new (C) Node(6); @@ -791,7 +791,7 @@ } #endif - C->print_method("After CountedLoop", 3); + C->print_method(PHASE_AFTER_CLOOPS, 3); return true; } @@ -2164,7 +2164,7 @@ // Split shared headers and insert loop landing pads. // Do not bother doing this on the Root loop of course. if( !_verify_me && !_verify_only && _ltree_root->_child ) { - C->print_method("Before beautify loops", 3); + C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3); if( _ltree_root->_child->beautify_loops( this ) ) { // Re-build loop tree! _ltree_root->_child = NULL; @@ -2178,7 +2178,7 @@ // Reset loop nesting depth _ltree_root->set_nest( 0 ); - C->print_method("After beautify loops", 3); + C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/loopnode.hpp --- a/src/share/vm/opto/loopnode.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/loopnode.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -965,7 +965,7 @@ // Has use internal to the vector set (ie. 
not in a phi at the loop head) bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ); // clone "n" for uses that are outside of loop - void clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ); + int clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ); // clone "n" for special uses that are in the not_peeled region void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n, VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/loopopts.cpp --- a/src/share/vm/opto/loopopts.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/loopopts.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1939,8 +1939,8 @@ //------------------------------ clone_for_use_outside_loop ------------------------------------- // clone "n" for uses that are outside of loop -void PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) { - +int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) { + int cloned = 0; assert(worklist.size() == 0, "should be empty"); for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { Node* use = n->fast_out(j); @@ -1960,6 +1960,7 @@ // clone "n" and insert it between the inputs of "n" and the use outside the loop Node* n_clone = n->clone(); _igvn.replace_input_of(use, j, n_clone); + cloned++; Node* use_c; if (!use->is_Phi()) { use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0); @@ -1977,6 +1978,7 @@ } #endif } + return cloned; } @@ -2495,6 +2497,7 @@ // Evacuate nodes in peel region into the not_peeled region if possible uint new_phi_cnt = 0; + uint cloned_for_outside_use = 0; for (i = 0; i < peel_list.size();) { Node* n = peel_list.at(i); #if !defined(PRODUCT) @@ -2513,8 +2516,7 @@ // if not pinned and not a load (which maybe anti-dependent on a store) // and not a CMove (Matcher expects only bool->cmove). if ( n->in(0) == NULL && !n->is_Load() && !n->is_CMove() ) { - clone_for_use_outside_loop( loop, n, worklist ); - + cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist ); sink_list.push(n); peel >>= n->_idx; // delete n from peel set. not_peel <<= n->_idx; // add n to not_peel set. @@ -2551,6 +2553,12 @@ // Inhibit more partial peeling on this loop assert(!head->is_partial_peel_loop(), "not partial peeled"); head->mark_partial_peel_failed(); + if (cloned_for_outside_use > 0) { + // Terminate this round of loop opts because + // the graph outside this loop was changed. 
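// (Returning true with major progress set requests another round of loop
// optimizations: partial peeling itself failed, but clone_for_use_outside_loop()
// has already mutated the graph outside this loop, so analysis results from
// this round are stale.)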
+ C->set_major_progress(); + return true; + } return false; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/machnode.cpp --- a/src/share/vm/opto/machnode.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/machnode.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -349,11 +349,11 @@ if (base == NodeSentinel) return TypePtr::BOTTOM; const Type* t = base->bottom_type(); - if (UseCompressedOops && Universe::narrow_oop_shift() == 0) { + if (t->isa_narrowoop() && Universe::narrow_oop_shift() == 0) { // 32-bit unscaled narrow oop can be the base of any address expression t = t->make_ptr(); } - if (UseCompressedKlassPointers && Universe::narrow_klass_shift() == 0) { + if (t->isa_narrowklass() && Universe::narrow_klass_shift() == 0) { // 32-bit unscaled narrow oop can be the base of any address expression t = t->make_ptr(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/macro.cpp --- a/src/share/vm/opto/macro.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/macro.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -666,7 +666,7 @@ alloc->dump(); else res->dump(); - } else { + } else if (alloc->_is_scalar_replaceable) { tty->print("NotScalar (%s)", fail_eliminate); if (res == NULL) alloc->dump(); @@ -834,7 +834,7 @@ if (field_val->is_EncodeP()) { field_val = field_val->in(1); } else { - field_val = transform_later(new (C) DecodeNNode(field_val, field_val->bottom_type()->make_ptr())); + field_val = transform_later(new (C) DecodeNNode(field_val, field_val->get_ptr_type())); } } sfpt->add_req(field_val); @@ -845,18 +845,14 @@ // to the allocated object with "sobj" int start = jvms->debug_start(); int end = jvms->debug_end(); - for (int i = start; i < end; i++) { - if (sfpt->in(i) == res) { - sfpt->set_req(i, sobj); - } - } + sfpt->replace_edges_in_range(res, sobj, start, end); safepoints_done.append_if_missing(sfpt); // keep it for rollback } return true; } // Process users of eliminated allocation. -void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) { +void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) { Node* res = alloc->result_cast(); if (res != NULL) { for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) { @@ -899,6 +895,17 @@ // Process other users of allocation's projections // if (_resproj != NULL && _resproj->outcnt() != 0) { + // First disconnect stores captured by Initialize node. + // If Initialize node is eliminated first in the following code, + // it will kill such stores and DUIterator_Last will assert. 
+ for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax); j < jmax; j++) { + Node *use = _resproj->fast_out(j); + if (use->is_AddP()) { + // raw memory addresses used only by the initialization + _igvn.replace_node(use, C->top()); + --j; --jmax; + } + } for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) { Node *use = _resproj->last_out(j); uint oc1 = _resproj->outcnt(); @@ -923,9 +930,6 @@ #endif _igvn.replace_node(mem_proj, mem); } - } else if (use->is_AddP()) { - // raw memory addresses used only by the initialization - _igvn.replace_node(use, C->top()); } else { assert(false, "only Initialize or AddP expected"); } @@ -953,8 +957,18 @@ } bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { - - if (!EliminateAllocations || !alloc->_is_scalar_replaceable) { + if (!EliminateAllocations || !alloc->_is_non_escaping) { + return false; + } + Node* klass = alloc->in(AllocateNode::KlassNode); + const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr(); + Node* res = alloc->result_cast(); + // Eliminate boxing allocations which are not used + // regardless of scalar-replaceable status. + bool boxing_alloc = C->eliminate_boxing() && + tklass->klass()->is_instance_klass() && + tklass->klass()->as_instance_klass()->is_box_klass(); + if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) { return false; } @@ -965,14 +979,22 @@ return false; } + if (!alloc->_is_scalar_replaceable) { + assert(res == NULL, "sanity"); + // We can only eliminate allocation if all debug info references + // are already replaced with SafePointScalarObject because + // we can't search for a field's value without instance_id. + if (safepoints.length() > 0) { + return false; + } + } + if (!scalar_replacement(alloc, safepoints)) { return false; } CompileLog* log = C->log(); if (log != NULL) { - Node* klass = alloc->in(AllocateNode::KlassNode); - const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr(); log->head("eliminate_allocation type='%d'", log->identify(tklass->klass())); JVMState* p = alloc->jvms(); @@ -997,6 +1019,43 @@ return true; } +bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) { + // EA should remove all uses of a non-escaping boxing node. 
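// (proj_out(TypeFunc::Parms) is the projection that carries the call's
// result; if it is still present, the boxed value has remaining uses and the
// call must be kept. Escape analysis is expected to have rewired all uses of
// a non-escaping box before macro expansion runs.)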
+ if (!C->eliminate_boxing() || boxing->proj_out(TypeFunc::Parms) != NULL) { + return false; + } + + extract_call_projections(boxing); + + const TypeTuple* r = boxing->tf()->range(); + assert(r->cnt() > TypeFunc::Parms, "sanity"); + const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr(); + assert(t != NULL, "sanity"); + + CompileLog* log = C->log(); + if (log != NULL) { + log->head("eliminate_boxing type='%d'", + log->identify(t->klass())); + JVMState* p = boxing->jvms(); + while (p != NULL) { + log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); + p = p->caller(); + } + log->tail("eliminate_boxing"); + } + + process_users_of_allocation(boxing); + +#ifndef PRODUCT + if (PrintEliminateAllocations) { + tty->print("++++ Eliminated: %d ", boxing->_idx); + boxing->method()->print_short_name(tty); + tty->cr(); + } +#endif + + return true; +} //---------------------------set_eden_pointers------------------------- void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) { @@ -2384,6 +2443,9 @@ case Node::Class_AllocateArray: success = eliminate_allocate_node(n->as_Allocate()); break; + case Node::Class_CallStaticJava: + success = eliminate_boxing_node(n->as_CallStaticJava()); + break; case Node::Class_Lock: case Node::Class_Unlock: assert(!n->as_AbstractLock()->is_eliminated(), "sanity"); @@ -2424,6 +2486,11 @@ C->remove_macro_node(n); _igvn._worklist.push(n); success = true; + } else if (n->Opcode() == Op_CallStaticJava) { + // Remove it from macro list and put on IGVN worklist to optimize. + C->remove_macro_node(n); + _igvn._worklist.push(n); + success = true; } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) { _igvn.replace_node(n, n->in(1)); success = true; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/macro.hpp --- a/src/share/vm/opto/macro.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/macro.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -86,10 +86,11 @@ Node *value_from_mem(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc); Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level); + bool eliminate_boxing_node(CallStaticJavaNode *boxing); bool eliminate_allocate_node(AllocateNode *alloc); bool can_eliminate_allocation(AllocateNode *alloc, GrowableArray & safepoints); bool scalar_replacement(AllocateNode *alloc, GrowableArray & safepoints_done); - void process_users_of_allocation(AllocateNode *alloc); + void process_users_of_allocation(CallNode *alloc); void eliminate_card_mark(Node *cm); void mark_eliminated_box(Node* box, Node* obj); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/matcher.cpp --- a/src/share/vm/opto/matcher.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/matcher.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -317,7 +317,7 @@ find_shared( C->root() ); find_shared( C->top() ); - C->print_method("Before Matching"); + C->print_method(PHASE_BEFORE_MATCHING); // Create new ideal node ConP #NULL even if it does exist in old space // to avoid false sharing if the corresponding mach node is not used. 
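eliminate_boxing_node only fires once every use of the valueOf() result has been rewired (the proj_out(TypeFunc::Parms) != NULL check above). Removal is legal because valueOf has no observable side effects; what does make box identity observable is the small-value cache, which is why the compiler is conservative about remaining uses. A runnable reminder of that caching contract:

    // Integer.valueOf must return identical cached objects for values in
    // [-128, 127]; outside that range fresh objects may be allocated.
    public class CacheDemo {
        public static void main(String[] args) {
            System.out.println(Integer.valueOf(100) == Integer.valueOf(100));   // true
            System.out.println(Integer.valueOf(1000) == Integer.valueOf(1000)); // false with default VM settings
        }
    }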
@@ -1282,16 +1282,6 @@ mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area; } - if (is_method_handle_invoke) { - // Kill some extra stack space in case method handles want to do - // a little in-place argument insertion. - // FIXME: Is this still necessary? - int regs_per_word = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const! - out_arg_limit_per_call += Method::extra_stack_entries() * regs_per_word; - // Do not update mcall->_argsize because (a) the extra space is not - // pushed as arguments and (b) _argsize is dead (not used anywhere). - } - // Compute the max stack slot killed by any call. These will not be // available for debug info, and will be used to adjust FIRST_STACK_mask // after all call sites have been visited. @@ -1858,7 +1848,7 @@ for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) { // binary tree int newrule; - if( i == 0 ) + if( i == 0) newrule = kid->_rule[_leftOp[rule]]; else newrule = kid->_rule[_rightOp[rule]]; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/memnode.cpp --- a/src/share/vm/opto/memnode.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/memnode.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -103,11 +103,15 @@ #endif -Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) { - const TypeOopPtr *tinst = t_adr->isa_oopptr(); - if (tinst == NULL || !tinst->is_known_instance_field()) +Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) { + assert((t_oop != NULL), "sanity"); + bool is_instance = t_oop->is_known_instance_field(); + bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() && + (load != NULL) && load->is_Load() && + (phase->is_IterGVN() != NULL); + if (!(is_instance || is_boxed_value_load)) return mchain; // don't try to optimize non-instance types - uint instance_id = tinst->instance_id(); + uint instance_id = t_oop->instance_id(); Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory); Node *prev = NULL; Node *result = mchain; @@ -122,15 +126,24 @@ break; // hit one of our sentinels } else if (proj_in->is_Call()) { CallNode *call = proj_in->as_Call(); - if (!call->may_modify(t_adr, phase)) { + if (!call->may_modify(t_oop, phase)) { // returns false for instances result = call->in(TypeFunc::Memory); } } else if (proj_in->is_Initialize()) { AllocateNode* alloc = proj_in->as_Initialize()->allocation(); // Stop if this is the initialization for the object instance which // which contains this memory slice, otherwise skip over it. - if (alloc != NULL && alloc->_idx != instance_id) { + if ((alloc == NULL) || (alloc->_idx == instance_id)) { + break; + } + if (is_instance) { result = proj_in->in(TypeFunc::Memory); + } else if (is_boxed_value_load) { + Node* klass = alloc->in(AllocateNode::KlassNode); + const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr(); + if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) { + result = proj_in->in(TypeFunc::Memory); // not related allocation + } } } else if (proj_in->is_MemBar()) { result = proj_in->in(TypeFunc::Memory); @@ -138,25 +151,26 @@ assert(false, "unexpected projection"); } } else if (result->is_ClearArray()) { - if (!ClearArrayNode::step_through(&result, instance_id, phase)) { + if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) { // Can not bypass initialization of the instance // we are looking for. break; } // Otherwise skip it (the call updated 'result' value). 
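The optimize_simple_memory_chain change above lets a load from a boxed value walk the memory chain past the Initialize node of an allocation whose exact klass differs from the box's klass. A hypothetical Java shape this enables (names are illustrative only):

    // The load feeding intValue() can now step past the initialization of
    // the unrelated Object allocation to reach the store made by valueOf().
    public class ChainDemo {
        static Object sink;
        static int f(int x) {
            Integer box = Integer.valueOf(x);
            sink = new Object();      // unrelated allocation between store and load
            return box.intValue();
        }
        public static void main(String[] args) {
            System.out.println(f(7)); // 7
        }
    }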
} else if (result->is_MergeMem()) { - result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty); + result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty); } } return result; } -Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) { - const TypeOopPtr *t_oop = t_adr->isa_oopptr(); - bool is_instance = (t_oop != NULL) && t_oop->is_known_instance_field(); +Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) { + const TypeOopPtr* t_oop = t_adr->isa_oopptr(); + if (t_oop == NULL) + return mchain; // don't try to optimize non-oop types + Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase); + bool is_instance = t_oop->is_known_instance_field(); PhaseIterGVN *igvn = phase->is_IterGVN(); - Node *result = mchain; - result = optimize_simple_memory_chain(result, t_adr, phase); if (is_instance && igvn != NULL && result->is_Phi()) { PhiNode *mphi = result->as_Phi(); assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); @@ -383,7 +397,7 @@ // Or Region for the check in LoadNode::Ideal(); // 'sub' should have sub->in(0) != NULL. assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() || - sub->is_Region(), "expecting only these nodes"); + sub->is_Region() || sub->is_Call(), "expecting only these nodes"); // Get control edge of 'sub'. Node* orig_sub = sub; @@ -957,11 +971,14 @@ // of aliasing. Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { Node* ld_adr = in(MemNode::Address); - + intptr_t ld_off = 0; + AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off); const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr(); - Compile::AliasType* atp = tp != NULL ? phase->C->alias_type(tp) : NULL; - if (EliminateAutoBox && atp != NULL && atp->index() >= Compile::AliasIdxRaw && - atp->field() != NULL && !atp->field()->is_volatile()) { + Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL; + // This is more general than load from boxing objects. + if (phase->C->eliminate_boxing() && (atp != NULL) && + (atp->index() >= Compile::AliasIdxRaw) && + (atp->field() != NULL) && !atp->field()->is_volatile()) { uint alias_idx = atp->index(); bool final = atp->field()->is_final(); Node* result = NULL; @@ -983,7 +1000,7 @@ Node* new_st = merge->memory_at(alias_idx); if (new_st == merge->base_memory()) { // Keep searching - current = merge->base_memory(); + current = new_st; continue; } // Save the new memory state for the slice and fall through @@ -1010,9 +1027,7 @@ intptr_t st_off = 0; AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off); if (alloc == NULL) return NULL; - intptr_t ld_off = 0; - AllocateNode* allo2 = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off); - if (alloc != allo2) return NULL; + if (alloc != ld_alloc) return NULL; if (ld_off != st_off) return NULL; // At this point we have proven something like this setup: // A = Allocate(...) @@ -1029,14 +1044,12 @@ return st->in(MemNode::ValueIn); } - intptr_t offset = 0; // scratch - // A load from a freshly-created object always returns zero. // (This can happen after LoadNode::Ideal resets the load's memory input // to find_captured_store, which returned InitializeNode::zero_memory.) 
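The new rule in can_see_stored_value answers a load of a box's value field with the original argument of the valueOf() call (base->in(0)->in(TypeFunc::Parms)), so box/unbox round trips fold away. A minimal runnable demo:

    // roundTrip compiles down to 'return true': the load of the value field
    // is replaced by the valueOf() input parameter.
    public class RoundTripDemo {
        static boolean roundTrip(int x) {
            return Integer.valueOf(x).intValue() == x;
        }
        public static void main(String[] args) {
            System.out.println(roundTrip(123456)); // true
        }
    }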
if (st->is_Proj() && st->in(0)->is_Allocate() && - st->in(0) == AllocateNode::Ideal_allocation(ld_adr, phase, offset) && - offset >= st->in(0)->as_Allocate()->minimum_header_size()) { + (st->in(0) == ld_alloc) && + (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) { // return a zero value for the load's basic type // (This is one of the few places where a generic PhaseTransform // can create new nodes. Think of it as lazily manifesting @@ -1048,15 +1061,27 @@ if (st->is_Proj() && st->in(0)->is_Initialize()) { InitializeNode* init = st->in(0)->as_Initialize(); AllocateNode* alloc = init->allocation(); - if (alloc != NULL && - alloc == AllocateNode::Ideal_allocation(ld_adr, phase, offset)) { + if ((alloc != NULL) && (alloc == ld_alloc)) { // examine a captured store value - st = init->find_captured_store(offset, memory_size(), phase); + st = init->find_captured_store(ld_off, memory_size(), phase); if (st != NULL) continue; // take one more trip around } } + // Load boxed value from result of valueOf() call is input parameter. + if (this->is_Load() && ld_adr->is_AddP() && + (tp != NULL) && tp->is_ptr_to_boxed_value()) { + intptr_t ignore = 0; + Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore); + if (base != NULL && base->is_Proj() && + base->as_Proj()->_con == TypeFunc::Parms && + base->in(0)->is_CallStaticJava() && + base->in(0)->as_CallStaticJava()->is_boxing_method()) { + return base->in(0)->in(TypeFunc::Parms); + } + } + break; } @@ -1065,11 +1090,13 @@ //----------------------is_instance_field_load_with_local_phi------------------ bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) { - if( in(MemNode::Memory)->is_Phi() && in(MemNode::Memory)->in(0) == ctrl && - in(MemNode::Address)->is_AddP() ) { - const TypeOopPtr* t_oop = in(MemNode::Address)->bottom_type()->isa_oopptr(); - // Only instances. - if( t_oop != NULL && t_oop->is_known_instance_field() && + if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl && + in(Address)->is_AddP() ) { + const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr(); + // Only instances and boxed values. + if( t_oop != NULL && + (t_oop->is_ptr_to_boxed_value() || + t_oop->is_known_instance_field()) && t_oop->offset() != Type::OffsetBot && t_oop->offset() != Type::OffsetTop) { return true; @@ -1083,7 +1110,7 @@ Node *LoadNode::Identity( PhaseTransform *phase ) { // If the previous store-maker is the right kind of Store, and the store is // to the same address, then we are equal to the value stored. - Node* mem = in(MemNode::Memory); + Node* mem = in(Memory); Node* value = can_see_stored_value(mem, phase); if( value ) { // byte, short & char stores truncate naturally. @@ -1105,15 +1132,22 @@ // instance's field to avoid infinite generation of phis in a loop. Node *region = mem->in(0); if (is_instance_field_load_with_local_phi(region)) { - const TypePtr *addr_t = in(MemNode::Address)->bottom_type()->isa_ptr(); + const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr(); int this_index = phase->C->get_alias_index(addr_t); int this_offset = addr_t->offset(); - int this_id = addr_t->is_oopptr()->instance_id(); + int this_iid = addr_t->instance_id(); + if (!addr_t->is_known_instance() && + addr_t->is_ptr_to_boxed_value()) { + // Use _idx of address base (could be Phi node) for boxed values. 
+ intptr_t ignore = 0; + Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore); + this_iid = base->_idx; + } const Type* this_type = bottom_type(); for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { Node* phi = region->fast_out(i); if (phi->is_Phi() && phi != mem && - phi->as_Phi()->is_same_inst_field(this_type, this_id, this_index, this_offset)) { + phi->as_Phi()->is_same_inst_field(this_type, this_iid, this_index, this_offset)) { return phi; } } @@ -1122,170 +1156,106 @@ return this; } - -// Returns true if the AliasType refers to the field that holds the -// cached box array. Currently only handles the IntegerCache case. -static bool is_autobox_cache(Compile::AliasType* atp) { - if (atp != NULL && atp->field() != NULL) { - ciField* field = atp->field(); - ciSymbol* klass = field->holder()->name(); - if (field->name() == ciSymbol::cache_field_name() && - field->holder()->uses_default_loader() && - klass == ciSymbol::java_lang_Integer_IntegerCache()) { - return true; - } - } - return false; -} - -// Fetch the base value in the autobox array -static bool fetch_autobox_base(Compile::AliasType* atp, int& cache_offset) { - if (atp != NULL && atp->field() != NULL) { - ciField* field = atp->field(); - ciSymbol* klass = field->holder()->name(); - if (field->name() == ciSymbol::cache_field_name() && - field->holder()->uses_default_loader() && - klass == ciSymbol::java_lang_Integer_IntegerCache()) { - assert(field->is_constant(), "what?"); - ciObjArray* array = field->constant_value().as_object()->as_obj_array(); - // Fetch the box object at the base of the array and get its value - ciInstance* box = array->obj_at(0)->as_instance(); - ciInstanceKlass* ik = box->klass()->as_instance_klass(); - if (ik->nof_nonstatic_fields() == 1) { - // This should be true nonstatic_field_at requires calling - // nof_nonstatic_fields so check it anyway - ciConstant c = box->field_value(ik->nonstatic_field_at(0)); - cache_offset = c.as_int(); - } - return true; - } - } - return false; -} - -// Returns true if the AliasType refers to the value field of an -// autobox object. Currently only handles Integer. -static bool is_autobox_object(Compile::AliasType* atp) { - if (atp != NULL && atp->field() != NULL) { - ciField* field = atp->field(); - ciSymbol* klass = field->holder()->name(); - if (field->name() == ciSymbol::value_name() && - field->holder()->uses_default_loader() && - klass == ciSymbol::java_lang_Integer()) { - return true; - } - } - return false; -} - - // We're loading from an object which has autobox behaviour. // If this object is result of a valueOf call we'll have a phi // merging a newly allocated object and a load from the cache. // We want to replace this load with the original incoming // argument to the valueOf call. 
Node* LoadNode::eliminate_autobox(PhaseGVN* phase) { - Node* base = in(Address)->in(AddPNode::Base); - if (base->is_Phi() && base->req() == 3) { - AllocateNode* allocation = NULL; - int allocation_index = -1; - int load_index = -1; - for (uint i = 1; i < base->req(); i++) { - allocation = AllocateNode::Ideal_allocation(base->in(i), phase); - if (allocation != NULL) { - allocation_index = i; - load_index = 3 - allocation_index; - break; - } - } - bool has_load = ( allocation != NULL && - (base->in(load_index)->is_Load() || - base->in(load_index)->is_DecodeN() && - base->in(load_index)->in(1)->is_Load()) ); - if (has_load && in(Memory)->is_Phi() && in(Memory)->in(0) == base->in(0)) { - // Push the loads from the phi that comes from valueOf up - // through it to allow elimination of the loads and the recovery - // of the original value. - Node* mem_phi = in(Memory); - Node* offset = in(Address)->in(AddPNode::Offset); - Node* region = base->in(0); - - Node* in1 = clone(); - Node* in1_addr = in1->in(Address)->clone(); - in1_addr->set_req(AddPNode::Base, base->in(allocation_index)); - in1_addr->set_req(AddPNode::Address, base->in(allocation_index)); - in1_addr->set_req(AddPNode::Offset, offset); - in1->set_req(0, region->in(allocation_index)); - in1->set_req(Address, in1_addr); - in1->set_req(Memory, mem_phi->in(allocation_index)); - - Node* in2 = clone(); - Node* in2_addr = in2->in(Address)->clone(); - in2_addr->set_req(AddPNode::Base, base->in(load_index)); - in2_addr->set_req(AddPNode::Address, base->in(load_index)); - in2_addr->set_req(AddPNode::Offset, offset); - in2->set_req(0, region->in(load_index)); - in2->set_req(Address, in2_addr); - in2->set_req(Memory, mem_phi->in(load_index)); - - in1_addr = phase->transform(in1_addr); - in1 = phase->transform(in1); - in2_addr = phase->transform(in2_addr); - in2 = phase->transform(in2); - - PhiNode* result = PhiNode::make_blank(region, this); - result->set_req(allocation_index, in1); - result->set_req(load_index, in2); - return result; - } + assert(phase->C->eliminate_boxing(), "sanity"); + intptr_t ignore = 0; + Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore); + if ((base == NULL) || base->is_Phi()) { + // Push the loads from the phi that comes from valueOf up + // through it to allow elimination of the loads and the recovery + // of the original value. It is done in split_through_phi(). + return NULL; } else if (base->is_Load() || base->is_DecodeN() && base->in(1)->is_Load()) { - if (base->is_DecodeN()) { - // Get LoadN node which loads cached Integer object - base = base->in(1); - } - // Eliminate the load of Integer.value for integers from the cache + // Eliminate the load of boxed value for integer types from the cache // array by deriving the value from the index into the array. // Capture the offset of the load and then reverse the computation. - Node* load_base = base->in(Address)->in(AddPNode::Base); - if (load_base->is_DecodeN()) { - // Get LoadN node which loads IntegerCache.cache field - load_base = load_base->in(1); + + // Get LoadN node which loads a boxing object from 'cache' array. 
+ if (base->is_DecodeN()) { + base = base->in(1); + } + if (!base->in(Address)->is_AddP()) { + return NULL; // Complex address } - if (load_base != NULL) { - Compile::AliasType* atp = phase->C->alias_type(load_base->adr_type()); - intptr_t cache_offset; - int shift = -1; - Node* cache = NULL; - if (is_autobox_cache(atp)) { - shift = exact_log2(type2aelembytes(T_OBJECT)); - cache = AddPNode::Ideal_base_and_offset(load_base->in(Address), phase, cache_offset); - } - if (cache != NULL && base->in(Address)->is_AddP()) { + AddPNode* address = base->in(Address)->as_AddP(); + Node* cache_base = address->in(AddPNode::Base); + if ((cache_base != NULL) && cache_base->is_DecodeN()) { + // Get ConP node which is static 'cache' field. + cache_base = cache_base->in(1); + } + if ((cache_base != NULL) && cache_base->is_Con()) { + const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr(); + if ((base_type != NULL) && base_type->is_autobox_cache()) { Node* elements[4]; - int count = base->in(Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements)); - int cache_low; - if (count > 0 && fetch_autobox_base(atp, cache_low)) { - int offset = arrayOopDesc::base_offset_in_bytes(memory_type()) - (cache_low << shift); - // Add up all the offsets making of the address of the load - Node* result = elements[0]; - for (int i = 1; i < count; i++) { - result = phase->transform(new (phase->C) AddXNode(result, elements[i])); + int shift = exact_log2(type2aelembytes(T_OBJECT)); + int count = address->unpack_offsets(elements, ARRAY_SIZE(elements)); + if ((count > 0) && elements[0]->is_Con() && + ((count == 1) || + (count == 2) && elements[1]->Opcode() == Op_LShiftX && + elements[1]->in(2) == phase->intcon(shift))) { + ciObjArray* array = base_type->const_oop()->as_obj_array(); + // Fetch the box object cache[0] at the base of the array and get its value + ciInstance* box = array->obj_at(0)->as_instance(); + ciInstanceKlass* ik = box->klass()->as_instance_klass(); + assert(ik->is_box_klass(), "sanity"); + assert(ik->nof_nonstatic_fields() == 1, "change following code"); + if (ik->nof_nonstatic_fields() == 1) { + // This should be true nonstatic_field_at requires calling + // nof_nonstatic_fields so check it anyway + ciConstant c = box->field_value(ik->nonstatic_field_at(0)); + BasicType bt = c.basic_type(); + // Only integer types have boxing cache. + assert(bt == T_BOOLEAN || bt == T_CHAR || + bt == T_BYTE || bt == T_SHORT || + bt == T_INT || bt == T_LONG, err_msg_res("wrong type = %s", type2name(bt))); + jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int(); + if (cache_low != (int)cache_low) { + return NULL; // should not happen since cache is array indexed by value + } + jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift); + if (offset != (int)offset) { + return NULL; // should not happen since cache is array indexed by value + } + // Add up all the offsets making of the address of the load + Node* result = elements[0]; + for (int i = 1; i < count; i++) { + result = phase->transform(new (phase->C) AddXNode(result, elements[i])); + } + // Remove the constant offset from the address and then + result = phase->transform(new (phase->C) AddXNode(result, phase->MakeConX(-(int)offset))); + // remove the scaling of the offset to recover the original index. 
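A worked example of the address arithmetic being reversed above, using illustrative layout constants (16-byte array header, 4-byte oops so shift = 2, cache_low = -128); the real values depend on the VM configuration:

    // cache[value - cacheLow] sits at baseOffset + ((value - cacheLow) << shift)
    // from the array base. The compiler folds the constant part into one
    // offset, subtracts it, and undoes the scaling to recover the value.
    public class IndexRecoveryDemo {
        public static void main(String[] args) {
            int baseOffset = 16, shift = 2, cacheLow = -128;
            int value = 42;
            int addrOff = baseOffset + ((value - cacheLow) << shift);
            int offset = baseOffset - (cacheLow << shift); // 528 with these constants
            int recovered = (addrOff - offset) >> shift;
            System.out.println(recovered == value);        // true
        }
    }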
+ if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) { + // Peel the shift off directly but wrap it in a dummy node + // since Ideal can't return existing nodes + result = new (phase->C) RShiftXNode(result->in(1), phase->intcon(0)); + } else if (result->is_Add() && result->in(2)->is_Con() && + result->in(1)->Opcode() == Op_LShiftX && + result->in(1)->in(2) == phase->intcon(shift)) { + // We can't do general optimization: ((X<<Z) + Y) >> Z ==> X + (Y>>Z) + // but for boxing cache access we know that X<<Z will not overflow + // (there is range check) so we do this optimization by hand here. + Node* add_con = new (phase->C) RShiftXNode(result->in(2), phase->intcon(shift)); + result = new (phase->C) AddXNode(result->in(1)->in(1), phase->transform(add_con)); + } else { + result = new (phase->C) RShiftXNode(result, phase->intcon(shift)); + } +#ifdef _LP64 + if (bt != T_LONG) { + result = new (phase->C) ConvL2INode(phase->transform(result)); + } +#else + if (bt == T_LONG) { + result = new (phase->C) ConvI2LNode(phase->transform(result)); + } +#endif + return result; } - // Remove the constant offset from the address and then - // remove the scaling of the offset to recover the original index. - result = phase->transform(new (phase->C) AddXNode(result, phase->MakeConX(-offset))); - if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) { - // Peel the shift off directly but wrap it in a dummy node - // since Ideal can't return existing nodes - result = new (phase->C) RShiftXNode(result->in(1), phase->intcon(0)); - } else { - result = new (phase->C) RShiftXNode(result, phase->intcon(shift)); - } -#ifdef _LP64 - result = new (phase->C) ConvL2INode(phase->transform(result)); -#endif - return result; } } } @@ -1293,65 +1263,131 @@ return NULL; } -//------------------------------split_through_phi------------------------------ -// Split instance field load through Phi. -Node *LoadNode::split_through_phi(PhaseGVN *phase) { - Node* mem = in(MemNode::Memory); - Node* address = in(MemNode::Address); - const TypePtr *addr_t = phase->type(address)->isa_ptr(); - const TypeOopPtr *t_oop = addr_t->isa_oopptr(); - - assert(mem->is_Phi() && (t_oop != NULL) && - t_oop->is_known_instance_field(), "invalide conditions"); - - Node *region = mem->in(0); +static bool stable_phi(PhiNode* phi, PhaseGVN *phase) { Node* region = phi->in(0); if (region == NULL) { - return NULL; // Wait stable graph + return false; // Wait stable graph } - uint cnt = mem->req(); + uint cnt = phi->req(); for (uint i = 1; i < cnt; i++) { Node* rc = region->in(i); if (rc == NULL || phase->type(rc) == Type::TOP) - return NULL; // Wait stable graph - Node *in = mem->in(i); - if (in == NULL) { + return false; // Wait stable graph + Node* in = phi->in(i); + if (in == NULL || phase->type(in) == Type::TOP) + return false; // Wait stable graph + } + return true; +} +//------------------------------split_through_phi------------------------------ +// Split instance or boxed field load through Phi. 
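The hand-applied rewrite above relies on the identity ((X<<Z) + Y) >> Z == X + (Y>>Z), which is only valid when the low Z bits of Y are zero and X<<Z does not overflow, both of which hold for the folded cache offset. A quick arithmetic check:

    // Verifies the shift identity with a constant whose low 'shift' bits
    // are clear, as the folded cache offset always is.
    public class ShiftIdentityDemo {
        public static void main(String[] args) {
            int shift = 2, x = 42, y = -528;        // y % 4 == 0
            int lhs = ((x << shift) + y) >> shift;  // -90
            int rhs = x + (y >> shift);             // -90
            System.out.println(lhs == rhs);         // true
        }
    }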
+Node *LoadNode::split_through_phi(PhaseGVN *phase) { + Node* mem = in(Memory); + Node* address = in(Address); + const TypeOopPtr *t_oop = phase->type(address)->isa_oopptr(); + + assert((t_oop != NULL) && + (t_oop->is_known_instance_field() || + t_oop->is_ptr_to_boxed_value()), "invalide conditions"); + + Compile* C = phase->C; + intptr_t ignore = 0; + Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore); + bool base_is_phi = (base != NULL) && base->is_Phi(); + bool load_boxed_values = t_oop->is_ptr_to_boxed_value() && C->aggressive_unboxing() && + (base != NULL) && (base == address->in(AddPNode::Base)) && + phase->type(base)->higher_equal(TypePtr::NOTNULL); + + if (!((mem->is_Phi() || base_is_phi) && + (load_boxed_values || t_oop->is_known_instance_field()))) { + return NULL; // memory is not Phi + } + + if (mem->is_Phi()) { + if (!stable_phi(mem->as_Phi(), phase)) { return NULL; // Wait stable graph } - } - // Check for loop invariant. - if (cnt == 3) { - for (uint i = 1; i < cnt; i++) { - Node *in = mem->in(i); - Node* m = MemNode::optimize_memory_chain(in, addr_t, phase); - if (m == mem) { - set_req(MemNode::Memory, mem->in(cnt - i)); // Skip this phi. - return this; + uint cnt = mem->req(); + // Check for loop invariant memory. + if (cnt == 3) { + for (uint i = 1; i < cnt; i++) { + Node* in = mem->in(i); + Node* m = optimize_memory_chain(in, t_oop, this, phase); + if (m == mem) { + set_req(Memory, mem->in(cnt - i)); + return this; // made change + } } } } + if (base_is_phi) { + if (!stable_phi(base->as_Phi(), phase)) { + return NULL; // Wait stable graph + } + uint cnt = base->req(); + // Check for loop invariant memory. + if (cnt == 3) { + for (uint i = 1; i < cnt; i++) { + if (base->in(i) == base) { + return NULL; // Wait stable graph + } + } + } + } + + bool load_boxed_phi = load_boxed_values && base_is_phi && (base->in(0) == mem->in(0)); + // Split through Phi (see original code in loopopts.cpp). - assert(phase->C->have_alias_type(addr_t), "instance should have alias type"); + assert(C->have_alias_type(t_oop), "instance should have alias type"); // Do nothing here if Identity will find a value // (to avoid infinite chain of value phis generation). if (!phase->eqv(this, this->Identity(phase))) return NULL; - // Skip the split if the region dominates some control edge of the address. - if (!MemNode::all_controls_dominate(address, region)) - return NULL; + // Select Region to split through. + Node* region; + if (!base_is_phi) { + assert(mem->is_Phi(), "sanity"); + region = mem->in(0); + // Skip if the region dominates some control edge of the address. + if (!MemNode::all_controls_dominate(address, region)) + return NULL; + } else if (!mem->is_Phi()) { + assert(base_is_phi, "sanity"); + region = base->in(0); + // Skip if the region dominates some control edge of the memory. 
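A hedged Java sketch of the shape the extended split_through_phi now handles: a Phi merging two freshly boxed values whose unboxing load is cloned into each branch and then folded to the original ints (method names are illustrative only):

    // With aggressive unboxing, the load feeding intValue() is split through
    // the Phi of the two valueOf() results, and each clone folds to its int
    // argument, leaving no boxes at all.
    public class PhiSplitDemo {
        static int pick(boolean flag, int a, int b) {
            Integer v = flag ? Integer.valueOf(a) : Integer.valueOf(b);
            return v.intValue();
        }
        public static void main(String[] args) {
            System.out.println(pick(true, 1, 2) + pick(false, 1, 2)); // 3
        }
    }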
+ if (!MemNode::all_controls_dominate(mem, region)) + return NULL; + } else if (base->in(0) != mem->in(0)) { + assert(base_is_phi && mem->is_Phi(), "sanity"); + if (MemNode::all_controls_dominate(mem, base->in(0))) { + region = base->in(0); + } else if (MemNode::all_controls_dominate(address, mem->in(0))) { + region = mem->in(0); + } else { + return NULL; // complex graph + } + } else { + assert(base->in(0) == mem->in(0), "sanity"); + region = mem->in(0); + } const Type* this_type = this->bottom_type(); - int this_index = phase->C->get_alias_index(addr_t); - int this_offset = addr_t->offset(); - int this_iid = addr_t->is_oopptr()->instance_id(); - PhaseIterGVN *igvn = phase->is_IterGVN(); - Node *phi = new (igvn->C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); + int this_index = C->get_alias_index(t_oop); + int this_offset = t_oop->offset(); + int this_iid = t_oop->instance_id(); + if (!t_oop->is_known_instance() && load_boxed_values) { + // Use _idx of address base for boxed values. + this_iid = base->_idx; + } + PhaseIterGVN* igvn = phase->is_IterGVN(); + Node* phi = new (C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); for (uint i = 1; i < region->req(); i++) { - Node *x; + Node* x; Node* the_clone = NULL; - if (region->in(i) == phase->C->top()) { - x = phase->C->top(); // Dead path? Use a dead data op + if (region->in(i) == C->top()) { + x = C->top(); // Dead path? Use a dead data op } else { x = this->clone(); // Else clone up the data op the_clone = x; // Remember for possible deletion. @@ -1361,10 +1397,16 @@ } else { x->set_req(0, NULL); } - for (uint j = 1; j < this->req(); j++) { - Node *in = this->in(j); - if (in->is_Phi() && in->in(0) == region) - x->set_req(j, in->in(i)); // Use pre-Phi input for the clone + if (mem->is_Phi() && (mem->in(0) == region)) { + x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone. + } + if (address->is_Phi() && address->in(0) == region) { + x->set_req(Address, address->in(i)); // Use pre-Phi input for the clone + } + if (base_is_phi && (base->in(0) == region)) { + Node* base_x = base->in(i); // Clone address for loads from boxed objects. + Node* adr_x = phase->transform(new (C) AddPNode(base_x,base_x,address->in(AddPNode::Offset))); + x->set_req(Address, adr_x); } } // Check for a 'win' on some paths @@ -1394,7 +1436,7 @@ if (y != x) { x = y; } else { - y = igvn->hash_find(x); + y = igvn->hash_find_insert(x); if (y) { x = y; } else { @@ -1405,8 +1447,9 @@ } } } - if (x != the_clone && the_clone != NULL) + if (x != the_clone && the_clone != NULL) { igvn->remove_dead_node(the_clone); + } phi->set_req(i, x); } // Record Phi @@ -1445,31 +1488,23 @@ // A method-invariant, non-null address (constant or 'this' argument). 
set_req(MemNode::Control, NULL); } - - if (EliminateAutoBox && can_reshape) { - assert(!phase->type(base)->higher_equal(TypePtr::NULL_PTR), "the autobox pointer should be non-null"); - Compile::AliasType* atp = phase->C->alias_type(adr_type()); - if (is_autobox_object(atp)) { - Node* result = eliminate_autobox(phase); - if (result != NULL) return result; - } - } } Node* mem = in(MemNode::Memory); const TypePtr *addr_t = phase->type(address)->isa_ptr(); - if (addr_t != NULL) { + if (can_reshape && (addr_t != NULL)) { // try to optimize our memory input - Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, phase); + Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase); if (opt_mem != mem) { set_req(MemNode::Memory, opt_mem); if (phase->type( opt_mem ) == Type::TOP) return NULL; return this; } const TypeOopPtr *t_oop = addr_t->isa_oopptr(); - if (can_reshape && opt_mem->is_Phi() && - (t_oop != NULL) && t_oop->is_known_instance_field()) { + if ((t_oop != NULL) && + (t_oop->is_known_instance_field() || + t_oop->is_ptr_to_boxed_value())) { PhaseIterGVN *igvn = phase->is_IterGVN(); if (igvn != NULL && igvn->_worklist.member(opt_mem)) { // Delay this transformation until memory Phi is processed. @@ -1479,6 +1514,11 @@ // Split instance field load through Phi. Node* result = split_through_phi(phase); if (result != NULL) return result; + + if (t_oop->is_ptr_to_boxed_value()) { + Node* result = eliminate_autobox(phase); + if (result != NULL) return result; + } } } @@ -1587,18 +1627,23 @@ // This can happen if a interface-typed array narrows to a class type. jt = _type; } - - if (EliminateAutoBox && adr->is_AddP()) { +#ifdef ASSERT + if (phase->C->eliminate_boxing() && adr->is_AddP()) { // The pointers in the autobox arrays are always non-null Node* base = adr->in(AddPNode::Base); - if (base != NULL && - !phase->type(base)->higher_equal(TypePtr::NULL_PTR)) { - Compile::AliasType* atp = C->alias_type(base->adr_type()); - if (is_autobox_cache(atp)) { - return jt->join(TypePtr::NOTNULL)->is_ptr(); + if ((base != NULL) && base->is_DecodeN()) { + // Get LoadN node which loads IntegerCache.cache field + base = base->in(1); + } + if ((base != NULL) && base->is_Con()) { + const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr(); + if ((base_type != NULL) && base_type->is_autobox_cache()) { + // It could be narrow oop + assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity"); } } } +#endif return jt; } } @@ -1638,6 +1683,10 @@ // Optimizations for constant objects ciObject* const_oop = tinst->const_oop(); if (const_oop != NULL) { + // For constant Boxed value treat the target field as a compile time constant. + if (tinst->is_ptr_to_boxed_value()) { + return tinst->get_const_boxed_value(); + } else // For constant CallSites treat the target field as a compile time constant. if (const_oop->is_call_site()) { ciCallSite* call_site = const_oop->as_call_site(); @@ -1759,7 +1808,8 @@ // (Also allow a variable load from a fresh array to produce zero.) 
const TypeOopPtr *tinst = tp->isa_oopptr(); bool is_instance = (tinst != NULL) && tinst->is_known_instance_field(); - if (ReduceFieldZeroing || is_instance) { + bool is_boxed_value = (tinst != NULL) && tinst->is_ptr_to_boxed_value(); + if (ReduceFieldZeroing || is_instance || is_boxed_value) { Node* value = can_see_stored_value(mem,phase); if (value != NULL && value->is_Con()) { assert(value->bottom_type()->higher_equal(_type),"sanity"); @@ -2883,24 +2933,38 @@ if (in(0) && in(0)->is_top()) return NULL; // Eliminate volatile MemBars for scalar replaced objects. - if (can_reshape && req() == (Precedent+1) && - (Opcode() == Op_MemBarAcquire || Opcode() == Op_MemBarVolatile)) { - // Volatile field loads and stores. - Node* my_mem = in(MemBarNode::Precedent); - if (my_mem != NULL && my_mem->is_Mem()) { - const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr(); - // Check for scalar replaced object reference. - if( t_oop != NULL && t_oop->is_known_instance_field() && - t_oop->offset() != Type::OffsetBot && - t_oop->offset() != Type::OffsetTop) { - // Replace MemBar projections by its inputs. - PhaseIterGVN* igvn = phase->is_IterGVN(); - igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory)); - igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control)); - // Must return either the original node (now dead) or a new node - // (Do not return a top here, since that would break the uniqueness of top.) - return new (phase->C) ConINode(TypeInt::ZERO); + if (can_reshape && req() == (Precedent+1)) { + bool eliminate = false; + int opc = Opcode(); + if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) { + // Volatile field loads and stores. + Node* my_mem = in(MemBarNode::Precedent); + if (my_mem != NULL && my_mem->is_Mem()) { + const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr(); + // Check for scalar replaced object reference. + if( t_oop != NULL && t_oop->is_known_instance_field() && + t_oop->offset() != Type::OffsetBot && + t_oop->offset() != Type::OffsetTop) { + eliminate = true; + } } + } else if (opc == Op_MemBarRelease) { + // Final field stores. + Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase); + if ((alloc != NULL) && alloc->is_Allocate() && + alloc->as_Allocate()->_is_non_escaping) { + // The allocated object does not escape. + eliminate = true; + } + } + if (eliminate) { + // Replace MemBar projections by its inputs. + PhaseIterGVN* igvn = phase->is_IterGVN(); + igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory)); + igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control)); + // Must return either the original node (now dead) or a new node + // (Do not return a top here, since that would break the uniqueness of top.) + return new (phase->C) ConINode(TypeInt::ZERO); } } return NULL; @@ -3113,9 +3177,7 @@ // within the initialization without creating a vicious cycle, such as: // { Foo p = new Foo(); p.next = p; } // True for constants and parameters and small combinations thereof. -bool InitializeNode::detect_init_independence(Node* n, - bool st_is_pinned, - int& count) { +bool InitializeNode::detect_init_independence(Node* n, int& count) { if (n == NULL) return true; // (can this really happen?) if (n->is_Proj()) n = n->in(0); if (n == this) return false; // found a cycle @@ -3135,7 +3197,6 @@ // a store is never pinned *before* the availability of its inputs. 
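The new Op_MemBarRelease case above removes the release barrier that guards final-field stores when the allocation is known not to escape; the barrier is only needed when the object can be published to another thread. An illustrative Java demo (class names are hypothetical):

    // local() never lets the Point escape, so both the allocation and the
    // final-field release barrier can be eliminated; publish() stores the
    // object to a shared field, so its barrier must stay.
    public class FinalFieldDemo {
        static final class Point {
            final int x;
            Point(int x) { this.x = x; } // final store, barrier on ctor exit
        }
        static Point shared;
        static int local(int v)  { return new Point(v).x; }
        static void publish(int v) { shared = new Point(v); }
        public static void main(String[] args) {
            publish(5);
            System.out.println(local(5) + shared.x); // 10
        }
    }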
if (!MemNode::all_controls_dominate(n, this)) return false; // failed to prove a good control - } // Check data edges for possible dependencies on 'this'. @@ -3145,7 +3206,7 @@ if (m == NULL || m == n || m->is_top()) continue; uint first_i = n->find_edge(m); if (i != first_i) continue; // process duplicate edge just once - if (!detect_init_independence(m, st_is_pinned, count)) { + if (!detect_init_independence(m, count)) { return false; } } @@ -3176,7 +3237,7 @@ return FAIL; // wrong allocation! (store needs to float up) Node* val = st->in(MemNode::ValueIn); int complexity_count = 0; - if (!detect_init_independence(val, true, complexity_count)) + if (!detect_init_independence(val, complexity_count)) return FAIL; // stored value must be 'simple enough' // The Store can be captured only if nothing after the allocation diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/memnode.hpp --- a/src/share/vm/opto/memnode.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/memnode.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -75,8 +75,8 @@ PhaseTransform* phase); static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast); - static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase); - static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase); + static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase); + static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase); // This one should probably be a phase-specific function: static bool all_controls_dominate(Node* dom, Node* sub); @@ -1099,7 +1099,7 @@ Node* make_raw_address(intptr_t offset, PhaseTransform* phase); - bool detect_init_independence(Node* n, bool st_is_pinned, int& count); + bool detect_init_independence(Node* n, int& count); void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes, PhaseGVN* phase); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/multnode.cpp --- a/src/share/vm/opto/multnode.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/multnode.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "opto/callnode.hpp" #include "opto/matcher.hpp" #include "opto/multnode.hpp" #include "opto/opcodes.hpp" @@ -73,13 +74,26 @@ return (_con == TypeFunc::Control && def->is_CFG()); } +const Type* ProjNode::proj_type(const Type* t) const { + if (t == Type::TOP) { + return Type::TOP; + } + if (t == Type::BOTTOM) { + return Type::BOTTOM; + } + t = t->is_tuple()->field_at(_con); + Node* n = in(0); + if ((_con == TypeFunc::Parms) && + n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method()) { + // The result of autoboxing is always non-null on normal path. 
+ t = t->join(TypePtr::NOTNULL); + } + return t; +} + const Type *ProjNode::bottom_type() const { - if (in(0) == NULL) return Type::TOP; - const Type *tb = in(0)->bottom_type(); - if( tb == Type::TOP ) return Type::TOP; - if( tb == Type::BOTTOM ) return Type::BOTTOM; - const TypeTuple *t = tb->is_tuple(); - return t->field_at(_con); + if (in(0) == NULL) return Type::TOP; + return proj_type(in(0)->bottom_type()); } const TypePtr *ProjNode::adr_type() const { @@ -115,11 +129,8 @@ //------------------------------Value------------------------------------------ const Type *ProjNode::Value( PhaseTransform *phase ) const { - if( !in(0) ) return Type::TOP; - const Type *t = phase->type(in(0)); - if( t == Type::TOP ) return t; - if( t == Type::BOTTOM ) return t; - return t->is_tuple()->field_at(_con); + if (in(0) == NULL) return Type::TOP; + return proj_type(phase->type(in(0))); } //------------------------------out_RegMask------------------------------------ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/multnode.hpp --- a/src/share/vm/opto/multnode.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/multnode.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -60,6 +60,7 @@ virtual uint cmp( const Node &n ) const; virtual uint size_of() const; void check_con() const; // Called from constructor. + const Type* proj_type(const Type* t) const; public: ProjNode( Node *src, uint con, bool io_use = false ) @@ -83,6 +84,7 @@ virtual const Type *Value( PhaseTransform *phase ) const; virtual uint ideal_reg() const; virtual const RegMask &out_RegMask() const; + #ifndef PRODUCT virtual void dump_spec(outputStream *st) const; #endif diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/node.cpp --- a/src/share/vm/opto/node.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/node.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -67,7 +67,8 @@ } Compile::set_debug_idx(new_debug_idx); set_debug_idx( new_debug_idx ); - assert(Compile::current()->unique() < (UINT_MAX - 1), "Node limit exceeded UINT_MAX"); + assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX"); + assert(Compile::current()->live_nodes() < (uint)MaxNodeLimit, "Live Node limit exceeded limit"); if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) { tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx); BREAKPOINT; @@ -471,9 +472,9 @@ //------------------------------clone------------------------------------------ // Clone a Node. Node *Node::clone() const { - Compile *compile = Compile::current(); + Compile* C = Compile::current(); uint s = size_of(); // Size of inherited Node - Node *n = (Node*)compile->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*)); + Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*)); Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s); // Set the new input pointer array n->_in = (Node**)(((char*)n)+s); @@ -492,18 +493,18 @@ if (x != NULL) x->add_out(n); } if (is_macro()) - compile->add_macro_node(n); + C->add_macro_node(n); if (is_expensive()) - compile->add_expensive_node(n); + C->add_expensive_node(n); - n->set_idx(compile->next_unique()); // Get new unique index as well + n->set_idx(C->next_unique()); // Get new unique index as well debug_only( n->verify_construction() ); NOT_PRODUCT(nodes_created++); // Do not patch over the debug_idx of a clone, because it makes it // impossible to break on the clone's moment of creation. 
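Since ProjNode::proj_type now joins the boxing call's result type with NOTNULL, null checks against valueOf() results fold to constants. A small runnable example:

    // valueOf never returns null, so after the projection is typed NOTNULL
    // this method compiles down to 'return true'.
    public class NonNullDemo {
        static boolean neverNull(int x) {
            return Integer.valueOf(x) != null;
        }
        public static void main(String[] args) {
            System.out.println(neverNull(0)); // true
        }
    }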
//debug_only( n->set_debug_idx( debug_idx() ) ); - compile->copy_node_notes_to(n, (Node*) this); + C->copy_node_notes_to(n, (Node*) this); // MachNode clone uint nopnds; @@ -518,13 +519,12 @@ (const void*)(&mthis->_opnds), 1)); mach->_opnds = to; for ( uint i = 0; i < nopnds; ++i ) { - to[i] = from[i]->clone(compile); + to[i] = from[i]->clone(C); } } // cloning CallNode may need to clone JVMState if (n->is_Call()) { - CallNode *call = n->as_Call(); - call->clone_jvms(); + n->as_Call()->clone_jvms(C); } return n; // Return the clone } @@ -811,6 +811,21 @@ return nrep; } +/** + * Replace input edges in the range pointing to 'old' node. + */ +int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) { + if (old == neww) return 0; // nothing to do + uint nrep = 0; + for (int i = start; i < end; i++) { + if (in(i) == old) { + set_req(i, neww); + nrep++; + } + } + return nrep; +} + //-------------------------disconnect_inputs----------------------------------- // NULL out all inputs to eliminate incoming Def-Use edges. // Return the number of edges between 'n' and 'this' @@ -1383,6 +1398,21 @@ return NULL; } + +/** + * Return a ptr type for nodes which should have it. + */ +const TypePtr* Node::get_ptr_type() const { + const TypePtr* tp = this->bottom_type()->make_ptr(); +#ifdef ASSERT + if (tp == NULL) { + this->dump(1); + assert((tp != NULL), "unexpected node type"); + } +#endif + return tp; +} + // Get a double constant from a ConstNode. // Returns the constant if it is a double ConstNode jdouble Node::getd() const { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/node.hpp --- a/src/share/vm/opto/node.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/node.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -410,6 +410,7 @@ // Find first occurrence of n among my edges: int find_edge(Node* n); int replace_edge(Node* old, Node* neww); + int replace_edges_in_range(Node* old, Node* neww, int start, int end); // NULL out all inputs to eliminate incoming Def-Use edges. 
// Return the number of edges between 'n' and 'this' int disconnect_inputs(Node *n, Compile *c); @@ -964,6 +965,8 @@ } const TypeLong* find_long_type() const; + const TypePtr* get_ptr_type() const; + // These guys are called by code generated by ADLC: intptr_t get_ptr() const; intptr_t get_narrowcon() const; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/output.cpp --- a/src/share/vm/opto/output.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/output.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "asm/assembler.inline.hpp" +#include "code/compiledIC.hpp" #include "code/debugInfo.hpp" #include "code/debugInfoRec.hpp" #include "compiler/compileBroker.hpp" @@ -41,8 +42,6 @@ #include "runtime/handles.inline.hpp" #include "utilities/xmlstream.hpp" -extern uint size_java_to_interp(); -extern uint reloc_java_to_interp(); extern uint size_exception_handler(); extern uint size_deopt_handler(); @@ -389,15 +388,15 @@ MachNode *mach = nj->as_Mach(); blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding reloc_size += mach->reloc(); - if( mach->is_MachCall() ) { + if (mach->is_MachCall()) { MachCallNode *mcall = mach->as_MachCall(); // This destination address is NOT PC-relative mcall->method_set((intptr_t)mcall->entry_point()); - if( mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method ) { - stub_size += size_java_to_interp(); - reloc_size += reloc_java_to_interp(); + if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) { + stub_size += CompiledStaticCall::to_interp_stub_size(); + reloc_size += CompiledStaticCall::reloc_to_interp_stub(); } } else if (mach->is_MachSafePoint()) { // If call/safepoint are adjacent, account for possible @@ -930,7 +929,7 @@ scval = new_loc_value( _regalloc, obj_reg, Location::oop ); } } else { - const TypePtr *tp = obj_node->bottom_type()->make_ptr(); + const TypePtr *tp = obj_node->get_ptr_type(); scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding()); } @@ -1047,21 +1046,6 @@ debug_info->end_non_safepoint(pc_offset); } - - -// helper for fill_buffer bailout logic -static void turn_off_compiler(Compile* C) { - if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) { - // Do not turn off compilation if a single giant method has - // blown the code cache size. - C->record_failure("excessive request to CodeCache"); - } else { - // Let CompilerBroker disable further compilations. - C->record_failure("CodeCache is full"); - } -} - - //------------------------------init_buffer------------------------------------ CodeBuffer* Compile::init_buffer(uint* blk_starts) { @@ -1161,7 +1145,7 @@ // Have we run out of code space? if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { - turn_off_compiler(this); + C->record_failure("CodeCache is full"); return NULL; } // Configure the code buffer. 
@@ -1479,7 +1463,7 @@ // Verify that there is sufficient space remaining cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size); if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { - turn_off_compiler(this); + C->record_failure("CodeCache is full"); return; } @@ -1636,7 +1620,7 @@ // One last check for failed CodeBuffer::expand: if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { - turn_off_compiler(this); + C->record_failure("CodeCache is full"); return; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/parse.hpp --- a/src/share/vm/opto/parse.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/parse.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -330,6 +330,7 @@ bool _wrote_final; // Did we write a final field? bool _count_invocations; // update and test invocation counter bool _method_data_update; // update method data oop + Node* _alloc_with_final; // An allocation node with final field // Variables which track Java semantics during bytecode parsing: @@ -370,6 +371,11 @@ void set_wrote_final(bool z) { _wrote_final = z; } bool count_invocations() const { return _count_invocations; } bool method_data_update() const { return _method_data_update; } + Node* alloc_with_final() const { return _alloc_with_final; } + void set_alloc_with_final(Node* n) { + assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?"); + _alloc_with_final = n; + } Block* block() const { return _block; } ciBytecodeStream& iter() { return _iter; } @@ -512,7 +518,7 @@ // loading from a constant field or the constant pool // returns false if push failed (non-perm field constants only, not ldcs) - bool push_constant(ciConstant con, bool require_constant = false); + bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false); // implementation of object creation bytecodes void emit_guard_for_new(ciInstanceKlass* klass); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/parse1.cpp --- a/src/share/vm/opto/parse1.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/parse1.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -390,6 +390,7 @@ _expected_uses = expected_uses; _depth = 1 + (caller->has_method() ? caller->depth() : 0); _wrote_final = false; + _alloc_with_final = NULL; _entry_bci = InvocationEntryBci; _tf = NULL; _block = NULL; @@ -723,6 +724,8 @@ // Note: iophi and memphi are not transformed until do_exits. Node* iophi = new (C) PhiNode(region, Type::ABIO); Node* memphi = new (C) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM); + gvn().set_type_bottom(iophi); + gvn().set_type_bottom(memphi); _exits.set_i_o(iophi); _exits.set_all_memory(memphi); @@ -738,6 +741,7 @@ } int ret_size = type2size[ret_type->basic_type()]; Node* ret_phi = new (C) PhiNode(region, ret_type); + gvn().set_type_bottom(ret_phi); _exits.ensure_stack(ret_size); assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range"); assert(method()->return_type()->size() == ret_size, "tf agrees w/ method"); @@ -917,7 +921,7 @@ // such unusual early publications. But no barrier is needed on // exceptional returns, since they cannot publish normally. 
// - _exits.insert_mem_bar(Op_MemBarRelease); + _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final()); #ifndef PRODUCT if (PrintOpto && (Verbose || WizardMode)) { method()->print_name(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/parse2.cpp --- a/src/share/vm/opto/parse2.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/parse2.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -987,7 +987,7 @@ uncommon_trap(Deoptimization::Reason_unreached, Deoptimization::Action_reinterpret, NULL, "cold"); - if (EliminateAutoBox) { + if (C->eliminate_boxing()) { // Mark the successor blocks as parsed branch_block->next_path_num(); next_block->next_path_num(); @@ -1012,7 +1012,7 @@ if (stopped()) { // Path is dead? explicit_null_checks_elided++; - if (EliminateAutoBox) { + if (C->eliminate_boxing()) { // Mark the successor block as parsed branch_block->next_path_num(); } @@ -1032,7 +1032,7 @@ if (stopped()) { // Path is dead? explicit_null_checks_elided++; - if (EliminateAutoBox) { + if (C->eliminate_boxing()) { // Mark the successor block as parsed next_block->next_path_num(); } @@ -1069,7 +1069,7 @@ uncommon_trap(Deoptimization::Reason_unreached, Deoptimization::Action_reinterpret, NULL, "cold"); - if (EliminateAutoBox) { + if (C->eliminate_boxing()) { // Mark the successor blocks as parsed branch_block->next_path_num(); next_block->next_path_num(); @@ -1135,7 +1135,7 @@ set_control(taken_branch); if (stopped()) { - if (EliminateAutoBox) { + if (C->eliminate_boxing()) { // Mark the successor block as parsed branch_block->next_path_num(); } @@ -1154,7 +1154,7 @@ // Branch not taken. if (stopped()) { - if (EliminateAutoBox) { + if (C->eliminate_boxing()) { // Mark the successor block as parsed next_block->next_path_num(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/parse3.cpp --- a/src/share/vm/opto/parse3.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/parse3.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -150,6 +150,23 @@ // final field if (field->is_static()) { // final static field + if (C->eliminate_boxing()) { + // The pointers in the autobox arrays are always non-null. + ciSymbol* klass_name = field->holder()->name(); + if (field->name() == ciSymbol::cache_field_name() && + field->holder()->uses_default_loader() && + (klass_name == ciSymbol::java_lang_Character_CharacterCache() || + klass_name == ciSymbol::java_lang_Byte_ByteCache() || + klass_name == ciSymbol::java_lang_Short_ShortCache() || + klass_name == ciSymbol::java_lang_Integer_IntegerCache() || + klass_name == ciSymbol::java_lang_Long_LongCache())) { + bool require_const = true; + bool autobox_cache = true; + if (push_constant(field->constant_value(), require_const, autobox_cache)) { + return; + } + } + } if (push_constant(field->constant_value())) return; } @@ -304,11 +321,18 @@ // out of the constructor. if (is_field && field->is_final()) { set_wrote_final(true); + // Preserve allocation ptr to create precedent edge to it in membar + // generated on exit from constructor. 
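The parse3.cpp hunk above treats the static 'cache' fields of the five integral box classes as compile-time constants. All five back the small-value caches observable from Java (Long's cache is documented JDK behavior rather than a JLS requirement):

    // Each valueOf below returns a cached instance for small values, so
    // every comparison prints true on the JDK's reference implementation.
    public class AllCachesDemo {
        public static void main(String[] args) {
            System.out.println(Byte.valueOf((byte) 7)   == Byte.valueOf((byte) 7));
            System.out.println(Short.valueOf((short) 7) == Short.valueOf((short) 7));
            System.out.println(Character.valueOf('x')   == Character.valueOf('x'));
            System.out.println(Integer.valueOf(7)       == Integer.valueOf(7));
            System.out.println(Long.valueOf(7L)         == Long.valueOf(7L));
        }
    }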
+ if (C->eliminate_boxing() && + adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() && + AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) { + set_alloc_with_final(obj); + } } } -bool Parse::push_constant(ciConstant constant, bool require_constant) { +bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache) { switch (constant.basic_type()) { case T_BOOLEAN: push( intcon(constant.as_boolean()) ); break; case T_INT: push( intcon(constant.as_int()) ); break; @@ -329,7 +353,7 @@ push( zerocon(T_OBJECT) ); break; } else if (require_constant || oop_constant->should_be_constant()) { - push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) ); + push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant, is_autobox_cache)) ); break; } else { // we cannot inline the oop, but we can use it later to narrow a type diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/parseHelper.cpp --- a/src/share/vm/opto/parseHelper.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/parseHelper.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -284,6 +284,11 @@ klass == C->env()->StringBuffer_klass())) { C->set_has_stringbuilder(true); } + + // Keep track of boxed values for EliminateAutoBox optimizations. + if (C->eliminate_boxing() && klass->is_box_klass()) { + C->set_has_boxed_value(true); + } } #ifndef PRODUCT @@ -337,19 +342,21 @@ if (!count_invocations()) return; // Get the Method* node. - const TypePtr* adr_type = TypeMetadataPtr::make(method()); - Node *method_node = makecon(adr_type); + ciMethod* m = method(); + address counters_adr = m->ensure_method_counters(); - // Load the interpreter_invocation_counter from the Method*. 
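The parseHelper.cpp hunk above flags a compilation as containing boxed values whenever an allocation of a box class is parsed, enabling the boxing-elimination passes. A hedged sketch of source code that ends up on this path (ordinary autoboxing lowers to valueOf, whose inlined body allocates the box):

    // 'Integer boxed = 41' is sugar for Integer.valueOf(41); once the
    // valueOf body is inlined, its box allocation marks the compile via
    // set_has_boxed_value and becomes a candidate for elimination.
    public class FlagDemo {
        public static void main(String[] args) {
            Integer boxed = 41;
            System.out.println(boxed + 1); // 42
        }
    }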
- int offset = Method::interpreter_invocation_counter_offset_in_bytes(); - Node* adr_node = basic_plus_adr(method_node, method_node, offset); - Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type); + Node* ctrl = control(); + const TypePtr* adr_type = TypeRawPtr::make(counters_adr); + Node *counters_node = makecon(adr_type); + Node* adr_iic_node = basic_plus_adr(counters_node, counters_node, + MethodCounters::interpreter_invocation_counter_offset_in_bytes()); + Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type); test_counter_against_threshold(cnt, limit); // Add one to the counter and store Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1))); - store_to_memory( NULL, adr_node, incr, T_INT, adr_type ); + store_to_memory( ctrl, adr_iic_node, incr, T_INT, adr_type ); } //----------------------------method_data_addressing--------------------------- diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/phase.cpp --- a/src/share/vm/opto/phase.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/phase.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -64,6 +64,7 @@ // Subtimers for _t_optimizer elapsedTimer Phase::_t_iterGVN; elapsedTimer Phase::_t_iterGVN2; +elapsedTimer Phase::_t_incrInline; // Subtimers for _t_registerAllocation elapsedTimer Phase::_t_ctorChaitin; @@ -110,6 +111,7 @@ tty->print_cr (" macroEliminate : %3.3f sec", Phase::_t_macroEliminate.seconds()); } tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds()); + tty->print_cr (" incrInline : %3.3f sec", Phase::_t_incrInline.seconds()); tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds()); tty->print_cr (" idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds()); tty->print_cr (" ccp : %3.3f sec", Phase::_t_ccp.seconds()); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/phase.hpp --- a/src/share/vm/opto/phase.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/phase.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -100,6 +100,7 @@ // Subtimers for _t_optimizer static elapsedTimer _t_iterGVN; static elapsedTimer _t_iterGVN2; + static elapsedTimer _t_incrInline; // Subtimers for _t_registerAllocation static elapsedTimer _t_ctorChaitin; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/phaseX.cpp --- a/src/share/vm/opto/phaseX.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/phaseX.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -882,7 +882,7 @@ return; } Node *n = _worklist.pop(); - if (++loop_count >= K * C->unique()) { + if (++loop_count >= K * C->live_nodes()) { debug_only(n->dump(4);) assert(false, "infinite loop in PhaseIterGVN::optimize"); C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize"); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/phasetype.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/opto/phasetype.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
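// The phase.cpp/phase.hpp hunks above add a subtimer for the new
// incremental-inlining phase plus a matching report line. A standalone model
// of the accumulate-and-report pattern those timers follow, using <chrono>
// (the real class is elapsedTimer from runtime/timer.hpp, not reproduced
// here):

#include <chrono>
#include <cstdio>

class ElapsedTimerModel {
  std::chrono::steady_clock::time_point _start;
  double _accum = 0.0;   // seconds accumulated across all start/stop pairs
 public:
  void start() { _start = std::chrono::steady_clock::now(); }
  void stop() {
    _accum += std::chrono::duration<double>(
                  std::chrono::steady_clock::now() - _start).count();
  }
  double seconds() const { return _accum; }
};

// Usage mirroring the new report line:
//   timer.start(); /* ...run incremental inlining... */ timer.stop();
//   printf("    incrInline     : %3.3f sec\n", timer.seconds());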
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OPTO_PHASETYPE_HPP +#define SHARE_VM_OPTO_PHASETYPE_HPP + +enum CompilerPhaseType { + PHASE_BEFORE_STRINGOPTS, + PHASE_AFTER_STRINGOPTS, + PHASE_BEFORE_REMOVEUSELESS, + PHASE_AFTER_PARSING, + PHASE_ITER_GVN1, + PHASE_PHASEIDEAL_BEFORE_EA, + PHASE_ITER_GVN_AFTER_EA, + PHASE_ITER_GVN_AFTER_ELIMINATION, + PHASE_PHASEIDEALLOOP1, + PHASE_PHASEIDEALLOOP2, + PHASE_PHASEIDEALLOOP3, + PHASE_CPP1, + PHASE_ITER_GVN2, + PHASE_PHASEIDEALLOOP_ITERATIONS, + PHASE_OPTIMIZE_FINISHED, + PHASE_GLOBAL_CODE_MOTION, + PHASE_FINAL_CODE, + PHASE_AFTER_EA, + PHASE_BEFORE_CLOOPS, + PHASE_AFTER_CLOOPS, + PHASE_BEFORE_BEAUTIFY_LOOPS, + PHASE_AFTER_BEAUTIFY_LOOPS, + PHASE_BEFORE_MATCHING, + PHASE_INCREMENTAL_INLINE, + PHASE_INCREMENTAL_BOXING_INLINE, + PHASE_END, + PHASE_FAILURE, + + PHASE_NUM_TYPES +}; + +class CompilerPhaseTypeHelper { + public: + static const char* to_string(CompilerPhaseType cpt) { + switch (cpt) { + case PHASE_BEFORE_STRINGOPTS: return "Before StringOpts"; + case PHASE_AFTER_STRINGOPTS: return "After StringOpts"; + case PHASE_BEFORE_REMOVEUSELESS: return "Before RemoveUseless"; + case PHASE_AFTER_PARSING: return "After Parsing"; + case PHASE_ITER_GVN1: return "Iter GVN 1"; + case PHASE_PHASEIDEAL_BEFORE_EA: return "PhaseIdealLoop before EA"; + case PHASE_ITER_GVN_AFTER_EA: return "Iter GVN after EA"; + case PHASE_ITER_GVN_AFTER_ELIMINATION: return "Iter GVN after eliminating allocations and locks"; + case PHASE_PHASEIDEALLOOP1: return "PhaseIdealLoop 1"; + case PHASE_PHASEIDEALLOOP2: return "PhaseIdealLoop 2"; + case PHASE_PHASEIDEALLOOP3: return "PhaseIdealLoop 3"; + case PHASE_CPP1: return "PhaseCPP 1"; + case PHASE_ITER_GVN2: return "Iter GVN 2"; + case PHASE_PHASEIDEALLOOP_ITERATIONS: return "PhaseIdealLoop iterations"; + case PHASE_OPTIMIZE_FINISHED: return "Optimize finished"; + case PHASE_GLOBAL_CODE_MOTION: return "Global code motion"; + case PHASE_FINAL_CODE: return "Final Code"; + case PHASE_AFTER_EA: return "After Escape Analysis"; + case PHASE_BEFORE_CLOOPS: return "Before CountedLoop"; + case PHASE_AFTER_CLOOPS: return "After CountedLoop"; + case PHASE_BEFORE_BEAUTIFY_LOOPS: return "Before beautify loops"; + case PHASE_AFTER_BEAUTIFY_LOOPS: return "After beautify loops"; + case PHASE_BEFORE_MATCHING: return "Before Matching"; + case PHASE_INCREMENTAL_INLINE: return "Incremental Inline"; + case PHASE_INCREMENTAL_BOXING_INLINE: return "Incremental Boxing Inline"; + case PHASE_END: return "End"; + case PHASE_FAILURE: return "Failure"; + default: + ShouldNotReachHere(); + return NULL; + } + } +}; + +#endif //SHARE_VM_OPTO_PHASETYPE_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/postaloc.cpp --- a/src/share/vm/opto/postaloc.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/postaloc.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -56,7 +56,7 @@ int i; for( i=0; i < limit; i++ ) { if( def->is_Proj() && def->in(0)->is_Start() && - _matcher.is_save_on_entry(lrgs(n2lidx(def)).reg()) ) + 
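// phasetype.hpp above gives every optimizer phase a stable printable name. A
// minimal consumer, assuming the new header (and HotSpot's utility macros it
// relies on, such as ShouldNotReachHere) are available; the real consumers,
// the CompileLog/IdealGraphPrinter plumbing, are not part of this hunk:

#include <cstdio>
#include "opto/phasetype.hpp"   // the file added above

static void print_phase_name(CompilerPhaseType cpt) {
  printf("phase: %s\n", CompilerPhaseTypeHelper::to_string(cpt));
}
// e.g. print_phase_name(PHASE_INCREMENTAL_BOXING_INLINE);
//      -> "phase: Incremental Boxing Inline"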
_matcher.is_save_on_entry(lrgs(_lrg_map.live_range_id(def)).reg())) return true; // Direct use of callee-save proj if( def->is_Copy() ) // Copies carry value through def = def->in(def->is_Copy()); @@ -83,7 +83,7 @@ // Count 1 if deleting an instruction from the current block if( oldb == current_block ) blk_adjust++; _cfg._bbs.map(old->_idx,NULL); - OptoReg::Name old_reg = lrgs(n2lidx(old)).reg(); + OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg(); if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available? value->map(old_reg,NULL); // Yank from value/regnd maps regnd->map(old_reg,NULL); // This register's value is now unknown @@ -164,7 +164,7 @@ // Not every pair of physical registers are assignment compatible, // e.g. on sparc floating point registers are not assignable to integer // registers. - const LRG &def_lrg = lrgs(n2lidx(def)); + const LRG &def_lrg = lrgs(_lrg_map.live_range_id(def)); OptoReg::Name def_reg = def_lrg.reg(); const RegMask &use_mask = n->in_RegMask(idx); bool can_use = ( RegMask::can_represent(def_reg) ? (use_mask.Member(def_reg) != 0) @@ -209,11 +209,12 @@ // Skip through any number of copies (that don't mod oop-i-ness) Node *PhaseChaitin::skip_copies( Node *c ) { int idx = c->is_Copy(); - uint is_oop = lrgs(n2lidx(c))._is_oop; + uint is_oop = lrgs(_lrg_map.live_range_id(c))._is_oop; while (idx != 0) { guarantee(c->in(idx) != NULL, "must not resurrect dead copy"); - if (lrgs(n2lidx(c->in(idx)))._is_oop != is_oop) + if (lrgs(_lrg_map.live_range_id(c->in(idx)))._is_oop != is_oop) { break; // casting copy, not the same value + } c = c->in(idx); idx = c->is_Copy(); } @@ -225,8 +226,8 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List ®nd, bool can_change_regs ) { int blk_adjust = 0; - uint nk_idx = n2lidx(n->in(k)); - OptoReg::Name nk_reg = lrgs(nk_idx ).reg(); + uint nk_idx = _lrg_map.live_range_id(n->in(k)); + OptoReg::Name nk_reg = lrgs(nk_idx).reg(); // Remove obvious same-register copies Node *x = n->in(k); @@ -234,9 +235,13 @@ while( (idx=x->is_Copy()) != 0 ) { Node *copy = x->in(idx); guarantee(copy != NULL, "must not resurrect dead copy"); - if( lrgs(n2lidx(copy)).reg() != nk_reg ) break; + if(lrgs(_lrg_map.live_range_id(copy)).reg() != nk_reg) { + break; + } blk_adjust += use_prior_register(n,k,copy,current_block,value,regnd); - if( n->in(k) != copy ) break; // Failed for some cutout? + if (n->in(k) != copy) { + break; // Failed for some cutout? + } x = copy; // Progress, try again } @@ -256,7 +261,7 @@ if (val == x && nk_idx != 0 && regnd[nk_reg] != NULL && regnd[nk_reg] != x && - n2lidx(x) == n2lidx(regnd[nk_reg])) { + _lrg_map.live_range_id(x) == _lrg_map.live_range_id(regnd[nk_reg])) { // When rematerialzing nodes and stretching lifetimes, the // allocator will reuse the original def for multidef LRG instead // of the current reaching def because it can't know it's safe to @@ -270,7 +275,7 @@ if (val == x) return blk_adjust; // No progress? int n_regs = RegMask::num_registers(val->ideal_reg()); - uint val_idx = n2lidx(val); + uint val_idx = _lrg_map.live_range_id(val); OptoReg::Name val_reg = lrgs(val_idx).reg(); // See if it happens to already be in the correct register! @@ -499,12 +504,12 @@ for( j = 1; j < phi_dex; j++ ) { uint k; Node *phi = b->_nodes[j]; - uint pidx = n2lidx(phi); - OptoReg::Name preg = lrgs(n2lidx(phi)).reg(); + uint pidx = _lrg_map.live_range_id(phi); + OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg(); // Remove copies remaining on edges. 
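// From here on, every n2lidx(n) / Find_id(n) call becomes a query on
// _lrg_map. The LiveRangeMap class itself is introduced elsewhere in this
// changeset (chaitin.hpp, not quoted here), so the model below only captures
// the contract these call-sites rely on; as far as they show, live_range_id()
// is a direct node-to-live-range lookup, while find_id()/find() additionally
// chase the Union-Find mapping (see the "Bad update on Union-Find mapping"
// assert further down). A sketch, not the real class:

#include <vector>

class LiveRangeMapModel {
  std::vector<unsigned> _names;   // node idx -> live range id, 0 = none
  unsigned _max_lrg_id;
 public:
  explicit LiveRangeMapModel(unsigned max_lrg_id) : _max_lrg_id(max_lrg_id) {}
  unsigned live_range_id(unsigned node_idx) const {
    return node_idx < _names.size() ? _names[node_idx] : 0;
  }
  unsigned max_lrg_id() const { return _max_lrg_id; }
  void set_max_lrg_id(unsigned v) { _max_lrg_id = v; }
  void map(unsigned node_idx, unsigned lrg_id) {
    if (_names.size() <= node_idx) _names.resize(node_idx + 1, 0);
    _names[node_idx] = lrg_id;
  }
};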
Check for junk phi. Node *u = NULL; - for( k=1; k<phi->req(); k++ ) { + for (k = 1; k < phi->req(); k++) { Node *x = phi->in(k); if( phi != x && u != x ) // Found a different input u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input @@ -555,10 +560,10 @@ // alive and well at the use (or else the allocator fubar'd). Take // advantage of this info to set a reaching def for the use-reg. uint k; - for( k = 1; k < n->req(); k++ ) { + for (k = 1; k < n->req(); k++) { Node *def = n->in(k); // n->in(k) is a USE; def is the DEF for this USE guarantee(def != NULL, "no disconnected nodes at this point"); - uint useidx = n2lidx(def); // useidx is the live range index for this USE + uint useidx = _lrg_map.live_range_id(def); // useidx is the live range index for this USE if( useidx ) { OptoReg::Name ureg = lrgs(useidx).reg(); @@ -566,7 +571,7 @@ int idx; // Skip occasional useless copy while( (idx=def->is_Copy()) != 0 && def->in(idx) != NULL && // NULL should not happen - ureg == lrgs(n2lidx(def->in(idx))).reg() ) + ureg == lrgs(_lrg_map.live_range_id(def->in(idx))).reg()) def = def->in(idx); Node *valdef = skip_copies(def); // tighten up val through non-useless copies value.map(ureg,valdef); // record improved reaching-def info @@ -594,8 +599,10 @@ j -= elide_copy( n, k, b, value, regnd, two_adr!=k ); // Unallocated Nodes define no registers - uint lidx = n2lidx(n); - if( !lidx ) continue; + uint lidx = _lrg_map.live_range_id(n); + if (!lidx) { + continue; + } // Update the register defined by this instruction OptoReg::Name nreg = lrgs(lidx).reg(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/reg_split.cpp --- a/src/share/vm/opto/reg_split.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/reg_split.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -51,6 +51,15 @@ static const char out_of_nodes[] = "out of nodes during split"; +static bool contains_no_live_range_input(const Node* def) { + for (uint i = 1; i < def->req(); ++i) { + if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) { + return false; + } + } + return true; +} + //------------------------------get_spillcopy_wide----------------------------- // Get a SpillCopy node with wide-enough masks. Use the 'wide-mask', the // wide ideal-register spill-mask if possible. If the 'wide-mask' does @@ -318,9 +327,13 @@ for( uint i = 1; i < def->req(); i++ ) { Node *in = def->in(i); // Check for single-def (LRG cannot redefined) - uint lidx = n2lidx(in); - if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy - if (lrgs(lidx).is_singledef()) continue; + uint lidx = _lrg_map.live_range_id(in); + if (lidx >= _lrg_map.max_lrg_id()) { + continue; // Value is a recent spill-copy + } + if (lrgs(lidx).is_singledef()) { + continue; + } Block *b_def = _cfg._bbs[def->_idx]; int idx_def = b_def->find_node(def); @@ -344,26 +357,28 @@ if( spill->req() > 1 ) { for( uint i = 1; i < spill->req(); i++ ) { Node *in = spill->in(i); - uint lidx = Find_id(in); + uint lidx = _lrg_map.find_id(in); // Walk backwards thru spill copy node intermediates if (walkThru) { - while ( in->is_SpillCopy() && lidx >= _maxlrg ) { + while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) { in = in->in(1); - lidx = Find_id(in); + lidx = _lrg_map.find_id(in); } - if (lidx < _maxlrg && lrgs(lidx).is_multidef()) { + if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) { // walkThru found a multidef LRG, which is unsafe to use, so // just keep the original def used in the clone.
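// contains_no_live_range_input(), added above, is consulted further down in
// Split() at the phi-input rematerialization site:
//
//   if (def->rematerialize() && contains_no_live_range_input(def)) { ... }
//
// i.e. a def is only re-created in a predecessor block when none of its
// inputs occupies a register (every in_RegMask is empty), so cloning the def
// cannot stretch some other live range across the split point.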
in = spill->in(i); - lidx = Find_id(in); + lidx = _lrg_map.find_id(in); } } - if( lidx < _maxlrg && lrgs(lidx).reg() >= LRG::SPILL_REG ) { + if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) { Node *rdef = Reachblock[lrg2reach[lidx]]; - if( rdef ) spill->set_req(i,rdef); + if (rdef) { + spill->set_req(i, rdef); + } } } } @@ -382,7 +397,7 @@ #endif // See if the cloned def kills any flags, and copy those kills as well uint i = insidx+1; - if( clone_projs( b, i, def, spill, maxlrg ) ) { + if( clone_projs( b, i, def, spill, maxlrg) ) { // Adjust the point where we go hi-pressure if( i <= b->_ihrp_index ) b->_ihrp_index++; if( i <= b->_fhrp_index ) b->_fhrp_index++; @@ -424,17 +439,25 @@ //------------------------------prompt_use--------------------------------- // True if lidx is used before any real register is def'd in the block bool PhaseChaitin::prompt_use( Block *b, uint lidx ) { - if( lrgs(lidx)._was_spilled2 ) return false; + if (lrgs(lidx)._was_spilled2) { + return false; + } // Scan block for 1st use. for( uint i = 1; i <= b->end_idx(); i++ ) { Node *n = b->_nodes[i]; // Ignore PHI use, these can be up or down - if( n->is_Phi() ) continue; - for( uint j = 1; j < n->req(); j++ ) - if( Find_id(n->in(j)) == lidx ) + if (n->is_Phi()) { + continue; + } + for (uint j = 1; j < n->req(); j++) { + if (_lrg_map.find_id(n->in(j)) == lidx) { return true; // Found 1st use! - if( n->out_RegMask().is_NotEmpty() ) return false; + } + } + if (n->out_RegMask().is_NotEmpty()) { + return false; + } } return false; } @@ -464,23 +487,23 @@ bool u1, u2, u3; Block *b, *pred; PhiNode *phi; - GrowableArray<uint> lidxs(split_arena, _maxlrg, 0, 0); + GrowableArray<uint> lidxs(split_arena, maxlrg, 0, 0); // Array of counters to count splits per live range - GrowableArray<uint> splits(split_arena, _maxlrg, 0, 0); + GrowableArray<uint> splits(split_arena, maxlrg, 0, 0); #define NEW_SPLIT_ARRAY(type, size)\ (type*) split_arena->allocate_bytes((size) * sizeof(type)) //----------Setup Code---------- // Create a convenient mapping from lrg numbers to reaches/leaves indices - uint *lrg2reach = NEW_SPLIT_ARRAY( uint, _maxlrg ); + uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg); // Keep track of DEFS & Phis for later passes defs = new Node_List(); phis = new Node_List(); // Gather info on which LRG's are spilling, and build maps - for( bidx = 1; bidx < _maxlrg; bidx++ ) { - if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) { + for (bidx = 1; bidx < maxlrg; bidx++) { + if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) { assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color"); lrg2reach[bidx] = spill_cnt; spill_cnt++; @@ -629,7 +652,7 @@ break; } // must be looking at a phi - if( Find_id(n1) == lidxs.at(slidx) ) { + if (_lrg_map.find_id(n1) == lidxs.at(slidx)) { // found the necessary phi needs_phi = false; has_phi = true; @@ -651,11 +674,11 @@ Reachblock[slidx] = phi; // add node to block & node_to_block mapping - insert_proj( b, insidx++, phi, maxlrg++ ); + insert_proj(b, insidx++, phi, maxlrg++); non_phi++; // Reset new phi's mapping to be the spilling live range - _names.map(phi->_idx, lidx); - assert(Find_id(phi) == lidx,"Bad update on Union-Find mapping"); + _lrg_map.map(phi->_idx, lidx); + assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping"); } // end if not found correct phi // Here you have either found or created the Phi, so record it assert(phi != NULL,"Must have a Phi Node here"); @@ -721,12 +744,12 @@ for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
Node *n = b->_nodes[insidx]; // Find the defining Node's live range index - uint defidx = Find_id(n); + uint defidx = _lrg_map.find_id(n); uint cnt = n->req(); - if( n->is_Phi() ) { + if (n->is_Phi()) { // Skip phi nodes after removing dead copies. - if( defidx < _maxlrg ) { + if (defidx < _lrg_map.max_lrg_id()) { // Check for useless Phis. These appear if we spill, then // coalesce away copies. Dont touch Phis in spilling live // ranges; they are busy getting modifed in this pass. @@ -744,8 +767,8 @@ } } assert( u, "at least 1 valid input expected" ); - if( i >= cnt ) { // Found one unique input - assert(Find_id(n) == Find_id(u), "should be the same lrg"); + if (i >= cnt) { // Found one unique input + assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg"); n->replace_by(u); // Then replace with unique input n->disconnect_inputs(NULL, C); b->_nodes.remove(insidx); @@ -793,16 +816,24 @@ while( insert_point > 0 ) { Node *n = b->_nodes[insert_point]; // Hit top of block? Quit going backwards - if( n->is_Phi() ) break; + if (n->is_Phi()) { + break; + } // Found a def? Better split after it. - if( n2lidx(n) == lidx ) break; + if (_lrg_map.live_range_id(n) == lidx) { + break; + } // Look for a use uint i; - for( i = 1; i < n->req(); i++ ) - if( n2lidx(n->in(i)) == lidx ) + for( i = 1; i < n->req(); i++ ) { + if (_lrg_map.live_range_id(n->in(i)) == lidx) { break; + } + } // Found a use? Better split after it. - if( i < n->req() ) break; + if (i < n->req()) { + break; + } insert_point--; } uint orig_eidx = b->end_idx(); @@ -812,8 +843,9 @@ return 0; } // Spill of NULL check mem op goes into the following block. - if (b->end_idx() > orig_eidx) + if (b->end_idx() > orig_eidx) { insidx++; + } } // This is a new DEF, so update UP UPblock[slidx] = false; @@ -832,13 +864,13 @@ } // end if crossing HRP Boundry // If the LRG index is oob, then this is a new spillcopy, skip it. - if( defidx >= _maxlrg ) { + if (defidx >= _lrg_map.max_lrg_id()) { continue; } LRG &deflrg = lrgs(defidx); uint copyidx = n->is_Copy(); // Remove coalesced copy from CFG - if( copyidx && defidx == n2lidx(n->in(copyidx)) ) { + if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) { n->replace_by( n->in(copyidx) ); n->set_req( copyidx, NULL ); b->_nodes.remove(insidx--); @@ -864,13 +896,13 @@ // If inpidx > old_last, then one of these new inputs is being // handled. Skip the derived part of the pair, but process // the base like any other input. - if( inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED ) { + if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) { continue; // skip derived_debug added below } // Get lidx of input - uint useidx = Find_id(n->in(inpidx)); + uint useidx = _lrg_map.find_id(n->in(inpidx)); // Not a brand-new split, and it is a spill use - if( useidx < _maxlrg && lrgs(useidx).reg() >= LRG::SPILL_REG ) { + if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) { // Check for valid reaching DEF slidx = lrg2reach[useidx]; Node *def = Reachblock[slidx]; @@ -886,7 +918,7 @@ if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { return 0; } - _names.extend(def->_idx,0); + _lrg_map.extend(def->_idx, 0); _cfg._bbs.map(def->_idx,b); n->set_req(inpidx, def); continue; @@ -1186,10 +1218,10 @@ // ********** Split Left Over Mem-Mem Moves ********** // Check for mem-mem copies and split them now. Do not do this // to copies about to be spilled; they will be Split shortly. 
- if( copyidx ) { + if (copyidx) { Node *use = n->in(copyidx); - uint useidx = Find_id(use); - if( useidx < _maxlrg && // This is not a new split + uint useidx = _lrg_map.find_id(use); + if (useidx < _lrg_map.max_lrg_id() && // This is not a new split OptoReg::is_stack(deflrg.reg()) && deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack LRG &uselrg = lrgs(useidx); @@ -1228,7 +1260,7 @@ uint member; IndexSetIterator isi(liveout); while ((member = isi.next()) != 0) { - assert(defidx != Find_const(member), "Live out member has not been compressed"); + assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed"); } #endif Reachblock[slidx] = NULL; @@ -1261,7 +1293,7 @@ assert(phi->is_Phi(),"This list must only contain Phi Nodes"); Block *b = _cfg._bbs[phi->_idx]; // Grab the live range number - uint lidx = Find_id(phi); + uint lidx = _lrg_map.find_id(phi); uint slidx = lrg2reach[lidx]; // Update node to lidx map new_lrg(phi, maxlrg++); @@ -1289,18 +1321,20 @@ Node *def = Reaches[pidx][slidx]; assert( def, "must have reaching def" ); // If input up/down sense and reg-pressure DISagree - if( def->rematerialize() ) { + if (def->rematerialize() && contains_no_live_range_input(def)) { // Place the rematerialized node above any MSCs created during // phi node splitting. end_idx points at the insertion point // so look at the node before it. int insert = pred->end_idx(); while (insert >= 1 && pred->_nodes[insert - 1]->is_SpillCopy() && - Find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) { + _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) { insert--; } - def = split_Rematerialize( def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false ); - if( !def ) return 0; // Bail out + def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false); + if (!def) { + return 0; // Bail out + } } // Update the Phi's input edge array phi->set_req(i,def); @@ -1316,7 +1350,7 @@ } // End for all inputs to the Phi } // End for all Phi Nodes // Update _maxlrg to save Union asserts - _maxlrg = maxlrg; + _lrg_map.set_max_lrg_id(maxlrg); //----------PASS 3---------- @@ -1328,47 +1362,51 @@ for( uint i = 1; i < phi->req(); i++ ) { // Grab the input node Node *n = phi->in(i); - assert( n, "" ); - uint lidx = Find(n); - uint pidx = Find(phi); - if( lidx < pidx ) + assert(n, "node should exist"); + uint lidx = _lrg_map.find(n); + uint pidx = _lrg_map.find(phi); + if (lidx < pidx) { Union(n, phi); - else if( lidx > pidx ) + } + else if(lidx > pidx) { Union(phi, n); + } } // End for all inputs to the Phi Node } // End for all Phi Nodes // Now union all two address instructions - for( insidx = 0; insidx < defs->size(); insidx++ ) { + for (insidx = 0; insidx < defs->size(); insidx++) { // Grab the def n1 = defs->at(insidx); // Set new lidx for DEF & handle 2-addr instructions - if( n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0) ) { - assert( Find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index"); + if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) { + assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index"); // Union the input and output live ranges - uint lr1 = Find(n1); - uint lr2 = Find(n1->in(twoidx)); - if( lr1 < lr2 ) + uint lr1 = _lrg_map.find(n1); + uint lr2 = _lrg_map.find(n1->in(twoidx)); + if (lr1 < lr2) { Union(n1, n1->in(twoidx)); - else if( lr1 > lr2 ) + } + else if (lr1 > lr2) { Union(n1->in(twoidx), n1); + } } // End if two address } // End for all 
defs // DEBUG #ifdef ASSERT // Validate all live range index assignments - for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) { + for (bidx = 0; bidx < _cfg._num_blocks; bidx++) { b = _cfg._blocks[bidx]; - for( insidx = 0; insidx <= b->end_idx(); insidx++ ) { + for (insidx = 0; insidx <= b->end_idx(); insidx++) { Node *n = b->_nodes[insidx]; - uint defidx = Find(n); - assert(defidx < _maxlrg,"Bad live range index in Split"); + uint defidx = _lrg_map.find(n); + assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split"); assert(defidx < maxlrg,"Bad live range index in Split"); } } // Issue a warning if splitting made no progress int noprogress = 0; - for( slidx = 0; slidx < spill_cnt; slidx++ ) { - if( PrintOpto && WizardMode && splits.at(slidx) == 0 ) { + for (slidx = 0; slidx < spill_cnt; slidx++) { + if (PrintOpto && WizardMode && splits.at(slidx) == 0) { tty->print_cr("Failed to split live range %d", lidxs.at(slidx)); //BREAKPOINT; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/regalloc.hpp --- a/src/share/vm/opto/regalloc.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/regalloc.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -113,7 +113,7 @@ OptoReg::Name offset2reg( int stk_offset ) const; // Get the register encoding associated with the Node - int get_encode( const Node *n ) const { + int get_encode(const Node *n) const { assert( n->_idx < _node_regs_max_index, "Exceeded _node_regs array"); OptoReg::Name first = _node_regs[n->_idx].first(); OptoReg::Name second = _node_regs[n->_idx].second(); @@ -122,15 +122,6 @@ return Matcher::_regEncode[first]; } - // Platform dependent hook for actions prior to allocation - void pd_preallocate_hook(); - -#ifdef ASSERT - // Platform dependent hook for verification after allocation. Will - // only get called when compiling with asserts. - void pd_postallocate_verify_hook(); -#endif - #ifndef PRODUCT static int _total_framesize; static int _max_framesize; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/runtime.cpp --- a/src/share/vm/opto/runtime.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/runtime.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -134,7 +134,7 @@ assert(caller.is_compiled_frame(), "not being called from compiled like code"); return true; } -#endif +#endif // ASSERT #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/subnode.cpp --- a/src/share/vm/opto/subnode.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/subnode.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -863,10 +863,11 @@ const TypePtr *r1 = t2->make_ptr(); // Undefined inputs makes for an undefined result - if( TypePtr::above_centerline(r0->_ptr) || - TypePtr::above_centerline(r1->_ptr) ) + if ((r0 == NULL) || (r1 == NULL) || + TypePtr::above_centerline(r0->_ptr) || + TypePtr::above_centerline(r1->_ptr)) { return Type::TOP; - + } if (r0 == r1 && r0->singleton()) { // Equal pointer constants (klasses, nulls, etc.) 
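// The two Union passes in PASS 3 above (phi inputs, then two-address def/use
// pairs) always pass the lower-numbered live range first, so it survives as
// the representative of the merged range. A standalone model of that
// "union toward the smaller index" rule (Union()/find() themselves belong to
// the register allocator and are not defined in this hunk):

#include <vector>

struct UnionFindModel {
  std::vector<unsigned> parent;
  explicit UnionFindModel(unsigned n) : parent(n) {
    for (unsigned i = 0; i < n; i++) parent[i] = i;
  }
  unsigned find(unsigned x) {
    while (parent[x] != x) x = parent[x] = parent[parent[x]];  // path halving
    return x;
  }
  void union_toward_lower(unsigned a, unsigned b) {
    unsigned ra = find(a), rb = find(b);
    if (ra < rb) parent[rb] = ra;        // lower index wins, as in PASS 3 above
    else if (rb < ra) parent[ra] = rb;   // equal roots: already unioned
  }
};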
return TypeInt::CC_EQ; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/type.cpp --- a/src/share/vm/opto/type.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/type.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -2372,7 +2372,12 @@ _klass_is_exact(xk), _is_ptr_to_narrowoop(false), _is_ptr_to_narrowklass(false), + _is_ptr_to_boxed_value(false), _instance_id(instance_id) { + if (Compile::current()->eliminate_boxing() && (t == InstPtr) && + (offset > 0) && xk && (k != 0) && k->is_instance_klass()) { + _is_ptr_to_boxed_value = k->as_instance_klass()->is_boxed_value_offset(offset); + } #ifdef _LP64 if (_offset != 0) { if (_offset == oopDesc::klass_offset_in_bytes()) { @@ -2613,44 +2618,50 @@ //------------------------------make_from_constant----------------------------- // Make a java pointer from an oop constant -const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) { - assert(!o->is_null_object(), "null object not yet handled here."); - ciKlass* klass = o->klass(); - if (klass->is_instance_klass()) { - // Element is an instance - if (require_constant) { - if (!o->can_be_constant()) return NULL; - } else if (!o->should_be_constant()) { - return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0); - } - return TypeInstPtr::make(o); - } else if (klass->is_obj_array_klass()) { - // Element is an object array. Recursively call ourself. - const Type *etype = +const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, + bool require_constant, + bool is_autobox_cache) { + assert(!o->is_null_object(), "null object not yet handled here."); + ciKlass* klass = o->klass(); + if (klass->is_instance_klass()) { + // Element is an instance + if (require_constant) { + if (!o->can_be_constant()) return NULL; + } else if (!o->should_be_constant()) { + return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0); + } + return TypeInstPtr::make(o); + } else if (klass->is_obj_array_klass()) { + // Element is an object array. Recursively call ourself. + const TypeOopPtr *etype = TypeOopPtr::make_from_klass_raw(klass->as_obj_array_klass()->element_klass()); - const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length())); - // We used to pass NotNull in here, asserting that the sub-arrays - // are all not-null. This is not true in generally, as code can - // slam NULLs down in the subarrays. - if (require_constant) { - if (!o->can_be_constant()) return NULL; - } else if (!o->should_be_constant()) { - return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0); - } - const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0); + if (is_autobox_cache) { + // The pointers in the autobox arrays are always non-null. + etype = etype->cast_to_ptr_type(TypePtr::NotNull)->is_oopptr(); + } + const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length())); + // We used to pass NotNull in here, asserting that the sub-arrays + // are all not-null. This is not true in generally, as code can + // slam NULLs down in the subarrays. 
+ if (require_constant) { + if (!o->can_be_constant()) return NULL; + } else if (!o->should_be_constant()) { + return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0); + } + const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0, InstanceBot, is_autobox_cache); return arr; - } else if (klass->is_type_array_klass()) { - // Element is an typeArray + } else if (klass->is_type_array_klass()) { + // Element is an typeArray const Type* etype = (Type*)get_const_basic_type(klass->as_type_array_klass()->element_type()); - const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length())); - // We used to pass NotNull in here, asserting that the array pointer - // is not-null. That was not true in general. - if (require_constant) { - if (!o->can_be_constant()) return NULL; - } else if (!o->should_be_constant()) { - return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0); - } + const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length())); + // We used to pass NotNull in here, asserting that the array pointer + // is not-null. That was not true in general. + if (require_constant) { + if (!o->can_be_constant()) return NULL; + } else if (!o->should_be_constant()) { + return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0); + } const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0); return arr; } @@ -2856,6 +2867,28 @@ return result; } +/** + * Create constant type for a constant boxed value + */ +const Type* TypeInstPtr::get_const_boxed_value() const { + assert(is_ptr_to_boxed_value(), "should be called only for boxed value"); + assert((const_oop() != NULL), "should be called only for constant object"); + ciConstant constant = const_oop()->as_instance()->field_value_by_offset(offset()); + BasicType bt = constant.basic_type(); + switch (bt) { + case T_BOOLEAN: return TypeInt::make(constant.as_boolean()); + case T_INT: return TypeInt::make(constant.as_int()); + case T_CHAR: return TypeInt::make(constant.as_char()); + case T_BYTE: return TypeInt::make(constant.as_byte()); + case T_SHORT: return TypeInt::make(constant.as_short()); + case T_FLOAT: return TypeF::make(constant.as_float()); + case T_DOUBLE: return TypeD::make(constant.as_double()); + case T_LONG: return TypeLong::make(constant.as_long()); + default: break; + } + fatal(err_msg_res("Invalid boxed value type '%s'", type2name(bt))); + return NULL; +} //------------------------------cast_to_ptr_type------------------------------- const Type *TypeInstPtr::cast_to_ptr_type(PTR ptr) const { @@ -3330,18 +3363,18 @@ if (!xk) xk = ary->ary_must_be_exact(); assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); if (!UseExactTypes) xk = (ptr == Constant); - return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id))->hashcons(); + return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false))->hashcons(); } //------------------------------make------------------------------------------- -const TypeAryPtr *TypeAryPtr::make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) { +const TypeAryPtr *TypeAryPtr::make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, bool is_autobox_cache) { assert(!(k == NULL && ary->_elem->isa_int()), "integral arrays must be pre-equipped with a class"); assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" ); if (!xk) xk = (o != NULL) || 
ary->ary_must_be_exact(); assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); if (!UseExactTypes) xk = (ptr == Constant); - return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id))->hashcons(); + return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache))->hashcons(); } //------------------------------cast_to_ptr_type------------------------------- @@ -3397,8 +3430,20 @@ jint max_hi = max_array_length(elem()->basic_type()); //if (index_not_size) --max_hi; // type of a valid array index, FTR bool chg = false; - if (lo < min_lo) { lo = min_lo; chg = true; } - if (hi > max_hi) { hi = max_hi; chg = true; } + if (lo < min_lo) { + lo = min_lo; + if (size->is_con()) { + hi = lo; + } + chg = true; + } + if (hi > max_hi) { + hi = max_hi; + if (size->is_con()) { + lo = hi; + } + chg = true; + } // Negative length arrays will produce weird intermediate dead fast-path code if (lo > hi) return TypeInt::ZERO; @@ -3630,7 +3675,7 @@ //------------------------------xdual------------------------------------------ // Dual: compute field-by-field dual const Type *TypeAryPtr::xdual() const { - return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id() ); + return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache() ); } //----------------------interface_vs_oop--------------------------------------- diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/opto/type.hpp --- a/src/share/vm/opto/type.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/opto/type.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -234,6 +234,9 @@ bool is_ptr_to_narrowoop() const; bool is_ptr_to_narrowklass() const; + bool is_ptr_to_boxing_obj() const; + + // Convenience access float getf() const; double getd() const; @@ -794,6 +797,7 @@ bool _klass_is_exact; bool _is_ptr_to_narrowoop; bool _is_ptr_to_narrowklass; + bool _is_ptr_to_boxed_value; // If not InstanceTop or InstanceBot, indicates that this is // a particular instance of this type which is distinct. @@ -826,7 +830,9 @@ // If the object cannot be rendered as a constant, // may return a non-singleton type. // If require_constant, produce a NULL if a singleton is not possible. - static const TypeOopPtr* make_from_constant(ciObject* o, bool require_constant = false); + static const TypeOopPtr* make_from_constant(ciObject* o, + bool require_constant = false, + bool not_null_elements = false); // Make a generic (unclassed) pointer to an oop. static const TypeOopPtr* make(PTR ptr, int offset, int instance_id); @@ -839,7 +845,7 @@ // compressed oop references. bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; } bool is_ptr_to_narrowklass_nv() const { return _is_ptr_to_narrowklass; } - + bool is_ptr_to_boxed_value() const { return _is_ptr_to_boxed_value; } bool is_known_instance() const { return _instance_id > 0; } int instance_id() const { return _instance_id; } bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; } @@ -912,6 +918,9 @@ // Make a pointer to an oop. static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot ); + /** Create constant type for a constant boxed value */ + const Type* get_const_boxed_value() const; + // If this is a java.lang.Class constant, return the type for it or NULL. 
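// A worked consequence of the size-clamping change in TypeAryPtr's size
// handling above (min_lo is established just before the hunk; for array sizes
// the surrounding code suggests it is 0). Take a *constant* oversized length,
// lo = hi = 0x7fffffff, for an element type whose max_array_length() is
// smaller: the old code clamped only hi, leaving lo > hi, so the type
// collapsed to TypeInt::ZERO on a path that is really dead; the new code also
// snaps lo to the clamped bound, so a constant size stays a (clamped)
// constant and size->is_con() is preserved. That is what the "weird
// intermediate dead fast-path code" comment in the same hunk is about.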
// Pass to Type::get_const_type to turn it to a type, which will usually // be a TypeInstPtr, but may also be a TypeInt::INT for int.class, etc. @@ -943,7 +952,12 @@ //------------------------------TypeAryPtr------------------------------------- // Class of Java array pointers class TypeAryPtr : public TypeOopPtr { - TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id), _ary(ary) { + TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, + int offset, int instance_id, bool is_autobox_cache ) + : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id), + _ary(ary), + _is_autobox_cache(is_autobox_cache) + { #ifdef ASSERT if (k != NULL) { // Verify that specified klass and TypeAryPtr::klass() follow the same rules. @@ -964,6 +978,7 @@ virtual bool eq( const Type *t ) const; virtual int hash() const; // Type specific hashing const TypeAry *_ary; // Array we point into + const bool _is_autobox_cache; ciKlass* compute_klass(DEBUG_ONLY(bool verify = false)) const; @@ -974,9 +989,11 @@ const Type* elem() const { return _ary->_elem; } const TypeInt* size() const { return _ary->_size; } + bool is_autobox_cache() const { return _is_autobox_cache; } + static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot); // Constant pointer to array - static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot); + static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, bool is_autobox_cache = false); // Return a 'ptr' version of this type virtual const Type *cast_to_ptr_type(PTR ptr) const; @@ -1504,6 +1521,13 @@ return false; } +inline bool Type::is_ptr_to_boxing_obj() const { + const TypeInstPtr* tp = isa_instptr(); + return (tp != NULL) && (tp->offset() == 0) && + tp->klass()->is_instance_klass() && + tp->klass()->as_instance_klass()->is_box_klass(); +} + // =============================================================== // Things that need to be 64-bits in the 64-bit build but diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/precompiled/precompiled.hpp --- a/src/share/vm/precompiled/precompiled.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/precompiled/precompiled.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -26,7 +26,6 @@ // or if the user passes USE_PRECOMPILED_HEADER=0 to the makefiles. 
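// Taken together, the type.hpp additions above expose three boxing-related
// queries: _is_ptr_to_boxed_value marks a pointer *into* a box object at the
// offset of its value field (set in the TypeOopPtr constructor hunk earlier),
// is_ptr_to_boxing_obj() recognizes a pointer to the box object itself
// (offset 0), and TypeAryPtr::is_autobox_cache() tags the cache arrays. A
// hedged sketch of the folding step they enable; the actual consumer is a
// load-value path elsewhere in this changeset, not quoted here:
//
//   if (t->isa_instptr() && t->is_instptr()->is_ptr_to_boxed_value() &&
//       t->is_instptr()->const_oop() != NULL) {
//     // e.g. a load of java.lang.Integer.value from a constant Integer(42)
//     // folds to TypeInt::make(42)
//     return t->is_instptr()->get_const_boxed_value();
//   }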
#ifndef DONT_USE_PRECOMPILED_HEADER - # include "asm/assembler.hpp" # include "asm/assembler.inline.hpp" # include "asm/codeBuffer.hpp" diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/forte.cpp --- a/src/share/vm/prims/forte.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/forte.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -35,6 +35,19 @@ #include "runtime/vframe.hpp" #include "runtime/vframeArray.hpp" +// call frame copied from old .h file and renamed +typedef struct { + jint lineno; // line number in the source file + jmethodID method_id; // method executed in this frame +} ASGCT_CallFrame; + +// call trace copied from old .h file and renamed +typedef struct { + JNIEnv *env_id; // Env where trace was recorded + jint num_frames; // number of frames in this trace + ASGCT_CallFrame *frames; // frames +} ASGCT_CallTrace; + // These name match the names reported by the forte quality kit enum { ticks_no_Java_frame = 0, @@ -50,6 +63,8 @@ ticks_safepoint = -10 }; +#if INCLUDE_JVMTI + //------------------------------------------------------- // Native interfaces for use by Forte tools. @@ -360,20 +375,6 @@ } - -// call frame copied from old .h file and renamed -typedef struct { - jint lineno; // line number in the source file - jmethodID method_id; // method executed in this frame -} ASGCT_CallFrame; - -// call trace copied from old .h file and renamed -typedef struct { - JNIEnv *env_id; // Env where trace was recorded - jint num_frames; // number of frames in this trace - ASGCT_CallFrame *frames; // frames -} ASGCT_CallTrace; - static void forte_fill_call_trace_given_top(JavaThread* thd, ASGCT_CallTrace* trace, int depth, @@ -634,3 +635,12 @@ pointer_delta(end, start, sizeof(jbyte)), 0, NULL); #endif // !_WINDOWS && !IA64 } + +#else // INCLUDE_JVMTI +extern "C" { + JNIEXPORT + void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext) { + trace->num_frames = ticks_no_class_load; // -1 + } +} +#endif // INCLUDE_JVMTI diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jni.cpp --- a/src/share/vm/prims/jni.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/jni.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -77,7 +77,6 @@ #include "runtime/vm_operations.hpp" #include "services/runtimeService.hpp" #include "trace/tracing.hpp" -#include "trace/traceEventTypes.hpp" #include "utilities/defaultStream.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" @@ -5017,7 +5016,11 @@ #ifndef PRODUCT +#include "gc_implementation/shared/gcTimer.hpp" #include "gc_interface/collectedHeap.hpp" +#if INCLUDE_ALL_GCS +#include "gc_implementation/g1/heapRegionRemSet.hpp" +#endif #include "utilities/quickSort.hpp" #if INCLUDE_VM_STRUCTS #include "runtime/vmStructs.hpp" @@ -5031,6 +5034,7 @@ if (ExecuteInternalVMTests) { tty->print_cr("Running internal VM tests"); run_unit_test(GlobalDefinitions::test_globals()); + run_unit_test(GCTimerAllTest::all()); run_unit_test(arrayOopDesc::test_max_array_length()); run_unit_test(CollectedHeap::test_is_in()); run_unit_test(QuickSort::test_quick_sort()); @@ -5038,6 +5042,9 @@ #if INCLUDE_VM_STRUCTS run_unit_test(VMStructs::test()); #endif +#if INCLUDE_ALL_GCS + run_unit_test(HeapRegionRemSet::test_prt()); +#endif tty->print_cr("All internal VM tests passed"); } } @@ -5133,9 +5140,11 @@ JvmtiExport::post_thread_start(thread); } - EVENT_BEGIN(TraceEventThreadStart, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj()))); + EventThreadStart event; + if (event.should_commit()) { + 
event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj())); + event.commit(); + } // Check if we should compile all classes on bootclasspath NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();) @@ -5336,9 +5345,11 @@ JvmtiExport::post_thread_start(thread); } - EVENT_BEGIN(TraceEventThreadStart, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj()))); + EventThreadStart event; + if (event.should_commit()) { + event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj())); + event.commit(); + } *(JNIEnv**)penv = thread->jni_environment(); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jvm.cpp --- a/src/share/vm/prims/jvm.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/jvm.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -59,6 +59,7 @@ #include "services/attachListener.hpp" #include "services/management.hpp" #include "services/threadService.hpp" +#include "trace/tracing.hpp" #include "utilities/copy.hpp" #include "utilities/defaultStream.hpp" #include "utilities/dtrace.hpp" @@ -1074,11 +1075,7 @@ return NULL; } - Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls)); - objArrayOop signers = NULL; - if (k->oop_is_instance()) { - signers = InstanceKlass::cast(k)->signers(); - } + objArrayOop signers = java_lang_Class::signers(JNIHandles::resolve_non_null(cls)); // If there are no signers set in the class, or if the class // is an array, return NULL. @@ -1104,7 +1101,7 @@ // be called with an array. Only the bootstrap loader creates arrays. Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls)); if (k->oop_is_instance()) { - InstanceKlass::cast(k)->set_signers(objArrayOop(JNIHandles::resolve(signers))); + java_lang_Class::set_signers(k->java_mirror(), objArrayOop(JNIHandles::resolve(signers))); } } JVM_END @@ -1121,8 +1118,8 @@ return NULL; } - Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls)); - return (jobject) JNIHandles::make_local(env, k->protection_domain()); + oop pd = java_lang_Class::protection_domain(JNIHandles::resolve(cls)); + return (jobject) JNIHandles::make_local(env, pd); JVM_END @@ -1141,7 +1138,7 @@ if (k->oop_is_instance()) { oop pd = JNIHandles::resolve(protection_domain); assert(pd == NULL || pd->is_oop(), "just checking"); - InstanceKlass::cast(k)->set_protection_domain(pd); + java_lang_Class::set_protection_domain(k->java_mirror(), pd); } } JVM_END @@ -1712,7 +1709,7 @@ for (int i = 0; i < num_params; i++) { MethodParametersElement* params = mh->method_parameters_start(); // For a 0 index, give a NULL symbol - Symbol* const sym = 0 != params[i].name_cp_index ? + Symbol* sym = 0 != params[i].name_cp_index ? mh->constants()->symbol_at(params[i].name_cp_index) : NULL; int flags = params[i].flags; oop param = Reflection::new_parameter(reflected_method, i, sym, @@ -3005,6 +3002,8 @@ millis); #endif /* USDT2 */ + EventThreadSleep event; + if (millis == 0) { // When ConvertSleepToYield is on, this matches the classic VM implementation of // JVM_Sleep. Critical for similar threading behaviour (Win32) @@ -3025,6 +3024,10 @@ // An asynchronous exception (e.g., ThreadDeathException) could have been thrown on // us while we were sleeping. We do not overwrite those. 
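// Both jni.cpp hunks above replace the EVENT_BEGIN/EVENT_SET/EVENT_COMMIT
// macro triple with a typed event object. The shape of the new idiom
// (EventThreadStart itself comes from the trace event generation elsewhere in
// this changeset; only its use is visible here):
//
//   EventThreadStart event;            // cheap stack object
//   if (event.should_commit()) {       // is recording enabled for this type?
//     event.set_javalangthread(tid);   // fill fields only when it will be kept
//     event.commit();                  // timestamp and emit exactly once
//   }
//
// The JVM_Sleep hunks around this point follow the same shape with
// EventThreadSleep, re-checking should_commit() on each exit path.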
if (!HAS_PENDING_EXCEPTION) { + if (event.should_commit()) { + event.set_time(millis); + event.commit(); + } #ifndef USDT2 HS_DTRACE_PROBE1(hotspot, thread__sleep__end,1); #else /* USDT2 */ @@ -3038,6 +3041,10 @@ } thread->osthread()->set_state(old_state); } + if (event.should_commit()) { + event.set_time(millis); + event.commit(); + } #ifndef USDT2 HS_DTRACE_PROBE1(hotspot, thread__sleep__end,0); #else /* USDT2 */ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jvmtiClassFileReconstituter.cpp --- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -341,6 +341,44 @@ memcpy(writeable_address(length), annos->adr_at(0), length); } +// BootstrapMethods_attribute { +// u2 attribute_name_index; +// u4 attribute_length; +// u2 num_bootstrap_methods; +// { u2 bootstrap_method_ref; +// u2 num_bootstrap_arguments; +// u2 bootstrap_arguments[num_bootstrap_arguments]; +// } bootstrap_methods[num_bootstrap_methods]; +// } +void JvmtiClassFileReconstituter::write_bootstrapmethod_attribute() { + Array<u2>* operands = cpool()->operands(); + write_attribute_name_index("BootstrapMethods"); + int num_bootstrap_methods = ConstantPool::operand_array_length(operands); + + // calculate length of attribute + int length = sizeof(u2); // num_bootstrap_methods + for (int n = 0; n < num_bootstrap_methods; n++) { + u2 num_bootstrap_arguments = cpool()->operand_argument_count_at(n); + length += sizeof(u2); // bootstrap_method_ref + length += sizeof(u2); // num_bootstrap_arguments + length += sizeof(u2) * num_bootstrap_arguments; // bootstrap_arguments[num_bootstrap_arguments] + } + write_u4(length); + + // write attribute + write_u2(num_bootstrap_methods); + for (int n = 0; n < num_bootstrap_methods; n++) { + u2 bootstrap_method_ref = cpool()->operand_bootstrap_method_ref_index_at(n); + u2 num_bootstrap_arguments = cpool()->operand_argument_count_at(n); + write_u2(bootstrap_method_ref); + write_u2(num_bootstrap_arguments); + for (int arg = 0; arg < num_bootstrap_arguments; arg++) { + u2 bootstrap_argument = cpool()->operand_argument_index_at(n, arg); + write_u2(bootstrap_argument); + } + } +} + // Write InnerClasses attribute // JVMSpec| InnerClasses_attribute { @@ -513,6 +551,11 @@ AnnotationArray* param_anno = method->parameter_annotations(); AnnotationArray* default_anno = method->annotation_default(); + // skip generated default interface methods + if (method->is_overpass()) { + return; + } + write_u2(access_flags.get_flags() & JVM_RECOGNIZED_METHOD_MODIFIERS); write_u2(const_method->name_index()); write_u2(const_method->signature_index()); @@ -592,6 +635,9 @@ if (anno != NULL) { ++attr_count; // has RuntimeVisibleAnnotations attribute } + if (cpool()->operands() != NULL) { + ++attr_count; + } write_u2(attr_count); @@ -610,6 +656,9 @@ if (anno != NULL) { write_annotations_attribute("RuntimeVisibleAnnotations", anno); } + if (cpool()->operands() != NULL) { + write_bootstrapmethod_attribute(); + } } // Write the method information portion of ClassFile structure @@ -619,8 +668,19 @@ HandleMark hm(thread()); Array<Method*>* methods = ikh()->methods(); int num_methods = methods->length(); + int num_overpass = 0; - write_u2(num_methods); + // count the generated default interface methods + // these will not be re-created by write_method_info + // and should not be included in the total count + for (int index = 0; index < num_methods; index++) { + Method* method = methods->at(index); + if
(method->is_overpass()) { + num_overpass++; + } + } + + write_u2(num_methods - num_overpass); if (JvmtiExport::can_maintain_original_method_order()) { int index; int original_index; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jvmtiClassFileReconstituter.hpp --- a/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -127,6 +127,7 @@ void write_signature_attribute(u2 generic_signaure_index); void write_attribute_name_index(const char* name); void write_annotations_attribute(const char* attr_name, AnnotationArray* annos); + void write_bootstrapmethod_attribute(); address writeable_address(size_t size); void write_u1(u1 x); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jvmtiEnv.cpp --- a/src/share/vm/prims/jvmtiEnv.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/jvmtiEnv.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -259,7 +259,8 @@ // bytes to the InstanceKlass here because they have not been // validated and we're not at a safepoint. constantPoolHandle constants(current_thread, ikh->constants()); - MonitorLockerEx ml(constants->lock()); // lock constant pool while we query it + oop cplock = constants->lock(); + ObjectLocker ol(cplock, current_thread, cplock != NULL); // lock constant pool while we query it JvmtiClassFileReconstituter reconstituter(ikh); if (reconstituter.get_error() != JVMTI_ERROR_NONE) { @@ -2417,7 +2418,8 @@ instanceKlassHandle ikh(thread, k_oop); constantPoolHandle constants(thread, ikh->constants()); - MonitorLockerEx ml(constants->lock()); // lock constant pool while we query it + oop cplock = constants->lock(); + ObjectLocker ol(cplock, thread, cplock != NULL); // lock constant pool while we query it JvmtiConstantPoolReconstituter reconstituter(ikh); if (reconstituter.get_error() != JVMTI_ERROR_NONE) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jvmtiExport.cpp --- a/src/share/vm/prims/jvmtiExport.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/jvmtiExport.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -619,6 +619,9 @@ // data has been changed by the new retransformable agent // and it hasn't already been cached, cache it *_cached_data_ptr = (unsigned char *)os::malloc(_curr_len, mtInternal); + if (*_cached_data_ptr == NULL) { + vm_exit_out_of_memory(_curr_len, OOM_MALLOC_ERROR, "unable to allocate cached copy of original class bytes"); + } memcpy(*_cached_data_ptr, _curr_data, _curr_len); *_cached_length_ptr = _curr_len; } @@ -1621,15 +1624,19 @@ } } + assert(sig_type != '[', "array should have sig_type == 'L'"); + bool handle_created = false; + // convert oop to JNI handle. - if (sig_type == 'L' || sig_type == '[') { + if (sig_type == 'L') { + handle_created = true; value->l = (jobject)JNIHandles::make_local(thread, (oop)value->l); } post_field_modification(thread, method, location, field_klass, object, field, sig_type, value); // Destroy the JNI handle allocated above. - if (sig_type == 'L') { + if (handle_created) { JNIHandles::destroy_local(value->l); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jvmtiGen.java --- a/src/share/vm/prims/jvmtiGen.java Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/jvmtiGen.java Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
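// A worked check of the attribute_length computation in
// write_bootstrapmethod_attribute() above, assuming two bootstrap specifiers,
// the first with one static argument and the second with three (counts
// invented for the example):
//
//   length = 2                 // num_bootstrap_methods            (u2)
//          + (2 + 2 + 2 * 1)   // specifier 0: method ref + argc + 1 arg
//          + (2 + 2 + 2 * 3)   // specifier 1: method ref + argc + 3 args
//          = 18 bytes          // the value handed to write_u4(length);
//                              // the name-index and length fields themselves
//                              // are excluded, per the attribute_length rule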
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ import org.xml.sax.SAXParseException; import org.w3c.dom.Document; import org.w3c.dom.DOMException; - // For write operation import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; @@ -129,6 +128,7 @@ factory.setNamespaceAware(true); factory.setValidating(true); + factory.setXIncludeAware(true); try { File datafile = new File(inFileName); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jvmtiImpl.cpp --- a/src/share/vm/prims/jvmtiImpl.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/jvmtiImpl.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -360,19 +360,14 @@ case CLEAR_BREAKPOINT: _breakpoints->clear_at_safepoint(*_bp); break; - case CLEAR_ALL_BREAKPOINT: - _breakpoints->clearall_at_safepoint(); - break; default: assert(false, "Unknown operation"); } } void VM_ChangeBreakpoints::oops_do(OopClosure* f) { - // This operation keeps breakpoints alive - if (_breakpoints != NULL) { - _breakpoints->oops_do(f); - } + // The JvmtiBreakpoints in _breakpoints will be visited via + // JvmtiExport::oops_do. if (_bp != NULL) { _bp->oops_do(f); } @@ -433,23 +428,13 @@ } } -void JvmtiBreakpoints::clearall_at_safepoint() { - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - - int len = _bps.length(); - for (int i=0; iinvoke_dynamic_bootstrap_specifier_index(scratch_i); - - int ref_i = scratch_cp->invoke_dynamic_name_and_type_ref_index_at(scratch_i); - int new_ref_i = find_or_append_indirect_entry(scratch_cp, ref_i, merge_cp_p, + // Index of the bootstrap specifier in the operands array + int old_bs_i = scratch_cp->invoke_dynamic_bootstrap_specifier_index(scratch_i); + int new_bs_i = find_or_append_operand(scratch_cp, old_bs_i, merge_cp_p, + merge_cp_length_p, THREAD); + // The bootstrap method NameAndType_info index + int old_ref_i = scratch_cp->invoke_dynamic_name_and_type_ref_index_at(scratch_i); + int new_ref_i = find_or_append_indirect_entry(scratch_cp, old_ref_i, merge_cp_p, merge_cp_length_p, THREAD); - if (new_ref_i != ref_i) { + if (new_bs_i != old_bs_i) { RC_TRACE(0x00080000, - ("InvokeDynamic entry@%d name_and_type ref_index change: %d to %d", - *merge_cp_length_p, ref_i, new_ref_i)); + ("InvokeDynamic entry@%d bootstrap_method_attr_index change: %d to %d", + *merge_cp_length_p, old_bs_i, new_bs_i)); } - - (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, bss_idx, new_ref_i); + if (new_ref_i != old_ref_i) { + RC_TRACE(0x00080000, + ("InvokeDynamic entry@%d name_and_type_index change: %d to %d", + *merge_cp_length_p, old_ref_i, new_ref_i)); + } + + (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, new_bs_i, new_ref_i); if (scratch_i != *merge_cp_length_p) { // The new entry in *merge_cp_p is at a different index than // the new entry in scratch_cp so we need to map the index values. @@ -492,6 +499,105 @@ } // end find_or_append_indirect_entry() +// Append a bootstrap specifier into the merge_cp operands that is semantically equal +// to the scratch_cp operands bootstrap specifier passed by the old_bs_i index. +// Recursively append new merge_cp entries referenced by the new bootstrap specifier. 
+void VM_RedefineClasses::append_operand(constantPoolHandle scratch_cp, int old_bs_i,
+       constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
+
+  int old_ref_i = scratch_cp->operand_bootstrap_method_ref_index_at(old_bs_i);
+  int new_ref_i = find_or_append_indirect_entry(scratch_cp, old_ref_i, merge_cp_p,
+                                                merge_cp_length_p, THREAD);
+  if (new_ref_i != old_ref_i) {
+    RC_TRACE(0x00080000,
+             ("operands entry@%d bootstrap method ref_index change: %d to %d",
+              _operands_cur_length, old_ref_i, new_ref_i));
+  }
+
+  Array<u2>* merge_ops = (*merge_cp_p)->operands();
+  int new_bs_i = _operands_cur_length;
+  // _operands_cur_length == 0 means the merge_cp operands are still empty;
+  // however, operand_offset_at(0) was already set by the extend_operands() call.
+  int new_base = (new_bs_i == 0) ? (*merge_cp_p)->operand_offset_at(0)
+                                 : (*merge_cp_p)->operand_next_offset_at(new_bs_i - 1);
+  int argc = scratch_cp->operand_argument_count_at(old_bs_i);
+
+  ConstantPool::operand_offset_at_put(merge_ops, _operands_cur_length, new_base);
+  merge_ops->at_put(new_base++, new_ref_i);
+  merge_ops->at_put(new_base++, argc);
+
+  for (int i = 0; i < argc; i++) {
+    int old_arg_ref_i = scratch_cp->operand_argument_index_at(old_bs_i, i);
+    int new_arg_ref_i = find_or_append_indirect_entry(scratch_cp, old_arg_ref_i, merge_cp_p,
+                                                      merge_cp_length_p, THREAD);
+    merge_ops->at_put(new_base++, new_arg_ref_i);
+    if (new_arg_ref_i != old_arg_ref_i) {
+      RC_TRACE(0x00080000,
+               ("operands entry@%d bootstrap method argument ref_index change: %d to %d",
+                _operands_cur_length, old_arg_ref_i, new_arg_ref_i));
+    }
+  }
+  if (old_bs_i != _operands_cur_length) {
+    // The bootstrap specifier in *merge_cp_p is at a different index than
+    // that in scratch_cp so we need to map the index values.
+    map_operand_index(old_bs_i, new_bs_i);
+  }
+  _operands_cur_length++;
+} // end append_operand()
+
+
+int VM_RedefineClasses::find_or_append_operand(constantPoolHandle scratch_cp,
+       int old_bs_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
+
+  int new_bs_i = old_bs_i; // bootstrap specifier index
+  bool match = (old_bs_i < _operands_cur_length) &&
+               scratch_cp->compare_operand_to(old_bs_i, *merge_cp_p, old_bs_i, THREAD);
+
+  if (!match) {
+    // forward reference in *merge_cp_p or not a direct match
+    int found_i = scratch_cp->find_matching_operand(old_bs_i, *merge_cp_p,
+                                                    _operands_cur_length, THREAD);
+    if (found_i != -1) {
+      guarantee(found_i != old_bs_i, "compare_operand_to() and find_matching_operand() disagree");
+      // found a matching operand somewhere else in *merge_cp_p so just need a mapping
+      new_bs_i = found_i;
+      map_operand_index(old_bs_i, found_i);
+    } else {
+      // no match found so we have to append this bootstrap specifier to *merge_cp_p
+      append_operand(scratch_cp, old_bs_i, merge_cp_p, merge_cp_length_p, THREAD);
+      new_bs_i = _operands_cur_length - 1;
+    }
+  }
+  return new_bs_i;
+} // end find_or_append_operand()
+
+
+void VM_RedefineClasses::finalize_operands_merge(constantPoolHandle merge_cp, TRAPS) {
+  if (merge_cp->operands() == NULL) {
+    return;
+  }
+  // Shrink the merge_cp operands
+  merge_cp->shrink_operands(_operands_cur_length, CHECK);
+
+  if (RC_TRACE_ENABLED(0x00040000)) {
+    // don't want to loop unless we are tracing
+    int count = 0;
+    for (int i = 1; i < _operands_index_map_p->length(); i++) {
+      int value = _operands_index_map_p->at(i);
+      if (value != -1) {
+        RC_TRACE_WITH_THREAD(0x00040000, THREAD,
+          ("operands_index_map[%d]: old=%d new=%d", count, i, value));
+        count++;
+      }
+    }
+  }
+  // Clean-up
+  _operands_index_map_p = NULL;
+  _operands_cur_length = 0;
+  _operands_index_map_count = 0;
+} // end finalize_operands_merge()
+
+
 jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions(
              instanceKlassHandle the_class,
              instanceKlassHandle scratch_class) {
@@ -765,6 +871,31 @@
 } // end find_new_index()


+// Find the new bootstrap specifier index value for an old bootstrap specifier
+// index value by searching the index map. Returns unused index (-1) if there
+// is no mapped value for the old bootstrap specifier index.
+int VM_RedefineClasses::find_new_operand_index(int old_index) {
+  if (_operands_index_map_count == 0) {
+    // map is empty so nothing can be found
+    return -1;
+  }
+
+  if (old_index == -1 || old_index >= _operands_index_map_p->length()) {
+    // The old_index is out of range so it is not mapped.
+    // This should not happen in regular constant pool merging use.
+    return -1;
+  }
+
+  int value = _operands_index_map_p->at(old_index);
+  if (value == -1) {
+    // the old_index is not mapped
+    return -1;
+  }
+
+  return value;
+} // end find_new_operand_index()
+
+
 // Returns true if the current mismatch is due to a resolved/unresolved
 // class pair. Otherwise, returns false.
 bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1,
@@ -1014,6 +1145,25 @@
 } // end map_index()


+// Map old_index to new_index as needed.
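+// A small example of the mapping contract (indices illustrative only):
+// if scratch_cp's bootstrap specifier 2 landed at merge_cp index 0, then after
+//
+//   map_operand_index(2, 0);
+//
+// find_new_operand_index(2) returns 0, while an unmapped index, or one whose
+// old and new values are equal, keeps returning -1.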
+void VM_RedefineClasses::map_operand_index(int old_index, int new_index) {
+  if (find_new_operand_index(old_index) != -1) {
+    // old_index is already mapped
+    return;
+  }
+
+  if (old_index == new_index) {
+    // no mapping is needed
+    return;
+  }
+
+  _operands_index_map_p->at_put(old_index, new_index);
+  _operands_index_map_count++;
+
+  RC_TRACE(0x00040000, ("mapped bootstrap specifier at index %d to %d", old_index, new_index));
+} // end map_operand_index()
+
+
 // Merge old_cp and scratch_cp and return the results of the merge via
 // merge_cp_p. The number of entries in *merge_cp_p is returned via
 // merge_cp_length_p. The entries in old_cp occupy the same locations
@@ -1086,6 +1236,7 @@
   } // end for each old_cp entry

   ConstantPool::copy_operands(old_cp, *merge_cp_p, CHECK_0);
+  (*merge_cp_p)->extend_operands(scratch_cp, CHECK_0);

   // We don't need to sanity check that *merge_cp_length_p is within
   // *merge_cp_p bounds since we have the minimum on-entry check above.
@@ -1202,6 +1353,7 @@
       ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d",
       *merge_cp_length_p, scratch_i, _index_map_count));
   }
+  finalize_operands_merge(*merge_cp_p, THREAD);

   return true;
 } // end merge_constant_pools()
@@ -1270,6 +1422,11 @@
   _index_map_count = 0;
   _index_map_p = new intArray(scratch_cp->length(), -1);

+  _operands_cur_length = ConstantPool::operand_array_length(old_cp->operands());
+  _operands_index_map_count = 0;
+  _operands_index_map_p = new intArray(
+    ConstantPool::operand_array_length(scratch_cp->operands()), -1);
+
   // reference to the cp holder is needed for copy_operands()
   merge_cp->set_pool_holder(scratch_class());
   bool result = merge_constant_pools(old_cp, scratch_cp, &merge_cp,
@@ -1400,7 +1557,6 @@
   return true;
 } // end rewrite_cp_refs()

-
 // Rewrite constant pool references in the methods.
 bool VM_RedefineClasses::rewrite_cp_refs_in_methods(
        instanceKlassHandle scratch_class, TRAPS) {
@@ -2497,29 +2653,35 @@
 } // end set_new_constant_pool()

-void VM_RedefineClasses::adjust_array_vtable(Klass* k_oop) {
-  ArrayKlass* ak = ArrayKlass::cast(k_oop);
-  bool trace_name_printed = false;
-  ak->vtable()->adjust_method_entries(_matching_old_methods,
-                                      _matching_new_methods,
-                                      _matching_methods_length,
-                                      &trace_name_printed);
-}
-
 // Unevolving classes may point to methods of the_class directly
 // from their constant pool caches, itables, and/or vtables. We
-// use the SystemDictionary::classes_do() facility and this helper
+// use the ClassLoaderDataGraph::classes_do() facility and this helper
 // to fix up these pointers.
-//
-// Note: We currently don't support updating the vtable in
-// arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp.
-void VM_RedefineClasses::adjust_cpool_cache_and_vtable(Klass* k_oop,
-       ClassLoaderData* initiating_loader,
-       TRAPS) {
-  Klass *k = k_oop;
-  if (k->oop_is_instance()) {
-    HandleMark hm(THREAD);
-    InstanceKlass *ik = (InstanceKlass *) k;
+
+// Adjust cpools and vtables closure
+void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
+
+  // This is a very busy routine. We don't want too much tracing
+  // printed out.
+  bool trace_name_printed = false;
+
+  // Very noisy: only enable this call if you are trying to determine
+  // that a specific class gets found by this routine.
+ // RC_TRACE macro has an embedded ResourceMark + // RC_TRACE_WITH_THREAD(0x00100000, THREAD, + // ("adjust check: name=%s", k->external_name())); + // trace_name_printed = true; + + // If the class being redefined is java.lang.Object, we need to fix all + // array class vtables also + if (k->oop_is_array() && _the_class_oop == SystemDictionary::Object_klass()) { + k->vtable()->adjust_method_entries(_matching_old_methods, + _matching_new_methods, + _matching_methods_length, + &trace_name_printed); + } else if (k->oop_is_instance()) { + HandleMark hm(_thread); + InstanceKlass *ik = InstanceKlass::cast(k); // HotSpot specific optimization! HotSpot does not currently // support delegation from the bootstrap class loader to a @@ -2539,23 +2701,6 @@ return; } - // If the class being redefined is java.lang.Object, we need to fix all - // array class vtables also - if (_the_class_oop == SystemDictionary::Object_klass()) { - ik->array_klasses_do(adjust_array_vtable); - } - - // This is a very busy routine. We don't want too much tracing - // printed out. - bool trace_name_printed = false; - - // Very noisy: only enable this call if you are trying to determine - // that a specific class gets found by this routine. - // RC_TRACE macro has an embedded ResourceMark - // RC_TRACE_WITH_THREAD(0x00100000, THREAD, - // ("adjust check: name=%s", ik->external_name())); - // trace_name_printed = true; - // Fix the vtable embedded in the_class and subclasses of the_class, // if one exists. We discard scratch_class and we don't keep an // InstanceKlass around to hold obsolete methods so we don't have @@ -2563,7 +2708,7 @@ // holds the Method*s for virtual (but not final) methods. if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) { // ik->vtable() creates a wrapper object; rm cleans it up - ResourceMark rm(THREAD); + ResourceMark rm(_thread); ik->vtable()->adjust_method_entries(_matching_old_methods, _matching_new_methods, _matching_methods_length, @@ -2579,7 +2724,7 @@ if (ik->itable_length() > 0 && (_the_class_oop->is_interface() || ik->is_subclass_of(_the_class_oop))) { // ik->itable() creates a wrapper object; rm cleans it up - ResourceMark rm(THREAD); + ResourceMark rm(_thread); ik->itable()->adjust_method_entries(_matching_old_methods, _matching_new_methods, _matching_methods_length, @@ -2602,7 +2747,7 @@ constantPoolHandle other_cp; ConstantPoolCache* cp_cache; - if (k_oop != _the_class_oop) { + if (ik != _the_class_oop) { // this klass' constant pool cache may need adjustment other_cp = constantPoolHandle(ik->constants()); cp_cache = other_cp->cache(); @@ -2614,7 +2759,7 @@ } } { - ResourceMark rm(THREAD); + ResourceMark rm(_thread); // PreviousVersionInfo objects returned via PreviousVersionWalker // contain a GrowableArray of handles. We have to clean up the // GrowableArray _after_ the PreviousVersionWalker destructor @@ -3052,7 +3197,7 @@ // parts of the_class // - adjusting constant pool caches and vtables in other classes // that refer to methods in the_class. These adjustments use the -// SystemDictionary::classes_do() facility which only allows +// ClassLoaderDataGraph::classes_do() facility which only allows // a helper method to be specified. The interesting parameters // that we would like to pass to the helper method are saved in // static global fields in the VM operation. @@ -3210,6 +3355,10 @@ } #endif + // NULL out in scratch class to not delete twice. The class to be redefined + // always owns these bytes. 
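+  // (Sketch of the hazard being avoided: both the redefined class and
+  //  scratch_class can end up referencing the same malloc'ed buffer,
+  //    the_class->set_cached_class_file(bytes, len);      // owner, freed once
+  //    scratch_class->set_cached_class_file(bytes, len);  // alias -> double free
+  //  so the alias in scratch_class is cleared here.)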
+ scratch_class->set_cached_class_file(NULL, 0); + // Replace inner_classes Array* old_inner_classes = the_class->inner_classes(); the_class->set_inner_classes(scratch_class->inner_classes()); @@ -3282,7 +3431,18 @@ // Adjust constantpool caches and vtables for all classes // that reference methods of the evolved class. - SystemDictionary::classes_do(adjust_cpool_cache_and_vtable, THREAD); + AdjustCpoolCacheAndVtable adjust_cpool_cache_and_vtable(THREAD); + ClassLoaderDataGraph::classes_do(&adjust_cpool_cache_and_vtable); + + // JSR-292 support + MemberNameTable* mnt = the_class->member_names(); + if (mnt != NULL) { + bool trace_name_printed = false; + mnt->adjust_method_entries(_matching_old_methods, + _matching_new_methods, + _matching_methods_length, + &trace_name_printed); + } // Fix Resolution Error table also to remove old constant pools SystemDictionary::delete_resolution_error(old_constants); @@ -3333,34 +3493,33 @@ } } -void VM_RedefineClasses::check_class(Klass* k_oop, - ClassLoaderData* initiating_loader, - TRAPS) { - Klass *k = k_oop; +void VM_RedefineClasses::CheckClass::do_klass(Klass* k) { + bool no_old_methods = true; // be optimistic + + // Both array and instance classes have vtables. + // a vtable should never contain old or obsolete methods + ResourceMark rm(_thread); + if (k->vtable_length() > 0 && + !k->vtable()->check_no_old_or_obsolete_entries()) { + if (RC_TRACE_ENABLED(0x00004000)) { + RC_TRACE_WITH_THREAD(0x00004000, _thread, + ("klassVtable::check_no_old_or_obsolete_entries failure" + " -- OLD or OBSOLETE method found -- class: %s", + k->signature_name())); + k->vtable()->dump_vtable(); + } + no_old_methods = false; + } + if (k->oop_is_instance()) { - HandleMark hm(THREAD); - InstanceKlass *ik = (InstanceKlass *) k; - bool no_old_methods = true; // be optimistic - ResourceMark rm(THREAD); - - // a vtable should never contain old or obsolete methods - if (ik->vtable_length() > 0 && - !ik->vtable()->check_no_old_or_obsolete_entries()) { - if (RC_TRACE_ENABLED(0x00004000)) { - RC_TRACE_WITH_THREAD(0x00004000, THREAD, - ("klassVtable::check_no_old_or_obsolete_entries failure" - " -- OLD or OBSOLETE method found -- class: %s", - ik->signature_name())); - ik->vtable()->dump_vtable(); - } - no_old_methods = false; - } + HandleMark hm(_thread); + InstanceKlass *ik = InstanceKlass::cast(k); // an itable should never contain old or obsolete methods if (ik->itable_length() > 0 && !ik->itable()->check_no_old_or_obsolete_entries()) { if (RC_TRACE_ENABLED(0x00004000)) { - RC_TRACE_WITH_THREAD(0x00004000, THREAD, + RC_TRACE_WITH_THREAD(0x00004000, _thread, ("klassItable::check_no_old_or_obsolete_entries failure" " -- OLD or OBSOLETE method found -- class: %s", ik->signature_name())); @@ -3374,7 +3533,7 @@ ik->constants()->cache() != NULL && !ik->constants()->cache()->check_no_old_or_obsolete_entries()) { if (RC_TRACE_ENABLED(0x00004000)) { - RC_TRACE_WITH_THREAD(0x00004000, THREAD, + RC_TRACE_WITH_THREAD(0x00004000, _thread, ("cp-cache::check_no_old_or_obsolete_entries failure" " -- OLD or OBSOLETE method found -- class: %s", ik->signature_name())); @@ -3382,19 +3541,21 @@ } no_old_methods = false; } - - if (!no_old_methods) { - if (RC_TRACE_ENABLED(0x00004000)) { - dump_methods(); - } else { - tty->print_cr("INFO: use the '-XX:TraceRedefineClasses=16384' option " - "to see more info about the following guarantee() failure."); - } - guarantee(false, "OLD and/or OBSOLETE method(s) found"); + } + + // print and fail guarantee if old methods are found. 
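+  // (The 0x00004000 bit equals decimal 16384, so the diagnostic run suggested
+  //  by the message below is started roughly as:
+  //    java -XX:TraceRedefineClasses=16384 ...
+  //  which makes this path call dump_methods() before the guarantee fires.)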
+  if (!no_old_methods) {
+    if (RC_TRACE_ENABLED(0x00004000)) {
+      dump_methods();
+    } else {
+      tty->print_cr("INFO: use the '-XX:TraceRedefineClasses=16384' option "
+        "to see more info about the following guarantee() failure.");
+    }
+    guarantee(false, "OLD and/or OBSOLETE method(s) found");
+  }
 }
+
 void VM_RedefineClasses::dump_methods() {
   int j;
   RC_TRACE(0x00004000, ("_old_methods --"));
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jvmtiRedefineClasses.hpp
--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -87,7 +87,7 @@
 // parts of the_class
 // - adjusting constant pool caches and vtables in other classes
 // that refer to methods in the_class. These adjustments use the
-// SystemDictionary::classes_do() facility which only allows
+// ClassLoaderDataGraph::classes_do() facility which only allows
 // a helper method to be specified. The interesting parameters
 // that we would like to pass to the helper method are saved in
 // static global fields in the VM operation.
@@ -333,8 +333,8 @@
 class VM_RedefineClasses: public VM_Operation {
  private:
-  // These static fields are needed by SystemDictionary::classes_do()
-  // facility and the adjust_cpool_cache_and_vtable() helper:
+  // These static fields are needed by ClassLoaderDataGraph::classes_do()
+  // facility and the AdjustCpoolCacheAndVtable helper:
   static Array<Method*>* _old_methods;
   static Array<Method*>* _new_methods;
   static Method** _matching_old_methods;
@@ -359,8 +359,15 @@
   // _index_map_p contains any entries.
   int _index_map_count;
   intArray * _index_map_p;
+
+  // _operands_index_map_count is just an optimization for knowing if
+  // _operands_index_map_p contains any entries.
+  int _operands_cur_length;
+  int _operands_index_map_count;
+  intArray * _operands_index_map_p;
+
   // ptr to _class_count scratch_classes
-  Klass** _scratch_classes;
+  Klass**    _scratch_classes;
   jvmtiError _res;

   // Performance measurement support. These timers do not cover all
@@ -401,13 +408,6 @@
          int * emcp_method_count_p);
   void transfer_old_native_function_registrations(instanceKlassHandle the_class);

-  // Unevolving classes may point to methods of the_class directly
-  // from their constant pool caches, itables, and/or vtables. We
-  // use the SystemDictionary::classes_do() facility and this helper
-  // to fix up these pointers.
- static void adjust_cpool_cache_and_vtable(Klass* k_oop, ClassLoaderData* initiating_loader, TRAPS); - static void adjust_array_vtable(Klass* k_oop); - // Install the redefinition of a class void redefine_single_class(jclass the_jclass, Klass* scratch_class_oop, TRAPS); @@ -422,12 +422,19 @@ // Support for constant pool merging (these routines are in alpha order): void append_entry(constantPoolHandle scratch_cp, int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); + void append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index, + constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); + void finalize_operands_merge(constantPoolHandle merge_cp, TRAPS); int find_or_append_indirect_entry(constantPoolHandle scratch_cp, int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); + int find_or_append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index, + constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); int find_new_index(int old_index); + int find_new_operand_index(int old_bootstrap_spec_index); bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1, constantPoolHandle cp2, int index2); void map_index(constantPoolHandle scratch_cp, int old_index, int new_index); + void map_operand_index(int old_bootstrap_spec_index, int new_bootstrap_spec_index); bool merge_constant_pools(constantPoolHandle old_cp, constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); @@ -466,10 +473,27 @@ void flush_dependent_code(instanceKlassHandle k_h, TRAPS); - static void check_class(Klass* k_oop, ClassLoaderData* initiating_loader, - TRAPS); static void dump_methods(); + // Check that there are no old or obsolete methods + class CheckClass : public KlassClosure { + Thread* _thread; + public: + CheckClass(Thread* t) : _thread(t) {} + void do_klass(Klass* k); + }; + + // Unevolving classes may point to methods of the_class directly + // from their constant pool caches, itables, and/or vtables. We + // use the ClassLoaderDataGraph::classes_do() facility and this helper + // to fix up these pointers. + class AdjustCpoolCacheAndVtable : public KlassClosure { + Thread* _thread; + public: + AdjustCpoolCacheAndVtable(Thread* t) : _thread(t) {} + void do_klass(Klass* k); + }; + public: VM_RedefineClasses(jint class_count, const jvmtiClassDefinition *class_defs, diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/jvmtiTagMap.cpp --- a/src/share/vm/prims/jvmtiTagMap.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/jvmtiTagMap.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -153,7 +153,8 @@ size_t s = initial_size * sizeof(JvmtiTagHashmapEntry*); _table = (JvmtiTagHashmapEntry**)os::malloc(s, mtInternal); if (_table == NULL) { - vm_exit_out_of_memory(s, "unable to allocate initial hashtable for jvmti object tags"); + vm_exit_out_of_memory(s, OOM_MALLOC_ERROR, + "unable to allocate initial hashtable for jvmti object tags"); } for (int i=0; iconstants(); + ConstantPool* pool = ik->constants(); for (int i = 1; i < pool->length(); i++) { constantTag tag = pool->tag_at(i).value(); if (tag.is_string() || tag.is_klass()) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/methodHandles.cpp --- a/src/share/vm/prims/methodHandles.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/methodHandles.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "interpreter/oopMapCache.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" +#include "prims/jvmtiRedefineClassesTrace.hpp" #include "prims/methodHandles.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/javaCalls.hpp" @@ -66,7 +67,8 @@ TraceTime timer("MethodHandles adapters generation", TraceStartupTime); _adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size); if (_adapter_code == NULL) - vm_exit_out_of_memory(adapter_code_size, "CodeCache: no room for MethodHandles adapters"); + vm_exit_out_of_memory(adapter_code_size, OOM_MALLOC_ERROR, + "CodeCache: no room for MethodHandles adapters"); { CodeBuffer code(_adapter_code); MethodHandlesAdapterGenerator g(&code); @@ -124,7 +126,9 @@ return Handle(THREAD, k->allocate_instance(THREAD)); } -oop MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { +oop MethodHandles::init_MemberName(Handle mname, Handle target) { + Thread* thread = Thread::current(); + oop target_oop = target(); Klass* target_klass = target_oop->klass(); if (target_klass == SystemDictionary::reflect_Field_klass()) { oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder() @@ -132,24 +136,24 @@ int mods = java_lang_reflect_Field::modifiers(target_oop); oop type = java_lang_reflect_Field::type(target_oop); oop name = java_lang_reflect_Field::name(target_oop); - Klass* k = java_lang_Class::as_Klass(clazz); - intptr_t offset = InstanceKlass::cast(k)->field_offset(slot); - return init_field_MemberName(mname_oop, k, accessFlags_from(mods), type, name, offset); + KlassHandle k(thread, java_lang_Class::as_Klass(clazz)); + intptr_t offset = InstanceKlass::cast(k())->field_offset(slot); + return init_field_MemberName(mname, k, accessFlags_from(mods), type, name, offset); } else if (target_klass == SystemDictionary::reflect_Method_klass()) { oop clazz = java_lang_reflect_Method::clazz(target_oop); int slot = java_lang_reflect_Method::slot(target_oop); - Klass* k = java_lang_Class::as_Klass(clazz); - if (k != NULL && k->oop_is_instance()) { - Method* m = InstanceKlass::cast(k)->method_with_idnum(slot); - return init_method_MemberName(mname_oop, m, true, k); + KlassHandle k(thread, java_lang_Class::as_Klass(clazz)); + if (!k.is_null() && k->oop_is_instance()) { + Method* m = InstanceKlass::cast(k())->method_with_idnum(slot); + return 
init_method_MemberName(mname, m, true, k); } } else if (target_klass == SystemDictionary::reflect_Constructor_klass()) { oop clazz = java_lang_reflect_Constructor::clazz(target_oop); int slot = java_lang_reflect_Constructor::slot(target_oop); - Klass* k = java_lang_Class::as_Klass(clazz); - if (k != NULL && k->oop_is_instance()) { - Method* m = InstanceKlass::cast(k)->method_with_idnum(slot); - return init_method_MemberName(mname_oop, m, false, k); + KlassHandle k(thread, java_lang_Class::as_Klass(clazz)); + if (!k.is_null() && k->oop_is_instance()) { + Method* m = InstanceKlass::cast(k())->method_with_idnum(slot); + return init_method_MemberName(mname, m, false, k); } } else if (target_klass == SystemDictionary::MemberName_klass()) { // Note: This only works if the MemberName has already been resolved. @@ -157,17 +161,18 @@ int flags = java_lang_invoke_MemberName::flags(target_oop); Metadata* vmtarget=java_lang_invoke_MemberName::vmtarget(target_oop); intptr_t vmindex = java_lang_invoke_MemberName::vmindex(target_oop); - Klass* k = java_lang_Class::as_Klass(clazz); + KlassHandle k(thread, java_lang_Class::as_Klass(clazz)); int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; if (vmtarget == NULL) return NULL; // not resolved if ((flags & IS_FIELD) != 0) { assert(vmtarget->is_klass(), "field vmtarget is Klass*"); int basic_mods = (ref_kind_is_static(ref_kind) ? JVM_ACC_STATIC : 0); // FIXME: how does k (receiver_limit) contribute? - return init_field_MemberName(mname_oop, (Klass*)vmtarget, accessFlags_from(basic_mods), NULL, NULL, vmindex); + KlassHandle k_vmtarget(thread, (Klass*)vmtarget); + return init_field_MemberName(mname, k_vmtarget, accessFlags_from(basic_mods), NULL, NULL, vmindex); } else if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) { assert(vmtarget->is_method(), "method or constructor vmtarget is Method*"); - return init_method_MemberName(mname_oop, (Method*)vmtarget, ref_kind_does_dispatch(ref_kind), k); + return init_method_MemberName(mname, (Method*)vmtarget, ref_kind_does_dispatch(ref_kind), k); } else { return NULL; } @@ -175,8 +180,9 @@ return NULL; } -oop MethodHandles::init_method_MemberName(oop mname_oop, Method* m, bool do_dispatch, - Klass* receiver_limit) { +oop MethodHandles::init_method_MemberName(Handle mname, Method* m, bool do_dispatch, + KlassHandle receiver_limit_h) { + Klass* receiver_limit = receiver_limit_h(); AccessFlags mods = m->access_flags(); int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS ); int vmindex = Method::nonvirtual_vtable_index; // implies never any dispatch @@ -190,12 +196,12 @@ } else if (receiver_limit != mklass && !receiver_limit->is_subtype_of(mklass)) { return NULL; // bad receiver limit - } else if (receiver_limit->is_interface() && + } else if (do_dispatch && receiver_limit->is_interface() && mklass->is_interface()) { flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT); receiver_limit = mklass; // ignore passed-in limit; interfaces are interconvertible vmindex = klassItable::compute_itable_index(m); - } else if (mklass != receiver_limit && mklass->is_interface()) { + } else if (do_dispatch && mklass != receiver_limit && mklass->is_interface()) { flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT); // it is a miranda method, so m->vtable_index is not what we want ResourceMark rm; @@ -213,6 +219,7 @@ flags |= CALLER_SENSITIVE; } + oop mname_oop = mname(); java_lang_invoke_MemberName::set_flags( mname_oop, flags); 
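  // (For reference, a sketch of the packing that set_flags() receives for,
  //  e.g., a virtual method -- modifier bits low, reference kind above them:
  //    int flags = (mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS)
  //              | IS_METHOD
  //              | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
  //  the Java-side MethodHandleNatives constants decode the same layout.)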
java_lang_invoke_MemberName::set_vmtarget(mname_oop, m); java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex); // vtable/itable index @@ -225,10 +232,12 @@ // This is done eagerly, since it is readily available without // constructing any new objects. // TO DO: maybe intern mname_oop - return mname_oop; + m->method_holder()->add_member_name(m->method_idnum(), mname); + + return mname(); } -Handle MethodHandles::init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS) { +Handle MethodHandles::init_method_MemberName(Handle mname, CallInfo& info, TRAPS) { Handle empty; if (info.resolved_appendix().not_null()) { // The resolved MemberName must not be accompanied by an appendix argument, @@ -238,29 +247,45 @@ } methodHandle m = info.resolved_method(); KlassHandle defc = info.resolved_klass(); - int vmindex = -1; + int vmindex = Method::invalid_vtable_index; if (defc->is_interface() && m->method_holder()->is_interface()) { - // LinkResolver does not report itable indexes! (fix this?) - vmindex = klassItable::compute_itable_index(m()); + // static interface methods do not reference vtable or itable + if (m->is_static()) { + vmindex = Method::nonvirtual_vtable_index; + } + // interface methods invoked via invokespecial also + // do not reference vtable or itable. + int ref_kind = ((java_lang_invoke_MemberName::flags(mname()) >> + REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK); + if (ref_kind == JVM_REF_invokeSpecial) { + vmindex = Method::nonvirtual_vtable_index; + } + // If neither m is static nor ref_kind is invokespecial, + // set it to itable index. + if (vmindex == Method::invalid_vtable_index) { + // LinkResolver does not report itable indexes! (fix this?) + vmindex = klassItable::compute_itable_index(m()); + } } else if (m->can_be_statically_bound()) { // LinkResolver reports vtable index even for final methods! vmindex = Method::nonvirtual_vtable_index; } else { vmindex = info.vtable_index(); } - oop res = init_method_MemberName(mname_oop, m(), (vmindex >= 0), defc()); + oop res = init_method_MemberName(mname, m(), (vmindex >= 0), defc()); assert(res == NULL || (java_lang_invoke_MemberName::vmindex(res) == vmindex), ""); return Handle(THREAD, res); } -oop MethodHandles::init_field_MemberName(oop mname_oop, Klass* field_holder, +oop MethodHandles::init_field_MemberName(Handle mname, KlassHandle field_holder, AccessFlags mods, oop type, oop name, intptr_t offset, bool is_setter) { int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS ); flags |= IS_FIELD | ((mods.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT); if (is_setter) flags += ((JVM_REF_putField - JVM_REF_getField) << REFERENCE_KIND_SHIFT); - Metadata* vmtarget = field_holder; + Metadata* vmtarget = field_holder(); int vmindex = offset; // determines the field uniquely when combined with static bit + oop mname_oop = mname(); java_lang_invoke_MemberName::set_flags(mname_oop, flags); java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget); java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); @@ -277,10 +302,10 @@ // Although the fieldDescriptor::_index would also identify the field, // we do not use it, because it is harder to decode. 
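  // (Illustration: an instance field at byte offset 12 is stored with
  //  vmindex == 12; whether that offset is applied to an instance or to the
  //  class mirror follows from the getField/getStatic (or put*) reference
  //  kind already packed into flags above.)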
// TO DO: maybe intern mname_oop - return mname_oop; + return mname(); } -Handle MethodHandles::init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS) { +Handle MethodHandles::init_field_MemberName(Handle mname, FieldAccessInfo& info, TRAPS) { return Handle(); #if 0 // FIXME KlassHandle field_holder = info.klass(); @@ -651,11 +676,9 @@ case IS_METHOD: { CallInfo result; - bool do_dispatch = true; // default, neutral setting { assert(!HAS_PENDING_EXCEPTION, ""); if (ref_kind == JVM_REF_invokeStatic) { - //do_dispatch = false; // no need, since statics are never dispatched LinkResolver::resolve_static_call(result, defc, name, type, KlassHandle(), false, false, THREAD); } else if (ref_kind == JVM_REF_invokeInterface) { @@ -666,7 +689,6 @@ LinkResolver::resolve_handle_call(result, defc, name, type, KlassHandle(), THREAD); } else if (ref_kind == JVM_REF_invokeSpecial) { - do_dispatch = false; // force non-virtual linkage LinkResolver::resolve_special_call(result, defc, name, type, KlassHandle(), false, THREAD); } else if (ref_kind == JVM_REF_invokeVirtual) { @@ -679,7 +701,7 @@ return empty; } } - return init_method_MemberName(mname(), result, THREAD); + return init_method_MemberName(mname, result, THREAD); } case IS_CONSTRUCTOR: { @@ -697,7 +719,7 @@ } } assert(result.is_statically_bound(), ""); - return init_method_MemberName(mname(), result, THREAD); + return init_method_MemberName(mname, result, THREAD); } case IS_FIELD: { @@ -710,7 +732,7 @@ oop name = field_name_or_null(fd.name()); bool is_setter = (ref_kind_is_valid(ref_kind) && ref_kind_is_setter(ref_kind)); mname = Handle(THREAD, - init_field_MemberName(mname(), sel_klass(), + init_field_MemberName(mname, sel_klass, fd.access_flags(), type, name, fd.offset(), is_setter)); return mname; } @@ -802,16 +824,15 @@ THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format"); } -int MethodHandles::find_MemberNames(Klass* k, +int MethodHandles::find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig, - int mflags, Klass* caller, - int skip, objArrayOop results) { - DEBUG_ONLY(No_Safepoint_Verifier nsv); - // this code contains no safepoints! - + int mflags, KlassHandle caller, + int skip, objArrayHandle results) { // %%% take caller into account! - if (k == NULL || !k->oop_is_instance()) return -1; + Thread* thread = Thread::current(); + + if (k.is_null() || !k->oop_is_instance()) return -1; int rfill = 0, rlimit = results->length(), rskip = skip; // overflow measurement: @@ -839,7 +860,7 @@ } if ((match_flags & IS_FIELD) != 0) { - for (FieldStream st(k, local_only, !search_intfc); !st.eos(); st.next()) { + for (FieldStream st(k(), local_only, !search_intfc); !st.eos(); st.next()) { if (name != NULL && st.name() != name) continue; if (sig != NULL && st.signature() != sig) @@ -848,15 +869,15 @@ if (rskip > 0) { --rskip; } else if (rfill < rlimit) { - oop result = results->obj_at(rfill++); - if (!java_lang_invoke_MemberName::is_instance(result)) + Handle result(thread, results->obj_at(rfill++)); + if (!java_lang_invoke_MemberName::is_instance(result())) return -99; // caller bug! 
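          // (Note: results->obj_at() values are wrapped in Handles here
          //  because the init_*_MemberName() calls may now allocate and
          //  safepoint while recording the MemberName, so raw oops must not
          //  be cached across them -- which is also why the old
          //  No_Safepoint_Verifier is gone.)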
          oop type = field_signature_type_or_null(st.signature());
          oop name = field_name_or_null(st.name());
-          oop saved = MethodHandles::init_field_MemberName(result, st.klass()(),
+          oop saved = MethodHandles::init_field_MemberName(result, st.klass(),
                                                            st.access_flags(), type, name,
                                                            st.offset());
-          if (saved != result)
+          if (saved != result())
            results->obj_at_put(rfill-1, saved);  // show saved instance to user
        } else if (++overflow >= overflow_limit) {
          match_flags = 0; break; // got tired of looking at overflow
@@ -889,7 +910,7 @@
    } else {
      // caller will accept either sort; no need to adjust name
    }
-    for (MethodStream st(k, local_only, !search_intfc); !st.eos(); st.next()) {
+    for (MethodStream st(k(), local_only, !search_intfc); !st.eos(); st.next()) {
      Method* m = st.method();
      Symbol* m_name = m->name();
      if (m_name == clinit_name)
        continue;
@@ -902,11 +923,11 @@
      if (rskip > 0) {
        --rskip;
      } else if (rfill < rlimit) {
-        oop result = results->obj_at(rfill++);
-        if (!java_lang_invoke_MemberName::is_instance(result))
+        Handle result(thread, results->obj_at(rfill++));
+        if (!java_lang_invoke_MemberName::is_instance(result()))
          return -99;  // caller bug!
        oop saved = MethodHandles::init_method_MemberName(result, m, true, NULL);
-        if (saved != result)
+        if (saved != result())
          results->obj_at_put(rfill-1, saved);  // show saved instance to user
      } else if (++overflow >= overflow_limit) {
        match_flags = 0; break; // got tired of looking at overflow
@@ -917,6 +938,89 @@
  // return number of elements we at least wanted to initialize
  return rfill + overflow;
 }
+
+//------------------------------------------------------------------------------
+// MemberNameTable
+//
+
+MemberNameTable::MemberNameTable(int methods_cnt)
+  : GrowableArray<jweak>(methods_cnt, true) {
+  assert_locked_or_safepoint(MemberNameTable_lock);
+}
+
+MemberNameTable::~MemberNameTable() {
+  assert_locked_or_safepoint(MemberNameTable_lock);
+  int len = this->length();
+
+  for (int idx = 0; idx < len; idx++) {
+    jweak ref = this->at(idx);
+    JNIHandles::destroy_weak_global(ref);
+  }
+}
+
+void MemberNameTable::add_member_name(int index, jweak mem_name_wref) {
+  assert_locked_or_safepoint(MemberNameTable_lock);
+  this->at_put_grow(index, mem_name_wref);
+}
+
+// Return a member name oop or NULL.
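+// (Entries are jweak, so a MemberName that has been collected resolves to
+//  NULL rather than being kept alive by this table; callers must NULL-check.
+//  Hypothetical use, 'mnt' being some MemberNameTable*:
+//    oop mem_name = mnt->get_member_name(i);
+//    if (mem_name != NULL) { /* still live */ })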
+oop MemberNameTable::get_member_name(int index) { + assert_locked_or_safepoint(MemberNameTable_lock); + + jweak ref = this->at(index); + oop mem_name = JNIHandles::resolve(ref); + return mem_name; +} + +#if INCLUDE_JVMTI +oop MemberNameTable::find_member_name_by_method(Method* old_method) { + assert_locked_or_safepoint(MemberNameTable_lock); + oop found = NULL; + int len = this->length(); + + for (int idx = 0; idx < len; idx++) { + oop mem_name = JNIHandles::resolve(this->at(idx)); + if (mem_name == NULL) { + continue; + } + Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name); + if (method == old_method) { + found = mem_name; + break; + } + } + return found; +} + +// It is called at safepoint only +void MemberNameTable::adjust_method_entries(Method** old_methods, Method** new_methods, + int methods_length, bool *trace_name_printed) { + assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint"); + // search the MemberNameTable for uses of either obsolete or EMCP methods + for (int j = 0; j < methods_length; j++) { + Method* old_method = old_methods[j]; + Method* new_method = new_methods[j]; + oop mem_name = find_member_name_by_method(old_method); + if (mem_name != NULL) { + java_lang_invoke_MemberName::adjust_vmtarget(mem_name, new_method); + + if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { + if (!(*trace_name_printed)) { + // RC_TRACE_MESG macro has an embedded ResourceMark + RC_TRACE_MESG(("adjust: name=%s", + old_method->method_holder()->external_name())); + *trace_name_printed = true; + } + // RC_TRACE macro has an embedded ResourceMark + RC_TRACE(0x00400000, ("MemberName method update: %s(%s)", + new_method->name()->as_C_string(), + new_method->signature()->as_C_string())); + } + } + } +} +#endif // INCLUDE_JVMTI + // // Here are the native methods in java.lang.invoke.MethodHandleNatives // They are the private interface between this JVM and the HotSpot-specific @@ -1010,8 +1114,8 @@ if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); } if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); } Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); - oop target_oop = JNIHandles::resolve_non_null(target_jh); - MethodHandles::init_MemberName(mname(), target_oop); + Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); + MethodHandles::init_MemberName(mname, target); } JVM_END @@ -1118,7 +1222,7 @@ x = ((Klass*) vmtarget)->java_mirror(); } else if (vmtarget->is_method()) { Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL); - x = MethodHandles::init_method_MemberName(mname2(), (Method*)vmtarget, false, NULL); + x = MethodHandles::init_method_MemberName(mname2, (Method*)vmtarget, false, NULL); } result->obj_at_put(1, x); return JNIHandles::make_local(env, result()); @@ -1161,8 +1265,8 @@ // %%% TO DO } - int res = MethodHandles::find_MemberNames(k(), name, sig, mflags, - caller(), skip, results()); + int res = MethodHandles::find_MemberNames(k, name, sig, mflags, + caller, skip, results); // TO DO: expand at least some of the MemberNames, to avoid massive callbacks return res; } @@ -1192,6 +1296,28 @@ } JVM_END +/** + * Throws a java/lang/UnsupportedOperationException unconditionally. + * This is required by the specification of MethodHandle.invoke if + * invoked directly. 
+ */ +JVM_ENTRY(jobject, MH_invoke_UOE(JNIEnv* env, jobject mh, jobjectArray args)) { + THROW_MSG_NULL(vmSymbols::java_lang_UnsupportedOperationException(), "MethodHandle.invoke cannot be invoked reflectively"); + return NULL; +} +JVM_END + +/** + * Throws a java/lang/UnsupportedOperationException unconditionally. + * This is required by the specification of MethodHandle.invokeExact if + * invoked directly. + */ +JVM_ENTRY(jobject, MH_invokeExact_UOE(JNIEnv* env, jobject mh, jobjectArray args)) { + THROW_MSG_NULL(vmSymbols::java_lang_UnsupportedOperationException(), "MethodHandle.invokeExact cannot be invoked reflectively"); + return NULL; +} +JVM_END + /// JVM_RegisterMethodHandleMethods #undef CS // Solaris builds complain @@ -1211,7 +1337,7 @@ #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) // These are the native methods on java.lang.invoke.MethodHandleNatives. -static JNINativeMethod required_methods_JDK8[] = { +static JNINativeMethod MHN_methods[] = { {CC"init", CC"("MEM""OBJ")V", FN_PTR(MHN_init_Mem)}, {CC"expand", CC"("MEM")V", FN_PTR(MHN_expand_Mem)}, {CC"resolve", CC"("MEM""CLS")"MEM, FN_PTR(MHN_resolve_Mem)}, @@ -1229,8 +1355,28 @@ {CC"getMemberVMInfo", CC"("MEM")"OBJ, FN_PTR(MHN_getMemberVMInfo)} }; -// This one function is exported, used by NativeLookup. +static JNINativeMethod MH_methods[] = { + // UnsupportedOperationException throwers + {CC"invoke", CC"(["OBJ")"OBJ, FN_PTR(MH_invoke_UOE)}, + {CC"invokeExact", CC"(["OBJ")"OBJ, FN_PTR(MH_invokeExact_UOE)} +}; +/** + * Helper method to register native methods. + */ +static bool register_natives(JNIEnv* env, jclass clazz, const JNINativeMethod* methods, jint nMethods) { + int status = env->RegisterNatives(clazz, methods, nMethods); + if (status != JNI_OK || env->ExceptionOccurred()) { + warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); + env->ExceptionClear(); + return false; + } + return true; +} + +/** + * This one function is exported, used by NativeLookup. + */ JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) { if (!EnableInvokeDynamic) { warning("JSR 292 is disabled in this JVM. Use -XX:+UnlockDiagnosticVMOptions -XX:+EnableInvokeDynamic to enable."); @@ -1248,16 +1394,14 @@ MH_class = (jclass) JNIHandles::make_local(env, mirror); } - int status; - if (enable_MH) { ThreadToNativeFromVM ttnfv(thread); - status = env->RegisterNatives(MHN_class, required_methods_JDK8, sizeof(required_methods_JDK8)/sizeof(JNINativeMethod)); - if (status != JNI_OK || env->ExceptionOccurred()) { - warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); - enable_MH = false; - env->ExceptionClear(); + if (enable_MH) { + enable_MH = register_natives(env, MHN_class, MHN_methods, sizeof(MHN_methods)/sizeof(JNINativeMethod)); + } + if (enable_MH) { + enable_MH = register_natives(env, MH_class, MH_methods, sizeof(MH_methods)/sizeof(JNINativeMethod)); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/methodHandles.hpp --- a/src/share/vm/prims/methodHandles.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/methodHandles.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -54,23 +54,23 @@
   static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
   static Handle new_MemberName(TRAPS);  // must be followed by init_MemberName
-  static oop init_MemberName(oop mname_oop, oop target_oop); // compute vmtarget/vmindex from target
-  static oop init_method_MemberName(oop mname_oop, Method* m, bool do_dispatch,
-                                    Klass* receiver_limit);
-  static oop init_field_MemberName(oop mname_oop, Klass* field_holder,
+  static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target
+  static oop init_method_MemberName(Handle mname_h, Method* m, bool do_dispatch,
+                                    KlassHandle receiver_limit_h);
+  static oop init_field_MemberName(Handle mname_h, KlassHandle field_holder_h,
                                    AccessFlags mods, oop type, oop name,
                                    intptr_t offset, bool is_setter = false);
-  static Handle init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS);
-  static Handle init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS);
+  static Handle init_method_MemberName(Handle mname_h, CallInfo& info, TRAPS);
+  static Handle init_field_MemberName(Handle mname_h, FieldAccessInfo& info, TRAPS);
   static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true);
-  static int find_MemberNames(Klass* k, Symbol* name, Symbol* sig,
-                              int mflags, Klass* caller,
-                              int skip, objArrayOop results);
+  static int find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig,
+                              int mflags, KlassHandle caller,
+                              int skip, objArrayHandle results);
   // bit values for suppress argument to expand_MemberName:
   enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };

   // Generate MethodHandles adapters.
-  static void generate_adapters();
+  static void generate_adapters(); // Called from MethodHandlesAdapterGenerator.
   static address generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                           vmIntrinsics::ID iid);
@@ -219,7 +219,6 @@
   }
 };

-
 //------------------------------------------------------------------------------
 // MethodHandlesAdapterGenerator
 //
@@ -230,4 +229,27 @@
   void generate();
 };

+//------------------------------------------------------------------------------
+// MemberNameTable
+//
+
+class MemberNameTable : public GrowableArray<jweak> {
+ public:
+  MemberNameTable(int methods_cnt);
+  ~MemberNameTable();
+  void add_member_name(int index, jweak mem_name_ref);
+  oop  get_member_name(int index);
+
+#if INCLUDE_JVMTI
+ public:
+  // RedefineClasses() API support:
+  // If a MemberName refers to old_method then update it
+  // to refer to new_method.
+  void adjust_method_entries(Method** old_methods, Method** new_methods,
+                             int methods_length, bool *trace_name_printed);
+ private:
+  oop find_member_name_by_method(Method* old_method);
+#endif // INCLUDE_JVMTI
+};
+
 #endif // SHARE_VM_PRIMS_METHODHANDLES_HPP
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/nativeLookup.cpp
--- a/src/share/vm/prims/nativeLookup.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/prims/nativeLookup.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -389,10 +389,7 @@
 address NativeLookup::lookup(methodHandle method, bool& in_base_library, TRAPS) {
   if (!method->has_native_function()) {
-    address entry =
-      method->intrinsic_id() == vmIntrinsics::_invokeGeneric ?
- SharedRuntime::native_method_throw_unsupported_operation_exception_entry() : - lookup_base(method, in_base_library, CHECK_NULL); + address entry = lookup_base(method, in_base_library, CHECK_NULL); method->set_native_function(entry, Method::native_bind_event_is_interesting); // -verbose:jni printing diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/perf.cpp --- a/src/share/vm/prims/perf.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/perf.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -142,20 +142,20 @@ } switch(variability) { - case 1: /* V_Constant */ + case PerfData::V_Constant: pl = PerfDataManager::create_long_constant(NULL_NS, (char *)name_utf, (PerfData::Units)units, value, CHECK_NULL); break; - case 2: /* V_Variable */ - pl = PerfDataManager::create_long_variable(NULL_NS, (char *)name_utf, + case PerfData::V_Monotonic: + pl = PerfDataManager::create_long_counter(NULL_NS, (char *)name_utf, (PerfData::Units)units, value, CHECK_NULL); break; - case 3: /* V_Monotonic Counter */ - pl = PerfDataManager::create_long_counter(NULL_NS, (char *)name_utf, + case PerfData::V_Variable: + pl = PerfDataManager::create_long_variable(NULL_NS, (char *)name_utf, (PerfData::Units)units, value, CHECK_NULL); break; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/unsafe.cpp --- a/src/share/vm/prims/unsafe.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/unsafe.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ #include "runtime/reflection.hpp" #include "runtime/synchronizer.hpp" #include "services/threadService.hpp" +#include "trace/tracing.hpp" #include "utilities/copy.hpp" #include "utilities/dtrace.hpp" @@ -115,12 +116,6 @@ inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) { jlong byte_offset = field_offset_to_byte_offset(field_offset); - // Don't allow unsafe to be used to read or write the header word of oops - // unless running GRAAL which wants to read the misc word for example when - // interpreting computeHashCode(). -#ifndef GRAAL - assert(p == NULL || field_offset >= oopDesc::header_size(), "offset must be outside of header"); -#endif #ifdef ASSERT if (p != NULL) { assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset"); @@ -190,7 +185,7 @@ oop v; \ /* Uncompression is not performed to unsafeAccess with null object. * This concerns accesses to the metaspace such as the classMirrorOffset which is not compressed.*/ \ - if (UseCompressedOops && p!=NULL && offset>=oopDesc::header_size()) { \ + if (UseCompressedOops && p != NULL && offset >= oopDesc::header_size()) { \ narrowOop n = *(narrowOop*)index_oop_from_field_offset_long(p, offset); \ v = oopDesc::decode_heap_oop(n); \ } else { \ @@ -331,10 +326,7 @@ OrderAccess::fence(); UNSAFE_END -#if defined(SPARC) || defined(X86) -// Sparc and X86 have atomic jlong (8 bytes) instructions - -#else +#ifndef SUPPORTS_NATIVE_CX8 // Keep old code for platforms which may not have atomic jlong (8 bytes) instructions // Volatile long versions must use locks if !VM_Version::supports_cx8(). 
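// (SUPPORTS_NATIVE_CX8 generalizes the old "defined(SPARC) || defined(X86)"
//  tests: a platform is expected to define it, typically in its per-CPU
//  globalDefinitions header, when 8-byte compare-and-exchange is always
//  available; the lock-based fallback below is then compiled only where
//  jlong accesses may not be atomic and supports_cx8() must be consulted.)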
@@ -372,7 +364,7 @@ } UNSAFE_END -#endif // not SPARC and not X86 +#endif // not SUPPORTS_NATIVE_CX8 #define DEFINE_GETSETOOP(jboolean, Boolean) \ \ @@ -436,8 +428,7 @@ DEFINE_GETSETOOP_VOLATILE(jfloat, Float); DEFINE_GETSETOOP_VOLATILE(jdouble, Double); -#if defined(SPARC) || defined(X86) -// Sparc and X86 have atomic jlong (8 bytes) instructions +#ifdef SUPPORTS_NATIVE_CX8 DEFINE_GETSETOOP_VOLATILE(jlong, Long); #endif @@ -466,8 +457,7 @@ UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) UnsafeWrapper("Unsafe_SetOrderedLong"); -#if defined(SPARC) || defined(X86) - // Sparc and X86 have atomic jlong (8 bytes) instructions +#ifdef SUPPORTS_NATIVE_CX8 SET_FIELD_VOLATILE(obj, offset, jlong, x); #else // Keep old code for platforms which may not have atomic long (8 bytes) instructions @@ -1227,6 +1217,7 @@ UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) UnsafeWrapper("Unsafe_Park"); + EventThreadPark event; #ifndef USDT2 HS_DTRACE_PROBE3(hotspot, thread__park__begin, thread->parker(), (int) isAbsolute, time); #else /* USDT2 */ @@ -1241,6 +1232,13 @@ HOTSPOT_THREAD_PARK_END( (uintptr_t) thread->parker()); #endif /* USDT2 */ + if (event.should_commit()) { + oop obj = thread->current_park_blocker(); + event.set_klass(obj ? obj->klass() : NULL); + event.set_timeout(time); + event.set_address(obj ? (TYPE_ADDRESS) (uintptr_t) obj : 0); + event.commit(); + } UNSAFE_END UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread)) diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/prims/whitebox.cpp --- a/src/share/vm/prims/whitebox.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/prims/whitebox.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ #include "runtime/os.hpp" #include "utilities/debug.hpp" #include "utilities/macros.hpp" +#include "utilities/exceptions.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/concurrentMark.hpp" @@ -93,6 +94,15 @@ return closure.found(); WB_END +WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) { + CollectorPolicy * p = Universe::heap()->collector_policy(); + gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap " + SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT, + p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(), + p->min_alignment(), p->max_alignment()); +} +WB_END + #if INCLUDE_ALL_GCS WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj)) G1CollectedHeap* g1 = G1CollectedHeap::heap(); @@ -237,10 +247,10 @@ WB_END -WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method)) +WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level)) jmethodID jmid = reflected_method_to_jmid(thread, env, method); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); - mh->set_not_compilable(); + mh->set_not_compilable(comp_level, true /* report */, "WhiteBox"); WB_END WB_ENTRY(jboolean, WB_TestSetDontInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value)) @@ -278,6 +288,7 @@ methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); MutexLockerEx mu(Compile_lock); MethodData* mdo = mh->method_data(); + MethodCounters* mcs = mh->method_counters(); if (mdo != NULL) { mdo->init(); @@ -288,31 +299,29 @@ } } - mh->backedge_counter()->init(); - mh->invocation_counter()->init(); - mh->set_interpreter_invocation_count(0); - mh->set_interpreter_throwout_count(0); mh->clear_not_c1_compilable(); mh->clear_not_c2_compilable(); mh->clear_not_c2_osr_compilable(); NOT_PRODUCT(mh->set_compiled_invocation_count(0)); + if (mcs != NULL) { + mcs->backedge_counter()->init(); + mcs->invocation_counter()->init(); + mcs->set_interpreter_invocation_count(0); + mcs->set_interpreter_throwout_count(0); #ifdef TIERED - mh->set_rate(0.0F); - mh->set_prev_event_count(0); - mh->set_prev_time(0); + mcs->set_rate(0.0F); + mh->set_prev_event_count(0, THREAD); + mh->set_prev_time(0, THREAD); #endif + } WB_END WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString)) ResourceMark rm(THREAD); int len; - jchar* name = java_lang_String::as_unicode_string(JNIHandles::resolve(javaString), len); - oop found_string = StringTable::the_table()->lookup(name, len); - if (found_string == NULL) { - return false; - } - return true; + jchar* name = java_lang_String::as_unicode_string(JNIHandles::resolve(javaString), len, CHECK_false); + return (StringTable::lookup(name, len) != NULL); WB_END @@ -321,6 +330,21 @@ Universe::heap()->collect(GCCause::_last_ditch_collection); WB_END + +WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o)) + // static+volatile in order to force the read to happen + // (not be eliminated by the compiler) + static char c; + static volatile char* p; + + p = os::reserve_memory(os::vm_allocation_granularity(), NULL, 0); + if (p == NULL) { + THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Failed to reserve memory"); + } + + c = *p; +WB_END + //Some convenience methods to deal with objects from java int WhiteBox::offset_for_field(const char* field_name, oop object, Symbol* signature_symbol) { @@ 
-382,6 +406,7 @@ CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;", (void*) &WB_ParseCommandLine }, + {CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes }, #if INCLUDE_ALL_GCS {CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark}, {CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous }, @@ -398,30 +423,32 @@ {CC"NMTWaitForDataMerge", CC"()Z", (void*)&WB_NMTWaitForDataMerge}, #endif // INCLUDE_NMT {CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll }, - {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Method;)I", + {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;)I", (void*)&WB_DeoptimizeMethod }, - {CC"isMethodCompiled", CC"(Ljava/lang/reflect/Method;)Z", + {CC"isMethodCompiled", CC"(Ljava/lang/reflect/Executable;)Z", (void*)&WB_IsMethodCompiled }, - {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Method;I)Z", + {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Executable;I)Z", (void*)&WB_IsMethodCompilable}, {CC"isMethodQueuedForCompilation", - CC"(Ljava/lang/reflect/Method;)Z", (void*)&WB_IsMethodQueuedForCompilation}, + CC"(Ljava/lang/reflect/Executable;)Z", (void*)&WB_IsMethodQueuedForCompilation}, {CC"makeMethodNotCompilable", - CC"(Ljava/lang/reflect/Method;)V", (void*)&WB_MakeMethodNotCompilable}, + CC"(Ljava/lang/reflect/Executable;I)V", (void*)&WB_MakeMethodNotCompilable}, {CC"testSetDontInlineMethod", - CC"(Ljava/lang/reflect/Method;Z)Z", (void*)&WB_TestSetDontInlineMethod}, + CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetDontInlineMethod}, {CC"getMethodCompilationLevel", - CC"(Ljava/lang/reflect/Method;)I", (void*)&WB_GetMethodCompilationLevel}, + CC"(Ljava/lang/reflect/Executable;)I", (void*)&WB_GetMethodCompilationLevel}, {CC"getCompileQueuesSize", CC"()I", (void*)&WB_GetCompileQueuesSize}, {CC"testSetForceInlineMethod", - CC"(Ljava/lang/reflect/Method;Z)Z", (void*)&WB_TestSetForceInlineMethod}, + CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetForceInlineMethod}, {CC"enqueueMethodForCompilation", - CC"(Ljava/lang/reflect/Method;I)Z", (void*)&WB_EnqueueMethodForCompilation}, + CC"(Ljava/lang/reflect/Executable;I)Z", (void*)&WB_EnqueueMethodForCompilation}, {CC"clearMethodState", - CC"(Ljava/lang/reflect/Method;)V", (void*)&WB_ClearMethodState}, + CC"(Ljava/lang/reflect/Executable;)V", (void*)&WB_ClearMethodState}, {CC"isInStringTable", CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable }, {CC"fullGC", CC"()V", (void*)&WB_FullGC }, + + {CC"readReservedMemory", CC"()V", (void*)&WB_ReadReservedMemory }, }; #undef CC @@ -433,9 +460,29 @@ instanceKlassHandle ikh = instanceKlassHandle(JNIHandles::resolve(wbclass)->klass()); Handle loader(ikh->class_loader()); if (loader.is_null()) { + ResourceMark rm; ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI - jint result = env->RegisterNatives(wbclass, methods, sizeof(methods)/sizeof(methods[0])); - if (result == 0) { + bool result = true; + // one by one registration natives for exception catching + jclass exceptionKlass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string()); + for (int i = 0, n = sizeof(methods) / sizeof(methods[0]); i < n; ++i) { + if (env->RegisterNatives(wbclass, methods + i, 1) != 0) { + result = false; + if (env->ExceptionCheck() && env->IsInstanceOf(env->ExceptionOccurred(), exceptionKlass)) { + // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native + // ignoring the exception + tty->print_cr("Warning: 'NoSuchMethodError' on register of 
sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature); + env->ExceptionClear(); + } else { + // register is failed w/o exception or w/ unexpected exception + tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered", methods[i].name, methods[i].signature); + env->UnregisterNatives(wbclass); + break; + } + } + } + + if (result) { WhiteBox::set_used(); } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/advancedThresholdPolicy.cpp --- a/src/share/vm/runtime/advancedThresholdPolicy.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,16 +68,17 @@ } #endif - + set_increase_threshold_at_ratio(); set_start_time(os::javaTimeMillis()); } // update_rate() is called from select_task() while holding a compile queue lock. void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) { + JavaThread* THREAD = JavaThread::current(); if (is_old(m)) { // We don't remove old methods from the queue, // so we can just zero the rate. - m->set_rate(0); + m->set_rate(0, THREAD); return; } @@ -93,13 +94,13 @@ if (delta_s >= TieredRateUpdateMinTime) { // And we must've taken the previous point at least 1ms before. if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) { - m->set_prev_time(t); - m->set_prev_event_count(event_count); - m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond + m->set_prev_time(t, THREAD); + m->set_prev_event_count(event_count, THREAD); + m->set_rate((float)delta_e / (float)delta_t, THREAD); // Rate is events per millisecond } else if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) { // If nothing happened for 25ms, zero the rate. Don't modify prev values. - m->set_rate(0); + m->set_rate(0, THREAD); } } } @@ -204,6 +205,17 @@ double queue_size = CompileBroker::queue_size(level); int comp_count = compiler_count(level); double k = queue_size / (feedback_k * comp_count) + 1; + + // Increase C1 compile threshold when the code cache is filled more + // than specified by IncreaseFirstTierCompileThresholdAt percentage. + // The main intention is to keep enough free space for C2 compiled code + // to achieve peak performance if the code cache is under stress. + if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) { + double current_reverse_free_ratio = CodeCache::reverse_free_ratio(); + if (current_reverse_free_ratio > _increase_threshold_at_ratio) { + k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio); + } + } return k; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/advancedThresholdPolicy.hpp --- a/src/share/vm/runtime/advancedThresholdPolicy.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -201,9 +201,12 @@ // Is method profiled enough? 
bool is_method_profiled(Method* method); + double _increase_threshold_at_ratio; + protected: void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level); + void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); } void set_start_time(jlong t) { _start_time = t; } jlong start_time() const { return _start_time; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/arguments.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -751,16 +751,16 @@ return; } - int index = *count; + int new_count = *count + 1; // expand the array and add arg to the last element - (*count)++; if (*bldarray == NULL) { - *bldarray = NEW_C_HEAP_ARRAY(char*, *count, mtInternal); + *bldarray = NEW_C_HEAP_ARRAY(char*, new_count, mtInternal); } else { - *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, *count, mtInternal); + *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, new_count, mtInternal); } - (*bldarray)[index] = strdup(arg); + (*bldarray)[*count] = strdup(arg); + *count = new_count; } void Arguments::build_jvm_args(const char* arg) { @@ -1098,6 +1098,10 @@ if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) { FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5); } + if (!UseInterpreter) { // -Xcomp + Tier3InvokeNotifyFreqLog = 0; + Tier4InvocationThreshold = 0; + } } #if INCLUDE_ALL_GCS @@ -1626,30 +1630,38 @@ FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx)reasonable_max); } - // If the initial_heap_size has not been set with InitialHeapSize - // or -Xms, then set it as fraction of the size of physical memory, - // respecting the maximum and minimum sizes of the heap. - if (FLAG_IS_DEFAULT(InitialHeapSize)) { + // If the minimum or initial heap_size have not been set or requested to be set + // ergonomically, set them accordingly. + if (InitialHeapSize == 0 || min_heap_size() == 0) { julong reasonable_minimum = (julong)(OldSize + NewSize); reasonable_minimum = MIN2(reasonable_minimum, (julong)MaxHeapSize); reasonable_minimum = limit_by_allocatable_memory(reasonable_minimum); - julong reasonable_initial = phys_mem / InitialRAMFraction; - - reasonable_initial = MAX2(reasonable_initial, reasonable_minimum); - reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize); - - reasonable_initial = limit_by_allocatable_memory(reasonable_initial); - - if (PrintGCDetails && Verbose) { - // Cannot use gclog_or_tty yet. - tty->print_cr(" Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial); - tty->print_cr(" Minimum heap size " SIZE_FORMAT, (uintx)reasonable_minimum); + if (InitialHeapSize == 0) { + julong reasonable_initial = phys_mem / InitialRAMFraction; + + reasonable_initial = MAX3(reasonable_initial, reasonable_minimum, (julong)min_heap_size()); + reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize); + + reasonable_initial = limit_by_allocatable_memory(reasonable_initial); + + if (PrintGCDetails && Verbose) { + // Cannot use gclog_or_tty yet. + tty->print_cr(" Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial); + } + FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial); } - FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial); - set_min_heap_size((uintx)reasonable_minimum); + // If the minimum heap size has not been set (via -Xms), + // synchronize with InitialHeapSize to avoid errors with the default value. 
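To make the new scaling concrete: with the default IncreaseFirstTierCompileThresholdAt of 50 (see globals.hpp below), set_increase_threshold_at_ratio() computes 100 / (100 - 50) = 2, and the k *= exp(r - 2) factor in common() starts to bite once the code cache's reverse free ratio r (roughly capacity divided by free space) exceeds 2, i.e. once the cache is more than half full. A standalone sketch with a deliberately simplified reverse free ratio:

#include <cmath>
#include <cstdio>

int main() {
    const double percent  = 50.0;                       // IncreaseFirstTierCompileThresholdAt
    const double at_ratio = 100.0 / (100.0 - percent);  // == 2.0, as in set_increase_threshold_at_ratio()
    for (double filled = 0.3; filled < 1.0; filled += 0.2) {
        double r = 1.0 / (1.0 - filled);                // simplified capacity/free ratio
        double k = 1.0;                                 // base scale from queue length omitted
        if (r > at_ratio) {
            k *= exp(r - at_ratio);                     // same growth term as in common() above
        }
        printf("cache %2.0f%% full -> C1 threshold scale %.2f\n", filled * 100.0, k);
    }
    return 0;
}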
+ if (min_heap_size() == 0) { + set_min_heap_size(MIN2((uintx)reasonable_minimum, InitialHeapSize)); + if (PrintGCDetails && Verbose) { + // Cannot use gclog_or_tty yet. + tty->print_cr(" Minimum heap size " SIZE_FORMAT, min_heap_size()); + } + } } } @@ -1670,6 +1682,20 @@ // Aggressive optimization flags -XX:+AggressiveOpts void Arguments::set_aggressive_opts_flags() { #ifdef COMPILER2 + if (AggressiveUnboxing) { + if (FLAG_IS_DEFAULT(EliminateAutoBox)) { + FLAG_SET_DEFAULT(EliminateAutoBox, true); + } else if (!EliminateAutoBox) { + // warning("AggressiveUnboxing is disabled because EliminateAutoBox is disabled"); + AggressiveUnboxing = false; + } + if (FLAG_IS_DEFAULT(DoEscapeAnalysis)) { + FLAG_SET_DEFAULT(DoEscapeAnalysis, true); + } else if (!DoEscapeAnalysis) { + // warning("AggressiveUnboxing is disabled because DoEscapeAnalysis is disabled"); + AggressiveUnboxing = false; + } + } if (AggressiveOpts || !FLAG_IS_DEFAULT(AutoBoxCacheMax)) { if (FLAG_IS_DEFAULT(EliminateAutoBox)) { FLAG_SET_DEFAULT(EliminateAutoBox, true); @@ -1902,7 +1928,7 @@ status = false; } - status = status && verify_percentage(AdaptiveSizePolicyWeight, + status = status && verify_interval(AdaptiveSizePolicyWeight, 0, 100, "AdaptiveSizePolicyWeight"); status = status && verify_percentage(ThresholdTolerance, "ThresholdTolerance"); status = status && verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio"); @@ -1910,7 +1936,7 @@ // Divide by bucket size to prevent a large size from causing rollover when // calculating amount of memory needed to be allocated for the String table. - status = status && verify_interval(StringTableSize, defaultStringTableSize, + status = status && verify_interval(StringTableSize, minimumStringTableSize, (max_uintx / StringTable::bucket_size()), "StringTable size"); if (MinHeapFreeRatio > MaxHeapFreeRatio) { @@ -1962,8 +1988,6 @@ FLAG_SET_DEFAULT(UseGCOverheadLimit, false); } - status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit"); - status = status && check_gc_consistency(); status = status && check_stack_pages(); @@ -2052,6 +2076,56 @@ "G1RefProcDrainInterval"); status = status && verify_min_value((intx)G1ConcMarkStepDurationMillis, 1, "G1ConcMarkStepDurationMillis"); + status = status && verify_interval(G1ConcRSHotCardLimit, 0, max_jubyte, + "G1ConcRSHotCardLimit"); + status = status && verify_interval(G1ConcRSLogCacheSize, 0, 31, + "G1ConcRSLogCacheSize"); + } + if (UseConcMarkSweepGC) { + status = status && verify_min_value(CMSOldPLABNumRefills, 1, "CMSOldPLABNumRefills"); + status = status && verify_min_value(CMSOldPLABToleranceFactor, 1, "CMSOldPLABToleranceFactor"); + status = status && verify_min_value(CMSOldPLABMax, 1, "CMSOldPLABMax"); + status = status && verify_interval(CMSOldPLABMin, 1, CMSOldPLABMax, "CMSOldPLABMin"); + + status = status && verify_min_value(CMSYoungGenPerWorker, 1, "CMSYoungGenPerWorker"); + + status = status && verify_min_value(CMSSamplingGrain, 1, "CMSSamplingGrain"); + status = status && verify_interval(CMS_SweepWeight, 0, 100, "CMS_SweepWeight"); + status = status && verify_interval(CMS_FLSWeight, 0, 100, "CMS_FLSWeight"); + + status = status && verify_interval(FLSCoalescePolicy, 0, 4, "FLSCoalescePolicy"); + + status = status && verify_min_value(CMSRescanMultiple, 1, "CMSRescanMultiple"); + status = status && verify_min_value(CMSConcMarkMultiple, 1, "CMSConcMarkMultiple"); + + status = status && verify_interval(CMSPrecleanIter, 0, 9, "CMSPrecleanIter"); + status = status && verify_min_value(CMSPrecleanDenominator, 1, 
"CMSPrecleanDenominator"); + status = status && verify_interval(CMSPrecleanNumerator, 0, CMSPrecleanDenominator - 1, "CMSPrecleanNumerator"); + + status = status && verify_percentage(CMSBootstrapOccupancy, "CMSBootstrapOccupancy"); + + status = status && verify_min_value(CMSPrecleanThreshold, 100, "CMSPrecleanThreshold"); + + status = status && verify_percentage(CMSScheduleRemarkEdenPenetration, "CMSScheduleRemarkEdenPenetration"); + status = status && verify_min_value(CMSScheduleRemarkSamplingRatio, 1, "CMSScheduleRemarkSamplingRatio"); + status = status && verify_min_value(CMSBitMapYieldQuantum, 1, "CMSBitMapYieldQuantum"); + status = status && verify_percentage(CMSTriggerRatio, "CMSTriggerRatio"); + status = status && verify_percentage(CMSIsTooFullPercentage, "CMSIsTooFullPercentage"); + } + + if (UseParallelGC || UseParallelOldGC) { + status = status && verify_interval(ParallelOldDeadWoodLimiterMean, 0, 100, "ParallelOldDeadWoodLimiterMean"); + status = status && verify_interval(ParallelOldDeadWoodLimiterStdDev, 0, 100, "ParallelOldDeadWoodLimiterStdDev"); + + status = status && verify_percentage(YoungGenerationSizeIncrement, "YoungGenerationSizeIncrement"); + status = status && verify_percentage(TenuredGenerationSizeIncrement, "TenuredGenerationSizeIncrement"); + + status = status && verify_min_value(YoungGenerationSizeSupplementDecay, 1, "YoungGenerationSizeSupplementDecay"); + status = status && verify_min_value(TenuredGenerationSizeSupplementDecay, 1, "TenuredGenerationSizeSupplementDecay"); + + status = status && verify_min_value(ParGCCardsPerStrideChunk, 1, "ParGCCardsPerStrideChunk"); + + status = status && verify_min_value(ParallelOldGCSplitInterval, 0, "ParallelOldGCSplitInterval"); } #endif // INCLUDE_ALL_GCS @@ -2072,7 +2146,42 @@ status = status && verify_interval(MarkStackSizeMax, 1, (max_jint - 1), "MarkStackSizeMax"); - + status = status && verify_interval(NUMAChunkResizeWeight, 0, 100, "NUMAChunkResizeWeight"); + + status = status && verify_min_value(LogEventsBufferEntries, 1, "LogEventsBufferEntries"); + + status = status && verify_min_value(HeapSizePerGCThread, (uintx) os::vm_page_size(), "HeapSizePerGCThread"); + + status = status && verify_min_value(GCTaskTimeStampEntries, 1, "GCTaskTimeStampEntries"); + + status = status && verify_percentage(ParallelGCBufferWastePct, "ParallelGCBufferWastePct"); + status = status && verify_interval(TargetPLABWastePct, 1, 100, "TargetPLABWastePct"); + + status = status && verify_min_value(ParGCStridesPerThread, 1, "ParGCStridesPerThread"); + + status = status && verify_min_value(MinRAMFraction, 1, "MinRAMFraction"); + status = status && verify_min_value(InitialRAMFraction, 1, "InitialRAMFraction"); + status = status && verify_min_value(MaxRAMFraction, 1, "MaxRAMFraction"); + status = status && verify_min_value(DefaultMaxRAMFraction, 1, "DefaultMaxRAMFraction"); + + status = status && verify_interval(AdaptiveTimeWeight, 0, 100, "AdaptiveTimeWeight"); + status = status && verify_min_value(AdaptiveSizeDecrementScaleFactor, 1, "AdaptiveSizeDecrementScaleFactor"); + + status = status && verify_interval(TLABAllocationWeight, 0, 100, "TLABAllocationWeight"); + status = status && verify_min_value(MinTLABSize, 1, "MinTLABSize"); + status = status && verify_min_value(TLABRefillWasteFraction, 1, "TLABRefillWasteFraction"); + + status = status && verify_percentage(YoungGenerationSizeSupplement, "YoungGenerationSizeSupplement"); + status = status && verify_percentage(TenuredGenerationSizeSupplement, "TenuredGenerationSizeSupplement"); + + // the 
"age" field in the oop header is 4 bits; do not want to pull in markOop.hpp + // just for that, so hardcode here. + status = status && verify_interval(MaxTenuringThreshold, 0, 15, "MaxTenuringThreshold"); + status = status && verify_interval(InitialTenuringThreshold, 0, MaxTenuringThreshold, "MaxTenuringThreshold"); + status = status && verify_percentage(TargetSurvivorRatio, "TargetSurvivorRatio"); + status = status && verify_percentage(MarkSweepDeadRatio, "MarkSweepDeadRatio"); + + status = status && verify_min_value(MarkSweepAlwaysCompactCount, 1, "MarkSweepAlwaysCompactCount"); #ifdef SPARC if (UseConcMarkSweepGC || UseG1GC) { // Issue a stern warning if the user has explicitly set @@ -2110,38 +2219,6 @@ FLAG_SET_CMDLINE(bool, UseCompressedOops, false); } - if (UseCompressedKlassPointers) { - if (IgnoreUnrecognizedVMOptions) { - warning("UseCompressedKlassPointers is disabled, because it is not supported by Graal"); - FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false); - } else { - jio_fprintf(defaultStream::error_stream(), - "UseCompressedKlassPointers are not supported in Graal at the moment\n"); - status = false; - } - } else { - // This prevents the flag being set to true by set_ergonomics_flags() - FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false); - } - if (UseG1GC) { - if (IgnoreUnrecognizedVMOptions) { - warning("UseG1GC is still experimental in Graal, use SerialGC instead "); - FLAG_SET_CMDLINE(bool, UseG1GC, true); - } else { - warning("UseG1GC is still experimental in Graal, use SerialGC instead "); - status = true; - } - } else { - // This prevents the flag being set to true by set_ergonomics_flags() - FLAG_SET_CMDLINE(bool, UseG1GC, false); - } - - if (!ScavengeRootsInCode) { - warning("forcing ScavengeRootsInCode non-zero because Graal is enabled"); - ScavengeRootsInCode = 1; - } - -#endif return status; } @@ -2278,6 +2355,55 @@ return JNI_OK; } +// Checks if name in command-line argument -agent{lib,path}:name[=options] +// represents a valid HPROF of JDWP agent. 
is_path==true denotes that we +// are dealing with -agentpath (case where name is a path), otherwise with +// -agentlib +bool valid_hprof_or_jdwp_agent(char *name, bool is_path) { + char *_name; + const char *_hprof = "hprof", *_jdwp = "jdwp"; + size_t _len_hprof, _len_jdwp, _len_prefix; + + if (is_path) { + if ((_name = strrchr(name, (int) *os::file_separator())) == NULL) { + return false; + } + + _name++; // skip past last path separator + _len_prefix = strlen(JNI_LIB_PREFIX); + + if (strncmp(_name, JNI_LIB_PREFIX, _len_prefix) != 0) { + return false; + } + + _name += _len_prefix; + _len_hprof = strlen(_hprof); + _len_jdwp = strlen(_jdwp); + + if (strncmp(_name, _hprof, _len_hprof) == 0) { + _name += _len_hprof; + } + else if (strncmp(_name, _jdwp, _len_jdwp) == 0) { + _name += _len_jdwp; + } + else { + return false; + } + + if (strcmp(_name, JNI_LIB_SUFFIX) != 0) { + return false; + } + + return true; + } + + if (strcmp(name, _hprof) == 0 || strcmp(name, _jdwp) == 0) { + return true; + } + + return false; +} + jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, @@ -2376,7 +2502,7 @@ options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1, mtInternal), pos + 1); } #if !INCLUDE_JVMTI - if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) { + if (valid_hprof_or_jdwp_agent(name, is_absolute_path)) { jio_fprintf(defaultStream::error_stream(), "Profiling and debugging agents are not supported in this VM\n"); return JNI_ERR; @@ -2431,7 +2557,8 @@ // -Xms } else if (match_option(option, "-Xms", &tail)) { julong long_initial_heap_size = 0; - ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 1); + // an initial heap size of 0 means automatically determine + ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 0); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), "Invalid initial heap size: %s\n", option->optionString); @@ -2442,7 +2569,7 @@ // Currently the minimum size and the initial heap sizes are the same. set_min_heap_size(InitialHeapSize); // -Xmx - } else if (match_option(option, "-Xmx", &tail)) { + } else if (match_option(option, "-Xmx", &tail) || match_option(option, "-XX:MaxHeapSize=", &tail)) { julong long_max_heap_size = 0; ArgsRange errcode = parse_memory_size(tail, &long_max_heap_size, 1); if (errcode != arg_in_range) { @@ -2494,16 +2621,23 @@ } else if (match_option(option, "-Xmaxjitcodesize", &tail) || match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) { julong long_ReservedCodeCacheSize = 0; - ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, - (size_t)InitialCodeCacheSize); + ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, 1); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), - "Invalid maximum code cache size: %s. 
Should be greater than InitialCodeCacheSize=%dK\n", - option->optionString, InitialCodeCacheSize/K); - describe_range_error(errcode); + "Invalid maximum code cache size: %s.\n", option->optionString); return JNI_EINVAL; } FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize); + //-XX:IncreaseFirstTierCompileThresholdAt= + } else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) { + uintx uint_IncreaseFirstTierCompileThresholdAt = 0; + if (!parse_uintx(tail, &uint_IncreaseFirstTierCompileThresholdAt, 0) || uint_IncreaseFirstTierCompileThresholdAt > 99) { + jio_fprintf(defaultStream::error_stream(), + "Invalid value for IncreaseFirstTierCompileThresholdAt: %s. Should be between 0 and 99.\n", + option->optionString); + return JNI_EINVAL; + } + FLAG_SET_CMDLINE(uintx, IncreaseFirstTierCompileThresholdAt, (uintx)uint_IncreaseFirstTierCompileThresholdAt); // -green } else if (match_option(option, "-green", &tail)) { jio_fprintf(defaultStream::error_stream(), @@ -2968,6 +3102,11 @@ set_mode_flags(_int); } + // eventually fix up InitialTenuringThreshold if only MaxTenuringThreshold is set + if (FLAG_IS_DEFAULT(InitialTenuringThreshold) && (InitialTenuringThreshold > MaxTenuringThreshold)) { + FLAG_SET_ERGO(uintx, InitialTenuringThreshold, MaxTenuringThreshold); + } + #ifndef COMPILER2 // Don't degrade server performance for footprint if (FLAG_IS_DEFAULT(UseLargePages) && @@ -3100,36 +3239,27 @@ } void Arguments::set_shared_spaces_flags() { - const bool must_share = DumpSharedSpaces || RequireSharedSpaces; - const bool might_share = must_share || UseSharedSpaces; - - // CompressedOops cannot be used with CDS. The offsets of oopmaps and - // static fields are incorrect in the archive. With some more clever - // initialization, this restriction can probably be lifted. - // ??? UseLargePages might be okay now - const bool cannot_share = UseCompressedOops || - (UseLargePages && FLAG_IS_CMDLINE(UseLargePages)); - if (cannot_share) { - if (must_share) { - warning("disabling large pages %s" - "because of %s", "" LP64_ONLY("and compressed oops "), - DumpSharedSpaces ? "-Xshare:dump" : "-Xshare:on"); - FLAG_SET_CMDLINE(bool, UseLargePages, false); - LP64_ONLY(FLAG_SET_CMDLINE(bool, UseCompressedOops, false)); - LP64_ONLY(FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false)); - } else { - // Prefer compressed oops and large pages to class data sharing - if (UseSharedSpaces && Verbose) { - warning("turning off use of shared archive because of large pages%s", - "" LP64_ONLY(" and/or compressed oops")); +#ifdef _LP64 + const bool must_share = DumpSharedSpaces || RequireSharedSpaces; + + // CompressedOops cannot be used with CDS. The offsets of oopmaps and + // static fields are incorrect in the archive. With some more clever + // initialization, this restriction can probably be lifted. + if (UseCompressedOops) { + if (must_share) { + warning("disabling compressed oops because of %s", + DumpSharedSpaces ? "-Xshare:dump" : "-Xshare:on"); + FLAG_SET_CMDLINE(bool, UseCompressedOops, false); + FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false); + } else { + // Prefer compressed oops to class data sharing + if (UseSharedSpaces && Verbose) { + warning("turning off use of shared archive because of compressed oops"); + } + no_shared_spaces(); } - no_shared_spaces(); } - } else if (UseLargePages && might_share) { - // Disable large pages to allow shared spaces. This is sub-optimal, since - // there may not even be a shared archive to use. 
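The new -XX:IncreaseFirstTierCompileThresholdAt= case above follows the usual shape for bounded unsigned options: parse the tail, range-check, then install with FLAG_SET_CMDLINE. A standalone sketch of that shape; this parse_uintx is a simplified stand-in for the HotSpot helper, not its real implementation:

#include <cerrno>
#include <cstdio>
#include <cstdlib>

static bool parse_uintx(const char* s, unsigned long* out, unsigned long min) {
    char* end;
    errno = 0;
    unsigned long v = strtoul(s, &end, 10);
    if (errno != 0 || end == s || *end != '\0' || v < min) return false;
    *out = v;
    return true;
}

int main(int argc, char** argv) {
    const char* tail = (argc > 1) ? argv[1] : "50";     // text after the '=' in the option
    unsigned long value = 0;
    if (!parse_uintx(tail, &value, 0) || value > 99) {  // same 0..99 window as above
        fprintf(stderr, "Invalid value for IncreaseFirstTierCompileThresholdAt: %s\n", tail);
        return 1;
    }
    printf("IncreaseFirstTierCompileThresholdAt = %lu\n", value);
    return 0;
}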
- FLAG_SET_DEFAULT(UseLargePages, false); - } +#endif if (DumpSharedSpaces) { if (RequireSharedSpaces) { @@ -3174,25 +3304,37 @@ } #endif // INCLUDE_ALL_GCS +// Sharing support +// Construct the path to the archive +static char* get_shared_archive_path() { + char *shared_archive_path; + if (SharedArchiveFile == NULL) { + char jvm_path[JVM_MAXPATHLEN]; + os::jvm_path(jvm_path, sizeof(jvm_path)); + char *end = strrchr(jvm_path, *os::file_separator()); + if (end != NULL) *end = '\0'; + size_t jvm_path_len = strlen(jvm_path); + size_t file_sep_len = strlen(os::file_separator()); + shared_archive_path = NEW_C_HEAP_ARRAY(char, jvm_path_len + + file_sep_len + 20, mtInternal); + if (shared_archive_path != NULL) { + strncpy(shared_archive_path, jvm_path, jvm_path_len + 1); + strncat(shared_archive_path, os::file_separator(), file_sep_len); + strncat(shared_archive_path, "classes.jsa", 11); + } + } else { + shared_archive_path = NEW_C_HEAP_ARRAY(char, strlen(SharedArchiveFile) + 1, mtInternal); + if (shared_archive_path != NULL) { + strncpy(shared_archive_path, SharedArchiveFile, strlen(SharedArchiveFile) + 1); + } + } + return shared_archive_path; +} + // Parse entry point called from JNI_CreateJavaVM jint Arguments::parse(const JavaVMInitArgs* args) { - // Sharing support - // Construct the path to the archive - char jvm_path[JVM_MAXPATHLEN]; - os::jvm_path(jvm_path, sizeof(jvm_path)); - char *end = strrchr(jvm_path, *os::file_separator()); - if (end != NULL) *end = '\0'; - char *shared_archive_path = NEW_C_HEAP_ARRAY(char, strlen(jvm_path) + - strlen(os::file_separator()) + 20, mtInternal); - if (shared_archive_path == NULL) return JNI_ENOMEM; - strcpy(shared_archive_path, jvm_path); - strcat(shared_archive_path, os::file_separator()); - strcat(shared_archive_path, "classes"); - strcat(shared_archive_path, ".jsa"); - SharedArchivePath = shared_archive_path; - // Remaining part of option string const char* tail; @@ -3283,6 +3425,12 @@ return result; } + // Call get_shared_archive_path() here, after possible SharedArchiveFile option got parsed. + SharedArchivePath = get_shared_archive_path(); + if (SharedArchivePath == NULL) { + return JNI_ENOMEM; + } + // Delay warning until here so that we've had a chance to process // the -XX:-PrintWarnings flag if (needs_hotspotrc_warning) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/compilationPolicy.cpp --- a/src/share/vm/runtime/compilationPolicy.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/compilationPolicy.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
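get_shared_archive_path() above centralizes what parse() used to do inline: default to the libjvm directory plus classes.jsa unless -XX:SharedArchiveFile overrides it, and moving the call to after option parsing is what makes the override work. A simplified standalone sketch of the default-path construction, with snprintf in place of the strncpy/strncat sequence; jvm_dir stands for the os::jvm_path() result with the file name already stripped:

#include <cstdio>
#include <cstdlib>
#include <cstring>

static char* default_archive_path(const char* jvm_dir, const char* file_sep) {
    size_t len = strlen(jvm_dir) + strlen(file_sep) + sizeof("classes.jsa"); // sizeof includes the NUL
    char* path = (char*) malloc(len);
    if (path != NULL) {
        snprintf(path, len, "%s%sclasses.jsa", jvm_dir, file_sep);
    }
    return path;  // NULL on allocation failure, as in the original
}

int main() {
    char* p = default_archive_path("/opt/jdk/lib/server", "/");  // illustrative paths
    if (p != NULL) { puts(p); free(p); }
    return 0;
}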
* * This code is free software; you can redistribute it and/or modify it @@ -116,6 +116,9 @@ // Returns true if m is allowed to be compiled bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) { + // allow any levels for WhiteBox + assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level"); + if (m->is_abstract()) return false; if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false; @@ -129,7 +132,13 @@ return false; } if (comp_level == CompLevel_all) { - return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization); + if (TieredCompilation) { + // enough to be compilable at any level for tiered + return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization); + } else { + // must be compilable at available level for non-tiered + return !m->is_not_compilable(CompLevel_highest_tier); + } } else if (is_compile(comp_level)) { return !m->is_not_compilable(comp_level); } @@ -205,8 +214,10 @@ // BUT also make sure the method doesn't look like it was never executed. // Set carry bit and reduce counter's value to min(count, CompileThreshold/2). - m->invocation_counter()->set_carry(); - m->backedge_counter()->set_carry(); + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + mcs->invocation_counter()->set_carry(); + mcs->backedge_counter()->set_carry(); assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed"); } @@ -214,8 +225,10 @@ void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) { // Delay next back-branch event but pump up invocation counter to triger // whole method compilation. - InvocationCounter* i = m->invocation_counter(); - InvocationCounter* b = m->backedge_counter(); + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + InvocationCounter* i = mcs->invocation_counter(); + InvocationCounter* b = mcs->backedge_counter(); // Don't set invocation_counter's value too low otherwise the method will // look like immature (ic < ~5300) which prevents the inlining based on @@ -234,7 +247,10 @@ class CounterDecay : public AllStatic { static jlong _last_timestamp; static void do_method(Method* m) { - m->invocation_counter()->decay(); + MethodCounters* mcs = m->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->decay(); + } } public: static void decay(); @@ -272,30 +288,44 @@ void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { ScopeDesc* sd = trap_scope; + MethodCounters* mcs; + InvocationCounter* c; for (; !sd->is_top(); sd = sd->sender()) { - // Reset ICs of inlined methods, since they can trigger compilations also. - sd->method()->invocation_counter()->reset(); + mcs = sd->method()->method_counters(); + if (mcs != NULL) { + // Reset ICs of inlined methods, since they can trigger compilations also. + mcs->invocation_counter()->reset(); + } } - InvocationCounter* c = sd->method()->invocation_counter(); - if (is_osr) { - // It was an OSR method, so bump the count higher. - c->set(c->state(), CompileThreshold); - } else { - c->reset(); + mcs = sd->method()->method_counters(); + if (mcs != NULL) { + c = mcs->invocation_counter(); + if (is_osr) { + // It was an OSR method, so bump the count higher. 
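The CompLevel_all branch above is easy to misread: under tiered compilation a method remains compilable if either C1 or C2 still accepts it (note the ||, where the removed code effectively required both), while the non-tiered case collapses to the single available level. A standalone sketch of the predicate with illustrative stand-in types:

#include <cstdio>

struct MethodState {
    bool not_c1;   // is_not_compilable(CompLevel_simple)
    bool not_c2;   // is_not_compilable(CompLevel_full_optimization)
};

static bool can_be_compiled_at_all(const MethodState& m, bool tiered) {
    if (tiered) {
        return !m.not_c1 || !m.not_c2;   // any open tier is enough
    }
    return !m.not_c2;                    // non-tiered: only the highest (sole) tier matters
}

int main() {
    MethodState m = { true, false };     // barred from C1, still allowed at C2
    printf("tiered: %d  non-tiered: %d\n",
           can_be_compiled_at_all(m, true), can_be_compiled_at_all(m, false));
    return 0;
}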
+ c->set(c->state(), CompileThreshold); + } else { + c->reset(); + } + mcs->backedge_counter()->reset(); } - sd->method()->backedge_counter()->reset(); } // This method can be called by any component of the runtime to notify the policy // that it's recommended to delay the complation of this method. void NonTieredCompPolicy::delay_compilation(Method* method) { - method->invocation_counter()->decay(); - method->backedge_counter()->decay(); + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->decay(); + mcs->backedge_counter()->decay(); + } } void NonTieredCompPolicy::disable_compilation(Method* method) { - method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); - method->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); + mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); + } } CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) { @@ -378,8 +408,10 @@ #ifndef PRODUCT void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) { if (TraceInvocationCounterOverflow) { - InvocationCounter* ic = m->invocation_counter(); - InvocationCounter* bc = m->backedge_counter(); + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + InvocationCounter* ic = mcs->invocation_counter(); + InvocationCounter* bc = mcs->backedge_counter(); ResourceMark rm; const char* msg = bci == InvocationEntryBci @@ -420,7 +452,7 @@ reset_counter_for_invocation_event(m); const char* comment = "count"; - if (is_compilation_enabled() && can_be_compiled(m)) { + if (is_compilation_enabled() && can_be_compiled(m, comp_level)) { nmethod* nm = m->code(); #ifdef GRAALVM if (m->queued_for_compilation()) { @@ -438,7 +470,7 @@ const int hot_count = m->backedge_count(); const char* comment = "backedge_count"; - if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m)) { + if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) { CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread); NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));) } @@ -449,8 +481,9 @@ #ifdef GRAALVM void GraalCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) { + MethodCounters* mcs = m->method_counters(); int hot_count = m->invocation_count(); - jlong hot_time = m->graal_invocation_time(); + jlong hot_time = (mcs == NULL) ? 
0 : mcs->graal_invocation_time(); reset_counter_for_invocation_event(m); if (is_compilation_enabled() && can_be_compiled(m)) { @@ -459,7 +492,9 @@ if (hot_count > 1) { jlong current_time = os::javaTimeNanos(); int time_per_call = (int) ((current_time - hot_time) / hot_count); - m->set_graal_invocation_time(current_time); + if (mcs != NULL) { + mcs->set_graal_invocation_time(current_time); + } if (UseNewCode) { if (m->queued_for_compilation()) { if (time_per_call < (m->graal_priority() / 5)) { @@ -525,7 +560,7 @@ reset_counter_for_invocation_event(m); const char* comment = "count"; - if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) { + if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) { ResourceMark rm(thread); frame fr = thread->last_frame(); assert(fr.is_interpreted_frame(), "must be interpreted"); @@ -563,7 +598,7 @@ const int hot_count = m->backedge_count(); const char* comment = "backedge_count"; - if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m)) { + if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) { CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread); NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));) } @@ -658,7 +693,7 @@ // If the caller method is too big or something then we do not want to // compile it just to inline a method - if (!can_be_compiled(next_m)) { + if (!can_be_compiled(next_m, CompLevel_any)) { msg = "caller cannot be compiled"; break; } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/deoptimization.cpp --- a/src/share/vm/runtime/deoptimization.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/deoptimization.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -667,18 +667,22 @@ // at an uncommon trap for an invoke (where the compiler // generates debug info before the invoke has executed) Bytecodes::Code cur_code = str.next(); - if (cur_code == Bytecodes::_invokevirtual || - cur_code == Bytecodes::_invokespecial || - cur_code == Bytecodes::_invokestatic || - cur_code == Bytecodes::_invokeinterface) { + if (cur_code == Bytecodes::_invokevirtual || + cur_code == Bytecodes::_invokespecial || + cur_code == Bytecodes::_invokestatic || + cur_code == Bytecodes::_invokeinterface || + cur_code == Bytecodes::_invokedynamic) { Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci()); Symbol* signature = invoke.signature(); ArgumentSizeComputer asc(signature); cur_invoke_parameter_size = asc.size(); - if (cur_code != Bytecodes::_invokestatic) { + if (invoke.has_receiver()) { // Add in receiver ++cur_invoke_parameter_size; } + if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) { + callee_size_of_parameters++; + } } if (str.bci() < max_bci) { Bytecodes::Code bc = str.next(); @@ -693,6 +697,7 @@ case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: case Bytecodes::_invokeinterface: + case Bytecodes::_invokedynamic: case Bytecodes::_athrow: break; default: { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/fprofiler.cpp --- a/src/share/vm/runtime/fprofiler.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/fprofiler.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 
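Most hunks in compilationPolicy.cpp (and fprofiler.cpp below) are one refactoring applied repeatedly: the profiling counters moved out of Method into a lazily allocated MethodCounters object, so each former m->invocation_counter() access becomes a method_counters() fetch plus a NULL guard for methods that were never profiled. A standalone sketch of the guard pattern with simplified stand-in types:

#include <cstdio>

struct InvocationCounter { int count = 0; void decay() { count /= 2; } };

struct MethodCountersLike {
    InvocationCounter invocation_counter;
    InvocationCounter backedge_counter;
};

struct MethodLike {
    MethodCountersLike* counters = nullptr;    // allocated on first profiling use
    MethodCountersLike* method_counters() { return counters; }
};

static void delay_compilation(MethodLike* m) { // mirrors the NULL-guarded variant above
    MethodCountersLike* mcs = m->method_counters();
    if (mcs != nullptr) {
        mcs->invocation_counter.decay();
        mcs->backedge_counter.decay();
    }                                          // never-profiled method: nothing to decay
}

int main() {
    MethodLike m;
    delay_compilation(&m);                     // safe before counters exist
    m.counters = new MethodCountersLike();
    m.counters->invocation_counter.count = 10;
    m.counters->backedge_counter.count = 20;
    delay_compilation(&m);
    printf("%d %d\n", m.counters->invocation_counter.count, m.counters->backedge_counter.count);
    delete m.counters;
    return 0;
}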
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -421,7 +421,8 @@ void print_method_on(outputStream* st) { ProfilerNode::print_method_on(st); - if (Verbose) method()->invocation_counter()->print_short(); + MethodCounters* mcs = method()->method_counters(); + if (Verbose && mcs != NULL) mcs->invocation_counter()->print_short(); } }; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/frame.cpp --- a/src/share/vm/runtime/frame.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/frame.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1008,6 +1008,7 @@ OopClosure* _f; int _offset; // the current offset, incremented with each argument bool _has_receiver; // true if the callee has a receiver + bool _has_appendix; // true if the call has an appendix frame _fr; RegisterMap* _reg_map; int _arg_size; @@ -1027,19 +1028,20 @@ } public: - CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, OopClosure* f, frame fr, const RegisterMap* reg_map) + CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map) : SignatureInfo(signature) { // initialize CompiledArgumentOopFinder _f = f; _offset = 0; _has_receiver = has_receiver; + _has_appendix = has_appendix; _fr = fr; _reg_map = (RegisterMap*)reg_map; - _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0); + _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0); int arg_size; - _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, &arg_size); + _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size); assert(arg_size == _arg_size, "wrong arg size"); } @@ -1049,12 +1051,16 @@ _offset++; } iterate_parameters(); + if (_has_appendix) { + handle_oop_offset(); + _offset++; + } } }; -void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f) { +void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) { ResourceMark rm; - CompiledArgumentOopFinder finder(signature, has_receiver, f, *this, reg_map); + CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map); finder.oops_do(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/frame.hpp --- a/src/share/vm/runtime/frame.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/frame.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -134,6 +134,7 @@ bool is_interpreted_frame() const; bool is_java_frame() const; bool is_entry_frame() const; // Java frame called from C? 
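The has_appendix flag threaded through CompiledArgumentOopFinder above covers the trailing appendix argument (the MethodHandle/CallSite oop that invokedynamic and invokehandle call sites pass implicitly): it takes one extra slot, exactly like an explicit receiver, and its oop must be visited too. The slot accounting as a tiny sketch, where sig_slots stands in for ArgumentSizeComputer(signature).size():

#include <cstdio>

static int compiled_arg_size(int sig_slots, bool has_receiver, bool has_appendix) {
    return sig_slots + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);  // as in the constructor above
}

int main() {
    printf("%d\n", compiled_arg_size(2, true, true));  // e.g. (II)V plus receiver plus appendix -> 4
    return 0;
}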
+ bool is_stub_frame() const; bool is_ignored_frame() const; bool is_native_frame() const; bool is_runtime_frame() const; @@ -411,7 +412,7 @@ oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const; // Oops-do's - void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f); + void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f); void oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true); private: diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/frame.inline.hpp --- a/src/share/vm/runtime/frame.inline.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/frame.inline.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -79,6 +79,10 @@ return StubRoutines::returns_to_call_stub(pc()); } +inline bool frame::is_stub_frame() const { + return StubRoutines::is_stub_code(pc()) || (_cb != NULL && _cb->is_adapter_blob()); +} + inline bool frame::is_first_frame() const { return is_entry_frame() && entry_frame_is_first(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/globals.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -300,12 +300,12 @@ }; -class IntFlagSetting { - intx val; - intx* flag; +class UIntFlagSetting { + uintx val; + uintx* flag; public: - IntFlagSetting(intx& fl, intx newValue) { flag = &fl; val = fl; fl = newValue; } - ~IntFlagSetting() { *flag = val; } + UIntFlagSetting(uintx& fl, uintx newValue) { flag = &fl; val = fl; fl = newValue; } + ~UIntFlagSetting() { *flag = val; } }; @@ -527,12 +527,12 @@ product(bool, ForceNUMA, false, \ "Force NUMA optimizations on single-node/UMA systems") \ \ - product(intx, NUMAChunkResizeWeight, 20, \ - "Percentage (0-100) used to weight the current sample when " \ + product(uintx, NUMAChunkResizeWeight, 20, \ + "Percentage (0-100) used to weigh the current sample when " \ "computing exponentially decaying average for " \ "AdaptiveNUMAChunkSizing") \ \ - product(intx, NUMASpaceResizeRate, 1*G, \ + product(uintx, NUMASpaceResizeRate, 1*G, \ "Do not reallocate more that this amount per collection") \ \ product(bool, UseAdaptiveNUMAChunkSizing, true, \ @@ -541,7 +541,7 @@ product(bool, NUMAStats, false, \ "Print NUMA stats in detailed heap information") \ \ - product(intx, NUMAPageScanRate, 256, \ + product(uintx, NUMAPageScanRate, 256, \ "Maximum number of pages to include in the page scan procedure") \ \ product_pd(bool, NeedsDeoptSuspend, \ @@ -729,7 +729,7 @@ diagnostic(bool, LogEvents, true, \ "Enable the various ring buffer event logs") \ \ - diagnostic(intx, LogEventsBufferEntries, 10, \ + diagnostic(uintx, LogEventsBufferEntries, 10, \ "Enable the various ring buffer event logs") \ \ product(bool, BytecodeVerificationRemote, true, \ @@ -1176,9 +1176,6 @@ product(bool, CompactFields, true, \ "Allocate nonstatic fields in gaps between previous fields") \ \ - notproduct(bool, PrintCompactFieldsSavings, false, \ - "Print how many words were saved with CompactFields") \ - \ notproduct(bool, PrintFieldLayout, false, \ 
"Print field layout for each class") \ \ @@ -1449,16 +1446,17 @@ product(bool, ParallelGCVerbose, false, \ "Verbose output for parallel GC.") \ \ - product(intx, ParallelGCBufferWastePct, 10, \ - "wasted fraction of parallel allocation buffer.") \ + product(uintx, ParallelGCBufferWastePct, 10, \ + "Wasted fraction of parallel allocation buffer.") \ \ diagnostic(bool, ParallelGCRetainPLAB, false, \ "Retain parallel allocation buffers across scavenges; " \ " -- disabled because this currently conflicts with " \ " parallel card scanning under certain conditions ") \ \ - product(intx, TargetPLABWastePct, 10, \ - "target wasted space in last buffer as pct of overall allocation")\ + product(uintx, TargetPLABWastePct, 10, \ + "Target wasted space in last buffer as percent of overall " \ + "allocation") \ \ product(uintx, PLABWeight, 75, \ "Percentage (0-100) used to weight the current sample when" \ @@ -1536,7 +1534,7 @@ product(bool, AlwaysPreTouch, false, \ "It forces all freshly committed pages to be pre-touched.") \ \ - product_pd(intx, CMSYoungGenPerWorker, \ + product_pd(uintx, CMSYoungGenPerWorker, \ "The maximum size of young gen chosen by default per GC worker " \ "thread available") \ \ @@ -1854,7 +1852,7 @@ product(bool, UseCMSInitiatingOccupancyOnly, false, \ "Only use occupancy as a crierion for starting a CMS collection") \ \ - product(intx, CMSIsTooFullPercentage, 98, \ + product(uintx, CMSIsTooFullPercentage, 98, \ "An absolute ceiling above which CMS will always consider the " \ "unloading of classes when class unloading is enabled") \ \ @@ -1893,7 +1891,7 @@ develop(uintx, PromotionFailureALotInterval, 5, \ "Total collections between promotion failures alot") \ \ - experimental(intx, WorkStealingSleepMillis, 1, \ + experimental(uintx, WorkStealingSleepMillis, 1, \ "Sleep time when sleep is used for yields") \ \ experimental(uintx, WorkStealingYieldsBeforeSleep, 5000, \ @@ -2037,7 +2035,7 @@ "Number of collections before the adaptive sizing is started") \ \ product(uintx, AdaptiveSizePolicyOutputInterval, 0, \ - "Collecton interval for printing information; zero => never") \ + "Collection interval for printing information; zero means never") \ \ product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \ "Use adaptive minimum footprint as a goal") \ @@ -2140,6 +2138,9 @@ product(intx, PrefetchFieldsAhead, -1, \ "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ \ + diagnostic(bool, VerifySilently, false, \ + "Don't print print the verification progress") \ + \ diagnostic(bool, VerifyDuringStartup, false, \ "Verify memory system before executing any Java code " \ "during VM initialization") \ @@ -2327,6 +2328,10 @@ "Print diagnostic message when GC is stalled" \ "by JNI critical section") \ \ + experimental(double, ObjectCountCutOffPercent, 0.5, \ + "The percentage of the used heap that the instances of a class " \ + "must occupy for the class to generate a trace event.") \ + \ /* GC log rotation setting */ \ \ product(bool, UseGCLogFileRotation, false, \ @@ -2988,7 +2993,7 @@ \ /* gc parameters */ \ product(uintx, InitialHeapSize, 0, \ - "Initial heap size (in bytes); zero means OldSize + NewSize") \ + "Initial heap size (in bytes); zero means use ergonomics") \ \ product(uintx, MaxHeapSize, ScaleForWordSize(96*M), \ "Maximum heap size (in bytes)") \ @@ -3069,7 +3074,7 @@ product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \ "Max expansion of Metaspace without full GC (in bytes)") \ \ - product(intx, QueuedAllocationWarningCount, 0, \ + product(uintx, 
QueuedAllocationWarningCount, 0, \ "Number of times an allocation that queues behind a GC " \ "will retry before printing a warning") \ \ @@ -3097,7 +3102,7 @@ "either completely full or completely empty. Par compact also" \ "has a smaller default value; see arguments.cpp.") \ \ - product(intx, MarkSweepAlwaysCompactCount, 4, \ + product(uintx, MarkSweepAlwaysCompactCount, 4, \ "How often should we fully compact the heap (ignoring the dead " \ "space parameters)") \ \ @@ -3202,6 +3207,9 @@ product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K, \ "When less than X space left, start code cache cleaning") \ \ + product(uintx, CodeCacheFlushingFraction, 2, \ + "Fraction of the code cache that is flushed when full") \ + \ /* interpreter debugging */ \ develop(intx, BinarySwitchThreshold, 5, \ "Minimal number of lookupswitch entries for rewriting to binary " \ @@ -3246,8 +3254,9 @@ develop(bool, ReplayCompiles, false, \ "Enable replay of compilations from ReplayDataFile") \ \ - develop(ccstr, ReplayDataFile, "replay.txt", \ - "file containing compilation replay information") \ + product(ccstr, ReplayDataFile, NULL, \ + "File containing compilation replay information " \ + "[default: ./replay_pid%p.log] (%p replaced with pid)") \ \ develop(intx, ReplaySuppressInitializers, 2, \ "Controls handling of class initialization during replay" \ @@ -3260,8 +3269,8 @@ develop(bool, ReplayIgnoreInitErrors, false, \ "Ignore exceptions thrown during initialization for replay") \ \ - develop(bool, DumpReplayDataOnError, true, \ - "record replay data for crashing compiler threads") \ + product(bool, DumpReplayDataOnError, true, \ + "Record replay data for crashing compiler threads") \ \ product(bool, CICompilerCountPerCPU, false, \ "1 compiler thread for log(N CPUs)") \ @@ -3270,7 +3279,9 @@ "Fire OutOfMemoryErrors throughout CI for testing the compiler " \ "(non-negative value throws OOM after this many CI accesses " \ "in each compile)") \ - \ + notproduct(intx, CICrashAt, -1, \ + "id of compilation to trigger assert in compiler thread for " \ + "the purpose of testing, e.g. generation of replay data") \ notproduct(bool, CIObjectFactoryVerify, false, \ "enable potentially expensive verification in ciObjectFactory") \ \ @@ -3452,6 +3463,10 @@ "Start profiling in interpreter if the counters exceed tier 3" \ "thresholds by the specified percentage") \ \ + product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \ + "Increase the compile threshold for C1 compilation if the code " \ + "cache is filled by the specified percentage.") \ + \ product(intx, TieredRateUpdateMinTime, 1, \ "Minimum rate sampling interval (in milliseconds)") \ \ @@ -3694,13 +3709,19 @@ product(bool , AllowNonVirtualCalls, false, \ "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \ \ - product(bool, TraceWarpLoading, false, \ - "trace external GPU warp loading") \ + diagnostic(ccstr, SharedArchiveFile, NULL, \ + "Override the default location of the CDS archive file") \ \ experimental(uintx, ArrayAllocatorMallocLimit, \ SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx), \ "Allocation less than this value will be allocated " \ "using malloc.
Larger allocations will use mmap.") \ + \ + product(bool, EnableTracing, false, \ + "Enable event-based tracing") \ + product(bool, UseLockedTracing, false, \ + "Use locked-tracing when doing event-based tracing") + /* * Macros for factoring of globals diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/handles.cpp --- a/src/share/vm/runtime/handles.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/handles.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -179,6 +179,22 @@ _thread->set_last_handle_mark(previous_handle_mark()); } +void* HandleMark::operator new(size_t size) { + return AllocateHeap(size, mtThread); +} + +void* HandleMark::operator new [] (size_t size) { + return AllocateHeap(size, mtThread); +} + +void HandleMark::operator delete(void* p) { + FreeHeap(p, mtThread); +} + +void HandleMark::operator delete[](void* p) { + FreeHeap(p, mtThread); +} + #ifdef ASSERT NoHandleMark::NoHandleMark() { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/handles.hpp --- a/src/share/vm/runtime/handles.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/handles.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -281,7 +281,7 @@ // across the HandleMark boundary. // The base class of HandleMark should have been StackObj but we also heap allocate -// a HandleMark when a thread is created. +// a HandleMark when a thread is created. The operator new is for this special case. class HandleMark { private: @@ -308,6 +308,11 @@ void push(); // called in the destructor of HandleMarkCleaner void pop_and_restore(); + // overloaded operators + void* operator new(size_t size); + void* operator new [](size_t size); + void operator delete(void* p); + void operator delete[](void* p); }; //------------------------------------------------------------------------------------------------------------------------ diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/java.cpp --- a/src/share/vm/runtime/java.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/java.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
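The operator new/delete overloads added to HandleMark above exist for the special case the comment names: a HandleMark is normally a stack object, but one is heap-allocated per created thread, and that allocation should go through the VM's tracked allocator instead of global ::new. A standalone sketch of the pattern, with AllocateHeap/FreeHeap modeled as malloc/free plus a memory-type tag:

#include <cstddef>
#include <cstdlib>

enum MEMFLAGS { mtThread };   // stand-in for HotSpot's memory-type tags

static void* AllocateHeap(size_t size, MEMFLAGS) { return malloc(size); }  // simplified model
static void  FreeHeap(void* p, MEMFLAGS)         { free(p); }              // simplified model

class HandleMarkLike {
public:
    void* operator new(size_t size)   { return AllocateHeap(size, mtThread); }
    void* operator new[](size_t size) { return AllocateHeap(size, mtThread); }
    void  operator delete(void* p)    { FreeHeap(p, mtThread); }
    void  operator delete[](void* p)  { FreeHeap(p, mtThread); }
};

int main() {
    HandleMarkLike* hm = new HandleMarkLike();  // routed through the tracked allocator
    delete hm;                                  // routed through the matching free
    return 0;
}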
* * This code is free software; you can redistribute it and/or modify it @@ -63,7 +63,6 @@ #include "services/memReporter.hpp" #include "services/memTracker.hpp" #include "trace/tracing.hpp" -#include "trace/traceEventTypes.hpp" #include "utilities/dtrace.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/histogram.hpp" @@ -540,9 +539,12 @@ JvmtiExport::post_thread_end(thread); } - EVENT_BEGIN(TraceEventThreadEnd, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj()))); + + EventThreadEnd event; + if (event.should_commit()) { + event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj())); + event.commit(); + } // Always call even when there are not JVMTI environments yet, since environments // may be attached late and JVMTI must track phases of VM execution diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/jniHandles.cpp --- a/src/share/vm/runtime/jniHandles.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/jniHandles.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -188,7 +188,6 @@ class AlwaysAliveClosure: public BoolObjectClosure { public: bool do_object_b(oop obj) { return true; } - void do_object(oop obj) { assert(false, "Don't call"); } }; class CountHandleClosure: public OopClosure { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/mutexLocker.cpp --- a/src/share/vm/runtime/mutexLocker.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/mutexLocker.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,7 @@ Mutex* JNIGlobalHandle_lock = NULL; Mutex* JNIHandleBlockFreeList_lock = NULL; Mutex* JNICachedItableIndex_lock = NULL; +Mutex* MemberNameTable_lock = NULL; Mutex* JmethodIdCreation_lock = NULL; Mutex* JfieldIdCreation_lock = NULL; Monitor* JNICritical_lock = NULL; @@ -256,6 +257,7 @@ def(Heap_lock , Monitor, nonleaf+1, false); def(JfieldIdCreation_lock , Mutex , nonleaf+1, true ); // jfieldID, Used in VM_Operation def(JNICachedItableIndex_lock , Mutex , nonleaf+1, false); // Used to cache an itable index during JNI invoke + def(MemberNameTable_lock , Mutex , nonleaf+1, false); // Used to protect MemberNameTable def(CompiledIC_lock , Mutex , nonleaf+2, false); // locks VtableStubs_lock, InlineCacheBuffer_lock def(CompileTaskAlloc_lock , Mutex , nonleaf+2, true ); @@ -272,13 +274,12 @@ def(MethodCompileQueue_lock , Monitor, nonleaf+4, true ); def(Debug2_lock , Mutex , nonleaf+4, true ); def(Debug3_lock , Mutex , nonleaf+4, true ); - def(ProfileVM_lock , Monitor, nonleaf+4, false); // used for profiling of the VMThread + def(ProfileVM_lock , Monitor, special, false); // used for profiling of the VMThread def(CompileThread_lock , Monitor, nonleaf+5, false ); - def(JfrQuery_lock , Monitor, nonleaf, true); // JFR locks, keep these in consecutive order - def(JfrMsg_lock , Monitor, nonleaf+2, true); - def(JfrBuffer_lock , Mutex, nonleaf+3, true); - def(JfrStream_lock , Mutex, nonleaf+4, true); + def(JfrMsg_lock , Monitor, leaf, true); + def(JfrBuffer_lock , Mutex, nonleaf+1, true); + def(JfrStream_lock , Mutex, nonleaf+2, true); def(PeriodicTask_lock , Monitor, nonleaf+5, true); #ifdef GRAAL def(GraalDeoptLeafGraphIds_lock , Mutex, special, true); diff -r 
e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/mutexLocker.hpp --- a/src/share/vm/runtime/mutexLocker.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/mutexLocker.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -51,6 +51,7 @@ extern Mutex* JNIGlobalHandle_lock; // a lock on creating JNI global handles extern Mutex* JNIHandleBlockFreeList_lock; // a lock on the JNI handle block free list extern Mutex* JNICachedItableIndex_lock; // a lock on caching an itable index during JNI invoke +extern Mutex* MemberNameTable_lock; // a lock on the MemberNameTable updates extern Mutex* JmethodIdCreation_lock; // a lock on creating JNI method identifiers extern Mutex* JfieldIdCreation_lock; // a lock on creating JNI static field identifiers extern Monitor* JNICritical_lock; // a lock used while entering and exiting JNI critical regions, allows GC to sometimes get in diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/objectMonitor.cpp --- a/src/share/vm/runtime/objectMonitor.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/objectMonitor.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -36,7 +36,10 @@ #include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "services/threadService.hpp" +#include "trace/tracing.hpp" +#include "trace/traceMacros.hpp" #include "utilities/dtrace.hpp" +#include "utilities/macros.hpp" #include "utilities/preserveException.hpp" #ifdef TARGET_OS_FAMILY_linux # include "os_linux.inline.hpp" @@ -371,6 +374,8 @@ // Ensure the object-monitor relationship remains stable while there's contention. Atomic::inc_ptr(&_count); + EventJavaMonitorEnter event; + { // Change java thread status to indicate blocked on monitor enter. JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this); @@ -402,7 +407,7 @@ // _recursions = 0 ; _succ = NULL ; - exit (Self) ; + exit (false, Self) ; jt->java_suspend_self(); } @@ -435,6 +440,14 @@ if (JvmtiExport::should_post_monitor_contended_entered()) { JvmtiExport::post_monitor_contended_entered(jt, this); } + + if (event.should_commit()) { + event.set_klass(((oop)this->object())->klass()); + event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid); + event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); + event.commit(); + } + if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) { ObjectMonitor::_sync_ContendedLockAttempts->inc() ; } @@ -917,7 +930,7 @@ // Both impinge on OS scalability. Given that, at most one thread parked on // a monitor will use a timer. 
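The tracing hunks in this changeset (EventThreadEnd in java.cpp above, EventJavaMonitorEnter and EventJavaMonitorWait in objectMonitor.cpp) all share one shape: construct the event object early so it can capture its start time, do the work, then test should_commit() before paying for the field setters and commit(). A standalone sketch with a hypothetical EventLike type standing in for the generated event classes:

#include <cstdio>

struct EventLike {
    bool enabled;   // in the VM this comes from the tracing framework
    long value;
    explicit EventLike(bool on) : enabled(on), value(0) {}  // construction marks the begin time
    bool should_commit() const { return enabled; }
    void set_value(long v) { value = v; }
    void commit() const { printf("event committed: %ld\n", value); }
};

int main() {
    EventLike event(true);
    // ... the monitor enter / wait / thread end being traced happens here ...
    if (event.should_commit()) {   // cheap guard: setters and commit only run when enabled
        event.set_value(42);
        event.commit();
    }
    return 0;
}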
-void ATTR ObjectMonitor::exit(TRAPS) { +void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) { Thread * Self = THREAD ; if (THREAD != _owner) { if (THREAD->is_lock_owned((address) _owner)) { @@ -954,6 +967,14 @@ _Responsible = NULL ; } +#if INCLUDE_TRACE + // get the owner's thread id for the MonitorEnter event + // if it is enabled and the thread isn't suspended + if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) { + _previous_owner_tid = SharedRuntime::get_java_tid(Self); + } +#endif + for (;;) { assert (THREAD == _owner, "invariant") ; @@ -1343,7 +1364,7 @@ guarantee(Self == _owner, "complete_exit not owner"); intptr_t save = _recursions; // record the old recursion count _recursions = 0; // set the recursion level to be 0 - exit (Self) ; // exit the monitor + exit (true, Self) ; // exit the monitor guarantee (_owner != Self, "invariant"); return save; } @@ -1397,6 +1418,20 @@ for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ; return v ; } + +// helper method for posting a monitor wait event +void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event, + jlong notifier_tid, + jlong timeout, + bool timedout) { + event->set_klass(((oop)this->object())->klass()); + event->set_timeout((TYPE_ULONG)timeout); + event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); + event->set_notifier((TYPE_OSTHREAD)notifier_tid); + event->set_timedOut((TYPE_BOOLEAN)timedout); + event->commit(); +} + // ----------------------------------------------------------------------------- // Wait/Notify/NotifyAll // @@ -1412,6 +1447,8 @@ // Throw IMSX or IEX. CHECK_OWNER(); + EventJavaMonitorWait event; + // check for a pending interrupt if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { // post monitor waited event. Note that this is past-tense, we are done waiting. @@ -1420,10 +1457,14 @@ // wait was not timed out due to thread interrupt. 
JvmtiExport::post_monitor_waited(jt, this, false); } + if (event.should_commit()) { + post_monitor_wait_event(&event, 0, millis, false); + } TEVENT (Wait - Throw IEX) ; THROW(vmSymbols::java_lang_InterruptedException()); return ; } + TEVENT (Wait) ; assert (Self->_Stalled == 0, "invariant") ; @@ -1455,7 +1496,7 @@ intptr_t save = _recursions; // record the old recursion count _waiters++; // increment the number of waiters _recursions = 0; // set the recursion level to be 1 - exit (Self) ; // exit the monitor + exit (true, Self) ; // exit the monitor guarantee (_owner != Self, "invariant") ; // As soon as the ObjectMonitor's ownership is dropped in the exit() @@ -1555,6 +1596,11 @@ if (JvmtiExport::should_post_monitor_waited()) { JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT); } + + if (event.should_commit()) { + post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT); + } + OrderAccess::fence() ; assert (Self->_Stalled != 0, "invariant") ; @@ -1634,6 +1680,8 @@ iterator->TState = ObjectWaiter::TS_ENTER ; } iterator->_notified = 1 ; + Thread * Self = THREAD; + iterator->_notifier_tid = Self->osthread()->thread_id(); ObjectWaiter * List = _EntryList ; if (List != NULL) { @@ -1758,6 +1806,8 @@ guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; guarantee (iterator->_notified == 0, "invariant") ; iterator->_notified = 1 ; + Thread * Self = THREAD; + iterator->_notifier_tid = Self->osthread()->thread_id(); if (Policy != 4) { iterator->TState = ObjectWaiter::TS_ENTER ; } @@ -2404,7 +2454,7 @@ size_t sz = strlen (SyncKnobs) ; char * knobs = (char *) malloc (sz + 2) ; if (knobs == NULL) { - vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ; + vm_exit_out_of_memory (sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs") ; guarantee (0, "invariant") ; } strcpy (knobs, SyncKnobs) ; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/objectMonitor.hpp --- a/src/share/vm/runtime/objectMonitor.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/objectMonitor.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,6 @@ #include "runtime/park.hpp" #include "runtime/perfData.hpp" - // ObjectWaiter serves as a "proxy" or surrogate thread. // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific // ParkEvent instead. Beware, however, that the JVMTI code @@ -43,6 +42,7 @@ ObjectWaiter * volatile _next; ObjectWaiter * volatile _prev; Thread* _thread; + jlong _notifier_tid; ParkEvent * _event; volatile int _notified ; volatile TStates TState ; @@ -55,6 +55,9 @@ void wait_reenter_end(ObjectMonitor *mon); }; +// forward declaration to avoid including tracing.hpp +class EventJavaMonitorWait; + // WARNING: // This is a very sensitive and fragile class. DO NOT make any // change unless you are fully aware of the underlying semantics.
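The objectMonitor.hpp hunk above uses a forward declaration so the header can mention EventJavaMonitorWait without pulling tracing.hpp into every file that includes objectMonitor.hpp. A minimal single-file sketch of why a pointer parameter only needs an incomplete type; the trailing definition stands in for what tracing.hpp would provide:

// What the header needs: only an incomplete type, because the event is
// passed by pointer and never dereferenced at this point.
class EventJavaMonitorWait;                         // forward declaration

class Monitor {
 public:
  void post_wait_event(EventJavaMonitorWait* e);    // pointer: incomplete type suffices
};

// What the .cpp file would get from tracing.hpp; only implementation
// files pay for the full include.
class EventJavaMonitorWait {
 public:
  void commit() {}
};

void Monitor::post_wait_event(EventJavaMonitorWait* e) { e->commit(); }

int main() {
  EventJavaMonitorWait ev;
  Monitor m;
  m.post_wait_event(&ev);
}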
@@ -151,6 +154,7 @@ _SpinFreq = 0 ; _SpinClock = 0 ; OwnerIsThread = 0 ; + _previous_owner_tid = 0; } ~ObjectMonitor() { @@ -192,7 +196,7 @@ bool try_enter (TRAPS) ; void enter(TRAPS); - void exit(TRAPS); + void exit(bool not_suspended, TRAPS); void wait(jlong millis, bool interruptable, TRAPS); void notify(TRAPS); void notifyAll(TRAPS); @@ -218,6 +222,10 @@ void ctAsserts () ; void ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ; bool ExitSuspendEquivalent (JavaThread * Self) ; + void post_monitor_wait_event(EventJavaMonitorWait * event, + jlong notifier_tid, + jlong timeout, + bool timedout); private: friend class ObjectSynchronizer; @@ -240,6 +248,7 @@ protected: // protected for jvmtiRawMonitor void * volatile _owner; // pointer to owning thread OR BasicLock + volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor volatile intptr_t _recursions; // recursion count, 0 for first entry private: int OwnerIsThread ; // _owner is (Thread *) vs SP/BasicLock @@ -303,6 +312,18 @@ public: static int Knob_Verbose; static int Knob_SpinLimit; + void* operator new (size_t size) { + return AllocateHeap(size, mtInternal); + } + void* operator new[] (size_t size) { + return operator new (size); + } + void operator delete(void* p) { + FreeHeap(p, mtInternal); + } + void operator delete[] (void *p) { + operator delete(p); + } }; #undef TEVENT diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/os.cpp --- a/src/share/vm/runtime/os.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/os.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -265,8 +265,7 @@ VMThread::execute(&op1); Universe::print_heap_at_SIGBREAK(); if (PrintClassHistogram) { - VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */, - true /* need_prologue */); + VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */); VMThread::execute(&op1); } if (JvmtiExport::should_post_data_dump()) { @@ -1447,11 +1446,16 @@ return (int) i; } +void os::SuspendedThreadTask::run() { + assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this"); + internal_do_task(); + _done = true; +} + bool os::create_stack_guard_pages(char* addr, size_t bytes) { return os::pd_create_stack_guard_pages(addr, bytes); } - char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { char* result = pd_reserve_memory(bytes, addr, alignment_hint); if (result != NULL) { @@ -1460,6 +1464,18 @@ return result; } + +char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint, + MEMFLAGS flags) { + char* result = pd_reserve_memory(bytes, addr, alignment_hint); + if (result != NULL) { + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + MemTracker::record_virtual_memory_type((address)result, flags); + } + + return result; +} + char* os::attempt_reserve_memory_at(size_t bytes, char* addr) { char* result = pd_attempt_reserve_memory_at(bytes, addr); if (result != NULL) { @@ -1542,3 +1558,19 @@ pd_realign_memory(addr, bytes, alignment_hint); } +#ifndef TARGET_OS_FAMILY_windows +/* try to switch state from state "from" to state "to" + * returns the state set after the method is complete + */ +os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from, + os::SuspendResume::State to) +{ + os::SuspendResume::State result = + (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from); + if (result == from) { + // success + return to; + 
} + return result; +} +#endif diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/os.hpp --- a/src/share/vm/runtime/os.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/os.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -255,6 +255,8 @@ static int vm_allocation_granularity(); static char* reserve_memory(size_t bytes, char* addr = 0, size_t alignment_hint = 0); + static char* reserve_memory(size_t bytes, char* addr, + size_t alignment_hint, MEMFLAGS flags); static char* reserve_memory_aligned(size_t size, size_t alignment); static char* attempt_reserve_memory_at(size_t bytes, char* addr); static void split_reserved_memory(char *base, size_t size, @@ -454,6 +456,7 @@ // File i/o operations static const int default_file_open_flags(); static int open(const char *path, int oflag, int mode); + static FILE* open(int fd, const char* mode); static int close(int fd); static jlong lseek(int fd, jlong offset, int whence); static char* native_path(char *path); @@ -477,7 +480,7 @@ static const char* dll_file_extension(); static const char* get_temp_directory(); - static const char* get_current_directory(char *buf, int buflen); + static const char* get_current_directory(char *buf, size_t buflen); // Builds a platform-specific full library path given a ld path and lib name // Returns true if buffer contains full path to existing file, false otherwise @@ -778,6 +781,104 @@ // ResumeThread call) static void pause(); + class SuspendedThreadTaskContext { + public: + SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {} + Thread* thread() const { return _thread; } + void* ucontext() const { return _ucontext; } + private: + Thread* _thread; + void* _ucontext; + }; + + class SuspendedThreadTask { + public: + SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {} + virtual ~SuspendedThreadTask() {} + void run(); + bool is_done() { return _done; } + virtual void do_task(const SuspendedThreadTaskContext& context) = 0; + protected: + private: + void internal_do_task(); + Thread* _thread; + bool _done; + }; + +#ifndef TARGET_OS_FAMILY_windows + // Suspend/resume support + // Protocol: + // + // a thread starts in SR_RUNNING + // + // SR_RUNNING can go to + // * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it + // SR_SUSPEND_REQUEST can go to + // * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout) + // * SR_SUSPENDED if the stopped thread receives the signal and switches state + // SR_SUSPENDED can go to + // * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume + // SR_WAKEUP_REQUEST can go to + // * SR_RUNNING when the stopped thread receives the signal + // * SR_WAKEUP_REQUEST on timeout (resend the signal and try again) + class SuspendResume { + public: + enum State { + SR_RUNNING, + SR_SUSPEND_REQUEST, + SR_SUSPENDED, + SR_WAKEUP_REQUEST + }; + + private: + volatile State _state; + + private: + /* try to switch state from state "from" to state "to" + * returns the state set after the method is complete + */ + State switch_state(State from, State to); + + public: + SuspendResume() : _state(SR_RUNNING) { } + + State state() const { return _state; } + + State request_suspend() { + return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST); + } + + State cancel_suspend() { + return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING); + } + + State suspended() { + return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED); + } + + State request_wakeup() { + return switch_state(SR_SUSPENDED, 
SR_WAKEUP_REQUEST); + } + + State running() { + return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING); + } + + bool is_running() const { + return _state == SR_RUNNING; + } + + bool is_suspend_request() const { + return _state == SR_SUSPEND_REQUEST; + } + + bool is_suspended() const { + return _state == SR_SUSPENDED; + } + }; +#endif + + protected: static long _rand_seed; // seed for random number generator static int _processor_count; // number of processors diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/perfData.cpp --- a/src/share/vm/runtime/perfData.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/perfData.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -323,6 +323,10 @@ } } +PerfData* PerfDataManager::find_by_name(const char* name) { + return _all->find_by_name(name); +} + PerfDataList* PerfDataManager::all() { MutexLocker ml(PerfDataManager_lock); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/perfData.hpp --- a/src/share/vm/runtime/perfData.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/perfData.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -693,6 +693,9 @@ // the given name. static bool exists(const char* name) { return _all->contains(name); } + // method to search for an instrumentation object by name + static PerfData* find_by_name(const char* name); + // method to map a CounterNS enumeration to a namespace string static const char* ns_to_string(CounterNS ns) { return _name_spaces[ns]; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/reflection.cpp --- a/src/share/vm/runtime/reflection.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/reflection.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
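The SuspendResume class introduced in os.hpp above is a four-state machine advanced only by compare-and-swap, so a thread can observe exactly which transition it won or lost. A compilable sketch using std::atomic in place of HotSpot's Atomic::cmpxchg, keeping the same return convention (the state in effect after the attempt):

#include <atomic>

// Minimal sketch of the suspend/resume protocol described above.
class SuspendResume {
 public:
  enum State { SR_RUNNING, SR_SUSPEND_REQUEST, SR_SUSPENDED, SR_WAKEUP_REQUEST };

  State switch_state(State from, State to) {
    State expected = from;
    if (_state.compare_exchange_strong(expected, to)) {
      return to;          // transition succeeded
    }
    return expected;      // lost the race; report the state actually observed
  }

  State request_suspend() { return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST); }
  State suspended()       { return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED); }
  State request_wakeup()  { return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST); }
  State running()         { return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING); }

 private:
  std::atomic<State> _state{SR_RUNNING};
};

int main() {
  SuspendResume sr;
  // watcher thread asks, target thread acknowledges, watcher resumes:
  SuspendResume::State s1 = sr.request_suspend();  // SR_RUNNING -> SR_SUSPEND_REQUEST
  SuspendResume::State s2 = sr.suspended();        // SR_SUSPEND_REQUEST -> SR_SUSPENDED
  SuspendResume::State s3 = sr.request_wakeup();   // SR_SUSPENDED -> SR_WAKEUP_REQUEST
  SuspendResume::State s4 = sr.running();          // SR_WAKEUP_REQUEST -> SR_RUNNING
  return (s1 == SuspendResume::SR_SUSPEND_REQUEST &&
          s2 == SuspendResume::SR_SUSPENDED &&
          s3 == SuspendResume::SR_WAKEUP_REQUEST &&
          s4 == SuspendResume::SR_RUNNING) ? 0 : 1;
}

Returning the observed state on failure is what lets the watcher implement the timeout path in the protocol comment: if suspended() is not reached in time, cancel_suspend() either wins the CAS back to SR_RUNNING or learns that the target got there first.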
* * This code is free software; you can redistribute it and/or modify it @@ -44,8 +44,6 @@ #include "runtime/signature.hpp" #include "runtime/vframe.hpp" -#define JAVA_1_5_VERSION 49 - static void trace_class_resolution(Klass* to_class) { ResourceMark rm; int line_number = -1; @@ -375,7 +373,7 @@ } } klass = klass->array_klass(dim, CHECK_NULL); - oop obj = ArrayKlass::cast(klass)->multi_allocate(len, dimensions, THREAD); + oop obj = ArrayKlass::cast(klass)->multi_allocate(len, dimensions, CHECK_NULL); assert(obj->is_array(), "just checking"); return arrayOop(obj); } @@ -507,9 +505,11 @@ under_host_klass(accessee_ik, accessor)) return true; - if (RelaxAccessControlCheck || - (accessor_ik->major_version() < JAVA_1_5_VERSION && - accessee_ik->major_version() < JAVA_1_5_VERSION)) { + if ((RelaxAccessControlCheck && + accessor_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION && + accessee_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION) || + (accessor_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION && + accessee_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION)) { return classloader_only && Verifier::relax_verify_for(accessor_ik->class_loader()) && accessor_ik->protection_domain() == accessee_ik->protection_domain() && @@ -817,6 +817,10 @@ typeArrayOop an_oop = Annotations::make_java_array(method->parameter_annotations(), CHECK_NULL); java_lang_reflect_Constructor::set_parameter_annotations(ch(), an_oop); } + if (java_lang_reflect_Constructor::has_type_annotations_field()) { + typeArrayOop an_oop = Annotations::make_java_array(method->type_annotations(), CHECK_NULL); + java_lang_reflect_Constructor::set_type_annotations(ch(), an_oop); + } return ch(); } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/reflectionUtils.hpp --- a/src/share/vm/runtime/reflectionUtils.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/reflectionUtils.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -136,10 +136,10 @@ } }; -class FilteredField { +class FilteredField : public CHeapObj { private: Klass* _klass; - int _field_offset; + int _field_offset; public: FilteredField(Klass* klass, int field_offset) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/serviceThread.cpp --- a/src/share/vm/runtime/serviceThread.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/serviceThread.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,8 @@ #include "runtime/mutexLocker.hpp" #include "prims/jvmtiImpl.hpp" #include "services/gcNotifier.hpp" +#include "services/diagnosticArgument.hpp" +#include "services/diagnosticFramework.hpp" ServiceThread* ServiceThread::_instance = NULL; @@ -83,6 +85,7 @@ bool sensors_changed = false; bool has_jvmti_events = false; bool has_gc_notification_event = false; + bool has_dcmd_notification_event = false; JvmtiDeferredEvent jvmti_event; { // Need state transition ThreadBlockInVM so that this thread @@ -98,7 +101,8 @@ MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) && !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) && - !(has_gc_notification_event = GCNotifier::has_event())) { + !(has_gc_notification_event = GCNotifier::has_event()) && + !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) { // wait until one of the sensors has pending requests, or there is a // pending JVMTI event or JMX GC notification to post Service_lock->wait(Mutex::_no_safepoint_check_flag); @@ -120,6 +124,10 @@ if(has_gc_notification_event) { GCNotifier::sendNotification(CHECK); } + + if(has_dcmd_notification_event) { + DCmdFactory::send_notification(CHECK); + } } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/sharedRuntime.cpp --- a/src/share/vm/runtime/sharedRuntime.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/sharedRuntime.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -937,15 +937,23 @@ } -JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...)) +/** + * Throws a java/lang/UnsatisfiedLinkError. The address of this method is + * installed in the native function entry of all native Java methods before + * they get linked to their actual native methods. + * + * \note + * This method actually never gets called! The reason is that + * the interpreter's native entries call NativeLookup::lookup() which + * throws the exception when the lookup fails. The exception is then + * caught and forwarded on the return from NativeLookup::lookup() call + * before the call to the native function. This might change in the future. + */ +JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...)) { - THROW(vmSymbols::java_lang_UnsatisfiedLinkError()); -} -JNI_END - -JNI_ENTRY(void, throw_unsupported_operation_exception(JNIEnv* env, ...)) -{ - THROW(vmSymbols::java_lang_UnsupportedOperationException()); + // We return a bad value here to make sure that the exception is + // forwarded before we look at the return value. + THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badJNIHandle); } JNI_END @@ -953,10 +961,6 @@ return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error); } -address SharedRuntime::native_method_throw_unsupported_operation_exception_entry() { - return CAST_FROM_FN_PTR(address, &throw_unsupported_operation_exception); -} - #ifndef PRODUCT JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2)) @@ -1381,12 +1385,6 @@ assert(stub_frame.is_runtime_frame(), "sanity check"); frame caller_frame = stub_frame.sender(&reg_map); - // MethodHandle invokes don't have a CompiledIC and should always - // simply redispatch to the callee_target.
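The serviceThread.cpp hunk above extends a classic one-lock, one-condition, many-sources wait loop: each producer sets its own flag and notifies, and the service thread re-tests every flag before sleeping, which is why adding the new dcmd notification source costs only one more predicate and one more dispatch branch. A rough standalone analogue with std::condition_variable; the flag and function names are invented for the sketch:

#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex              service_lock;
std::condition_variable service_cv;
bool sensors_changed = false, has_jvmti_events = false,
     has_gc_notification = false, has_dcmd_notification = false;

void service_thread_loop() {
  for (;;) {
    bool sensors, jvmti, gc, dcmd;
    {
      std::unique_lock<std::mutex> ml(service_lock);
      // sleep only while no source has pending work
      service_cv.wait(ml, [] {
        return sensors_changed || has_jvmti_events ||
               has_gc_notification || has_dcmd_notification;
      });
      // snapshot and clear the flags while still holding the lock
      sensors = sensors_changed;       sensors_changed = false;
      jvmti   = has_jvmti_events;      has_jvmti_events = false;
      gc      = has_gc_notification;   has_gc_notification = false;
      dcmd    = has_dcmd_notification; has_dcmd_notification = false;
    }
    // ... dispatch outside the lock, one handler per source ...
    (void)sensors; (void)jvmti; (void)gc; (void)dcmd;
  }
}

int main() {
  std::thread t(service_thread_loop);
  { std::lock_guard<std::mutex> g(service_lock); has_dcmd_notification = true; }
  service_cv.notify_one();
  t.detach();   // sketch only; the real service thread runs for the VM's lifetime
}

Dispatching after the lock is released mirrors the original: the handlers may block or take other locks, so they must not run under Service_lock.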
- address sender_pc = caller_frame.pc(); - CodeBlob* sender_cb = caller_frame.cb(); - nmethod* sender_nm = sender_cb->as_nmethod_or_null(); - if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame()) { Method* callee = thread->callee_target(); @@ -2793,7 +2791,7 @@ return regs.first(); } -VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, int* arg_size) { +VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) { // This method is returning a data structure allocating as a // ResourceObject, so do not put any ResourceMarks in here. char *s = sig->as_C_string(); @@ -2837,6 +2835,11 @@ default : ShouldNotReachHere(); } } + + if (has_appendix) { + sig_bt[cnt++] = T_OBJECT; + } + assert( cnt < 256, "grow table size" ); int comp_args_on_stack; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/sharedRuntime.hpp --- a/src/share/vm/runtime/sharedRuntime.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/sharedRuntime.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -419,7 +419,7 @@ // Convert a sig into a calling convention register layout // and find interesting things about it. - static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, int *arg_size); + static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int *arg_size); static VMReg name_for_receiver(); // "Top of Stack" slots that may be unused by the calling convention but must diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/simpleThresholdPolicy.cpp --- a/src/share/vm/runtime/simpleThresholdPolicy.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -153,8 +153,11 @@ // Set carry flags on the counters if necessary void SimpleThresholdPolicy::handle_counter_overflow(Method* method) { - set_carry_if_necessary(method->invocation_counter()); - set_carry_if_necessary(method->backedge_counter()); + MethodCounters *mcs = method->method_counters(); + if (mcs != NULL) { + set_carry_if_necessary(mcs->invocation_counter()); + set_carry_if_necessary(mcs->backedge_counter()); + } MethodData* mdo = method->method_data(); if (mdo != NULL) { set_carry_if_necessary(mdo->invocation_counter()); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/stubRoutines.cpp --- a/src/share/vm/runtime/stubRoutines.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/stubRoutines.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -147,7 +147,7 @@ TraceTime timer("StubRoutines generation 1", TraceStartupTime); _code1 = BufferBlob::create("StubRoutines (1)", code_size1); if (_code1 == NULL) { - vm_exit_out_of_memory(code_size1, "CodeCache: no room for StubRoutines (1)"); + vm_exit_out_of_memory(code_size1, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (1)"); } CodeBuffer buffer(_code1); StubGenerator_generate(&buffer, false); @@ -199,7 +199,7 @@ TraceTime timer("StubRoutines generation 2", TraceStartupTime); _code2 = BufferBlob::create("StubRoutines (2)", code_size2); if (_code2 == NULL) { - vm_exit_out_of_memory(code_size2, "CodeCache: no room for StubRoutines (2)"); + vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (2)"); } CodeBuffer buffer(_code2); StubGenerator_generate(&buffer, true); diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/stubRoutines.hpp --- a/src/share/vm/runtime/stubRoutines.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/stubRoutines.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -223,6 +223,8 @@ static void initialize1(); // must happen before universe::genesis static void initialize2(); // must happen after universe::genesis + static bool is_stub_code(address addr) { return contains(addr); } + static bool contains(address addr) { return (_code1 != NULL && _code1->blob_contains(addr)) || diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/sweeper.cpp --- a/src/share/vm/runtime/sweeper.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/sweeper.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ #include "runtime/os.hpp" #include "runtime/sweeper.hpp" #include "runtime/vm_operations.hpp" +#include "trace/tracing.hpp" #include "utilities/events.hpp" #include "utilities/xmlstream.hpp" @@ -130,19 +131,30 @@ long NMethodSweeper::_traversals = 0; // No. of stack traversals performed nmethod* NMethodSweeper::_current = NULL; // Current nmethod int NMethodSweeper::_seen = 0 ; // No. 
of nmethods we have currently processed in current pass of CodeCache +int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep +int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep +int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress. jint NMethodSweeper::_locked_seen = 0; jint NMethodSweeper::_not_entrant_seen_on_stack = 0; -bool NMethodSweeper::_rescan = false; -bool NMethodSweeper::_do_sweep = false; -bool NMethodSweeper::_was_full = false; -jint NMethodSweeper::_advise_to_sweep = 0; -jlong NMethodSweeper::_last_was_full = 0; -uint NMethodSweeper::_highest_marked = 0; -long NMethodSweeper::_was_full_traversal = 0; +bool NMethodSweeper::_resweep = false; +jint NMethodSweeper::_flush_token = 0; +jlong NMethodSweeper::_last_full_flush_time = 0; +int NMethodSweeper::_highest_marked = 0; +int NMethodSweeper::_dead_compile_ids = 0; +long NMethodSweeper::_last_flush_traversal_id = 0; + +int NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache +int NMethodSweeper::_total_nof_methods_reclaimed = 0; +jlong NMethodSweeper::_total_time_sweeping = 0; +jlong NMethodSweeper::_total_time_this_sweep = 0; +jlong NMethodSweeper::_peak_sweep_time = 0; +jlong NMethodSweeper::_peak_sweep_fraction_time = 0; +jlong NMethodSweeper::_total_disconnect_time = 0; +jlong NMethodSweeper::_peak_disconnect_time = 0; class MarkActivationClosure: public CodeBlobClosure { public: @@ -155,20 +167,16 @@ }; static MarkActivationClosure mark_activation_closure; +bool NMethodSweeper::sweep_in_progress() { + return (_current != NULL); +} + void NMethodSweeper::scan_stacks() { assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint"); if (!MethodFlushing) return; - _do_sweep = true; // No need to synchronize access, since this is always executed at a - // safepoint. If we aren't in the middle of scan and a rescan - // hasn't been requested then just return. If UseCodeCacheFlushing is on and - // code cache flushing is in progress, don't skip sweeping to help make progress - // clearing space in the code cache. - if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) { - _do_sweep = false; - return; - } + // safepoint. // Make sure CompiledIC_lock in unlocked, since we might update some // inline caches. If it is, we just bail-out and try later. @@ -176,50 +184,43 @@ // Check for restart assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid"); - if (_current == NULL) { + if (!sweep_in_progress() && _resweep) { _seen = 0; _invocations = NmethodSweepFraction; _current = CodeCache::first_nmethod(); _traversals += 1; + _total_time_this_sweep = 0; + if (PrintMethodFlushing) { tty->print_cr("### Sweep: stack traversal %d", _traversals); } Threads::nmethods_do(&mark_activation_closure); // reset the flags since we started a scan from the beginning. - _rescan = false; + _resweep = false; _locked_seen = 0; _not_entrant_seen_on_stack = 0; } if (UseCodeCacheFlushing) { - if (!CodeCache::needs_flushing()) { - // scan_stacks() runs during a safepoint, no race with setters - _advise_to_sweep = 0; + // only allow new flushes after the interval is complete. 
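The _flush_token handling above and in the next hunk combines two small techniques: a compare-and-swap guard so only the first thread that notices the condition performs the expensive action, and a timed reset so the action becomes possible again once the flushing interval has elapsed. A sketch under those assumptions; the 30-second interval is an illustrative stand-in for MinCodeCacheFlushingInterval:

#include <atomic>
#include <chrono>
#include <cstdio>

std::atomic<int> flush_token{0};
std::chrono::steady_clock::time_point last_full_flush;

bool try_begin_flush() {
  int expected = 0;
  // only the first caller flips 0 -> 1; everyone else backs off
  return flush_token.compare_exchange_strong(expected, 1);
}

void maybe_reset_token() {
  // allow another flush only once the interval has fully elapsed
  if (std::chrono::steady_clock::now() - last_full_flush >
      std::chrono::seconds(30)) {
    flush_token.store(0);
  }
}

int main() {
  last_full_flush = std::chrono::steady_clock::now();
  std::printf("first attempt:  %s\n", try_begin_flush() ? "flushes" : "backs off");
  std::printf("second attempt: %s\n", try_begin_flush() ? "flushes" : "backs off");
  maybe_reset_token();   // would reopen the gate once the interval has passed
}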
+ jlong now = os::javaTimeMillis(); + jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000; + jlong curr_interval = now - _last_full_flush_time; + if (curr_interval > max_interval) { + _flush_token = 0; } - if (was_full()) { - // There was some progress so attempt to restart the compiler - jlong now = os::javaTimeMillis(); - jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000; - jlong curr_interval = now - _last_was_full; - if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) { - CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); - set_was_full(false); - - // Update the _last_was_full time so we can tell how fast the - // code cache is filling up - _last_was_full = os::javaTimeMillis(); - - log_sweep("restart_compiler"); - } + if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) { + CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); + log_sweep("restart_compiler"); } } } void NMethodSweeper::possibly_sweep() { assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode"); - if ((!MethodFlushing) || (!_do_sweep)) return; + if (!MethodFlushing || !sweep_in_progress()) return; if (_invocations > 0) { // Only one thread at a time will sweep @@ -243,16 +244,25 @@ } void NMethodSweeper::sweep_code_cache() { -#ifdef ASSERT - jlong sweep_start; - if (PrintMethodFlushing) { - sweep_start = os::javaTimeMillis(); - } -#endif + + jlong sweep_start_counter = os::elapsed_counter(); + + _flushed_count = 0; + _zombified_count = 0; + _marked_count = 0; + if (PrintMethodFlushing && Verbose) { tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations); } + if (!CompileBroker::should_compile_new_jobs()) { + // If we have turned off compilations we might as well do full sweeps + // in order to reach the clean state faster. Otherwise the sleeping compiler + // threads will slow down sweeping. After a few iterations the cache + // will be clean and sweeping stops (_resweep will not be set) + _invocations = 1; + } + // We want to visit all nmethods after NmethodSweepFraction // invocations so divide the remaining number of nmethods by the // remaining number of invocations. This is only an estimate since @@ -296,7 +306,7 @@ assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache"); - if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) { + if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) { // we've completed a scan without making progress but there were // nmethods we were unable to process either because they were // locked or were still on stack. 
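The sweep-fraction arithmetic mentioned above is worth seeing in isolation: each of the NmethodSweepFraction invocations processes remaining_nmethods / remaining_invocations entries, so early estimates can drift yet the final invocation (a division by 1) still reaches the end of the cache. A toy run with made-up values:

#include <cstdio>

int main() {
  const int NmethodSweepFraction = 4;   // illustrative value
  const int total = 103;                // nmethods currently in the code cache
  int seen = 0;
  for (int invocations = NmethodSweepFraction; invocations >= 1; invocations--) {
    int todo = (total - seen) / invocations;   // slice for this invocation
    std::printf("pass %d sweeps %d nmethods\n",
                NmethodSweepFraction - invocations + 1, todo);
    seen += todo;
  }
  std::printf("swept %d of %d\n", seen, total);  // always ends at the full count
}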
We don't have to aggresively @@ -308,16 +318,43 @@ } } + jlong sweep_end_counter = os::elapsed_counter(); + jlong sweep_time = sweep_end_counter - sweep_start_counter; + _total_time_sweeping += sweep_time; + _total_time_this_sweep += sweep_time; + _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time); + _total_nof_methods_reclaimed += _flushed_count; + + EventSweepCodeCache event(UNTIMED); + if (event.should_commit()) { + event.set_starttime(sweep_start_counter); + event.set_endtime(sweep_end_counter); + event.set_sweepIndex(_traversals); + event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1); + event.set_sweptCount(todo); + event.set_flushedCount(_flushed_count); + event.set_markedCount(_marked_count); + event.set_zombifiedCount(_zombified_count); + event.commit(); + } + #ifdef ASSERT if(PrintMethodFlushing) { - jlong sweep_end = os::javaTimeMillis(); - tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start); + tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time); } #endif if (_invocations == 1) { + _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); log_sweep("finished"); } + + // Sweeper is the only case where memory is released, + // check here if it is time to restart the compiler. + if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) { + CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); + log_sweep("restart_compiler"); + } } class NMethodMarker: public StackObj { @@ -395,12 +432,14 @@ tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm); } release_nmethod(nm); + _flushed_count++; } else { if (PrintMethodFlushing && Verbose) { tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm); } nm->mark_for_reclamation(); - _rescan = true; + _resweep = true; + _marked_count++; SWEEP(nm); } } else if (nm->is_not_entrant()) { @@ -411,7 +450,8 @@ tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm); } nm->make_zombie(); - _rescan = true; + _resweep = true; + _zombified_count++; SWEEP(nm); } else { // Still alive, clean up its inline caches @@ -427,22 +467,24 @@ // Unloaded code, just make it a zombie if (PrintMethodFlushing && Verbose) tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm); + if (nm->is_osr_method()) { SWEEP(nm); // No inline caches will ever point to osr methods, so we can just remove it release_nmethod(nm); + _flushed_count++; } else { nm->make_zombie(); - _rescan = true; + _resweep = true; + _zombified_count++; SWEEP(nm); } } else { assert(nm->is_alive(), "should be alive"); if (UseCodeCacheFlushing) { - if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) && - (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) && - CodeCache::needs_flushing()) { + if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() && + (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) { // This method has not been called since the forced cleanup happened nm->make_not_entrant(); } @@ -465,109 +507,112 @@ // _code field is restored and the Method*/nmethod // go back to their normal state. 
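The EventSweepCodeCache block above shows the UNTIMED variant of the event pattern: the same pair of os::elapsed_counter() samples feeds both the sweeper statistics (running totals and peaks) and the event's explicitly set start and end times, so the two views can never disagree. A standalone sketch of that bookkeeping; the names are invented and std::chrono stands in for the VM's counter:

#include <algorithm>
#include <chrono>
#include <cstdio>

long long total_time_sweeping = 0, peak_sweep_time = 0;

long long now_counter() {
  return std::chrono::steady_clock::now().time_since_epoch().count();
}

int main() {
  long long start = now_counter();
  // ... sweep one fraction of the code cache ...
  long long end = now_counter();

  long long sweep_time = end - start;
  total_time_sweeping += sweep_time;                       // accumulated stat
  peak_sweep_time = std::max(peak_sweep_time, sweep_time); // MAX2 in HotSpot

  // an UNTIMED event would receive the identical start/end pair here
  std::printf("sweep: start=%lld end=%lld total=%lld peak=%lld\n",
              start, end, total_time_sweeping, peak_sweep_time);
}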
void NMethodSweeper::handle_full_code_cache(bool is_full) { - // Only the first one to notice can advise us to start early cleaning - if (!is_full){ - jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 ); - if (old != 0) { - return; - } - } if (is_full) { // Since code cache is full, immediately stop new compiles - bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation); - if (!did_set) { - // only the first to notice can start the cleaning, - // others will go back and block - return; + if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { + log_sweep("disable_compiler"); } - set_was_full(true); + } - // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up - jlong now = os::javaTimeMillis(); - jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000; - jlong curr_interval = now - _last_was_full; - if (curr_interval < max_interval) { - _rescan = true; - log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'", - curr_interval/1000); - return; - } + // Make sure only one thread can flush + // The token is reset after MinCodeCacheFlushingInterval in scan stacks, + // no need to check the timeout here. + jint old = Atomic::cmpxchg( 1, &_flush_token, 0 ); + if (old != 0) { + return; } VM_HandleFullCodeCache op(is_full); VMThread::execute(&op); - // rescan again as soon as possible - _rescan = true; + // resweep again as soon as possible + _resweep = true; } void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) { // If there was a race in detecting full code cache, only run // one vm op for it or keep the compiler shut off - debug_only(jlong start = os::javaTimeMillis();) - - if ((!was_full()) && (is_full)) { - if (!CodeCache::needs_flushing()) { - log_sweep("restart_compiler"); - CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); - return; - } - } + jlong disconnect_start_counter = os::elapsed_counter(); // Traverse the code cache trying to dump the oldest nmethods - uint curr_max_comp_id = CompileBroker::get_compilation_id(); - uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked; + int curr_max_comp_id = CompileBroker::get_compilation_id(); + int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids; + log_sweep("start_cleaning"); nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); jint disconnected = 0; jint made_not_entrant = 0; + jint nmethod_count = 0; + while ((nm != NULL)){ - uint curr_comp_id = nm->compile_id(); + int curr_comp_id = nm->compile_id(); // OSR methods cannot be flushed like this.
Also, don't flush native methods - since they are part of the JDK in most cases - if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) && - (!nm->is_native_method()) && ((curr_comp_id < flush_target))) { + if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) { + + // only count methods that can be speculatively disconnected + nmethod_count++; - if ((nm->method()->code() == nm)) { - // This method has not been previously considered for - // unloading or it was restored already - CodeCache::speculatively_disconnect(nm); - disconnected++; - } else if (nm->is_speculatively_disconnected()) { - // This method was previously considered for preemptive unloading and was not called since then - CompilationPolicy::policy()->delay_compilation(nm->method()); - nm->make_not_entrant(); - made_not_entrant++; - } + if (nm->is_in_use() && (curr_comp_id < flush_target)) { + if ((nm->method()->code() == nm)) { + // This method has not been previously considered for + // unloading or it was restored already + CodeCache::speculatively_disconnect(nm); + disconnected++; + } else if (nm->is_speculatively_disconnected()) { + // This method was previously considered for preemptive unloading and was not called since then + CompilationPolicy::policy()->delay_compilation(nm->method()); + nm->make_not_entrant(); + made_not_entrant++; + } - if (curr_comp_id > _highest_marked) { - _highest_marked = curr_comp_id; + if (curr_comp_id > _highest_marked) { + _highest_marked = curr_comp_id; + } } } nm = CodeCache::alive_nmethod(CodeCache::next(nm)); } + // remember how many compile_ids weren't seen in the last flush. + _dead_compile_ids = curr_max_comp_id - nmethod_count; + log_sweep("stop_cleaning", "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'", disconnected, made_not_entrant); // Shut off compiler. Sweeper will start over with a new stack scan and // traversal cycle and turn it back on if it clears enough space. - if (was_full()) { - _last_was_full = os::javaTimeMillis(); - CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation); + if (is_full) { + _last_full_flush_time = os::javaTimeMillis(); } + jlong disconnect_end_counter = os::elapsed_counter(); + jlong disconnect_time = disconnect_end_counter - disconnect_start_counter; + _total_disconnect_time += disconnect_time; + _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time); + + EventCleanCodeCache event(UNTIMED); + if (event.should_commit()) { + event.set_starttime(disconnect_start_counter); + event.set_endtime(disconnect_end_counter); + event.set_disconnectedCount(disconnected); + event.set_madeNonEntrantCount(made_not_entrant); + event.commit(); + } + _number_of_flushes++; + // After two more traversals the sweeper will get rid of unrestored nmethods - _was_full_traversal = _traversals; + _last_flush_traversal_id = _traversals; + _resweep = true; #ifdef ASSERT - jlong end = os::javaTimeMillis(); + if(PrintMethodFlushing && Verbose) { - tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start); + tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time); } #endif } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/sweeper.hpp --- a/src/share/vm/runtime/sweeper.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/sweeper.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates.
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,33 +31,56 @@ // class NMethodSweeper : public AllStatic { - static long _traversals; // Stack traversal count - static nmethod* _current; // Current nmethod - static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache + static long _traversals; // Stack scan count, also sweep ID. + static nmethod* _current; // Current nmethod + static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache + static int _flushed_count; // Nof. nmethods flushed in current sweep + static int _zombified_count; // Nof. nmethods made zombie in current sweep + static int _marked_count; // Nof. nmethods marked for reclaim in current sweep - static volatile int _invocations; // No. of invocations left until we are completed with this pass - static volatile int _sweep_started; // Flag to control conc sweeper + static volatile int _invocations; // No. of invocations left until we are completed with this pass + static volatile int _sweep_started; // Flag to control conc sweeper + + // The following are reset in scan_stacks and synchronized by the safepoint + static bool _resweep; // Indicates that a change has happened and we want another sweep, + // always checked and reset at a safepoint so memory will be in sync. + static int _locked_seen; // Number of locked nmethods encountered during the scan + static int _not_entrant_seen_on_stack; // Number of not entrant nmethods which are still on stack + static jint _flush_token; // token that guards method flushing, making sure it is executed only once. - static bool _rescan; // Indicates that we should do a full rescan of the - // of the code cache looking for work to do.
- static bool _do_sweep; // Flag to skip the conc sweep if no stack scan happened - static int _locked_seen; // Number of locked nmethods encountered during the scan - static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack + // These are set during a flush, a VM-operation + static long _last_flush_traversal_id; // trav number at last flush unloading + static jlong _last_full_flush_time; // timestamp of last emergency unloading + + // These are synchronized by the _sweep_started token + static int _highest_marked; // highest compile id dumped at last emergency unloading + static int _dead_compile_ids; // number of compile ids that where not in the cache last flush - static bool _was_full; // remember if we did emergency unloading - static jint _advise_to_sweep; // flag to indicate code cache getting full - static jlong _last_was_full; // timestamp of last emergency unloading - static uint _highest_marked; // highest compile id dumped at last emergency unloading - static long _was_full_traversal; // trav number at last emergency unloading + // Stat counters + static int _number_of_flushes; // Total of full traversals caused by full cache + static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed + static jlong _total_time_sweeping; // Accumulated time sweeping + static jlong _total_time_this_sweep; // Total time this sweep + static jlong _peak_sweep_time; // Peak time for a full sweep + static jlong _peak_sweep_fraction_time; // Peak time sweeping one fraction + static jlong _total_disconnect_time; // Total time cleaning code mem + static jlong _peak_disconnect_time; // Peak time cleaning code mem static void process_nmethod(nmethod *nm); - static void release_nmethod(nmethod* nm); static void log_sweep(const char* msg, const char* format = NULL, ...); + static bool sweep_in_progress(); public: - static long traversal_count() { return _traversals; } + static long traversal_count() { return _traversals; } + static int number_of_flushes() { return _number_of_flushes; } + static int total_nof_methods_reclaimed() { return _total_nof_methods_reclaimed; } + static jlong total_time_sweeping() { return _total_time_sweeping; } + static jlong peak_sweep_time() { return _peak_sweep_time; } + static jlong peak_sweep_fraction_time() { return _peak_sweep_fraction_time; } + static jlong total_disconnect_time() { return _total_disconnect_time; } + static jlong peak_disconnect_time() { return _peak_disconnect_time; } #ifdef ASSERT // Keep track of sweeper activity in the ring buffer @@ -71,17 +94,14 @@ static void possibly_sweep(); // Compiler threads call this to sweep static void notify(nmethod* nm) { - // Perform a full scan of the code cache from the beginning. No + // Request a new sweep of the code cache from the beginning. No // need to synchronize the setting of this flag since it only // changes to false at safepoint so we can never overwrite it with false. 
- _rescan = true; + _resweep = true; } static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure - - static void set_was_full(bool state) { _was_full = state; } - static bool was_full() { return _was_full; } }; #endif // SHARE_VM_RUNTIME_SWEEPER_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/synchronizer.cpp --- a/src/share/vm/runtime/synchronizer.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/synchronizer.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -213,7 +213,7 @@ } } - ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ; + ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ; } // ----------------------------------------------------------------------------- @@ -343,7 +343,7 @@ // If this thread has locked the object, exit the monitor. Note: can't use // monitor->check(CHECK); must exit even if an exception is pending. if (monitor->check(THREAD)) { - monitor->exit(THREAD); + monitor->exit(true, THREAD); } } @@ -1018,7 +1018,8 @@ // We might be able to induce a STW safepoint and scavenge enough // objectMonitors to permit progress. if (temp == NULL) { - vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ; + vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR, + "Allocate ObjectMonitors"); } // Format the block. diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/task.cpp --- a/src/share/vm/runtime/task.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/task.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,9 +114,11 @@ disenroll(); } +/* enroll could be called from a JavaThread, so we have to check for + * safepoint when taking the lock to avoid deadlocking */ void PeriodicTask::enroll() { MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? - NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag); + NULL : PeriodicTask_lock); if (_num_tasks == PeriodicTask::max_tasks) { fatal("Overflow in PeriodicTask table"); @@ -131,9 +133,11 @@ } } +/* disenroll could be called from a JavaThread, so we have to check for + * safepoint when taking the lock to avoid deadlocking */ void PeriodicTask::disenroll() { MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? 
- NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag); + NULL : PeriodicTask_lock); int index; for(index = 0; index < _num_tasks && _tasks[index] != this; index++) diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/thread.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -81,7 +81,8 @@ #include "services/management.hpp" #include "services/memTracker.hpp" #include "services/threadService.hpp" -#include "trace/traceEventTypes.hpp" +#include "trace/tracing.hpp" +#include "trace/traceMacros.hpp" #include "utilities/defaultStream.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" @@ -242,7 +243,6 @@ CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;) _jvmti_env_iteration_count = 0; set_allocated_bytes(0); - set_trace_buffer(NULL); _vm_operation_started_count = 0; _vm_operation_completed_count = 0; _current_pending_monitor = NULL; @@ -1668,9 +1668,11 @@ JvmtiExport::post_thread_start(this); } - EVENT_BEGIN(TraceEventThreadStart, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj()))); + EventThreadStart event; + if (event.should_commit()) { + event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj())); + event.commit(); + } // We call another function to do the rest so we are sure that the stack addresses used // from there will be lower than the stack base just computed @@ -1800,9 +1802,11 @@ // Called before the java thread exit since we want to read info // from java_lang_Thread object - EVENT_BEGIN(TraceEventThreadEnd, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj()))); + EventThreadEnd event; + if (event.should_commit()) { + event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj())); + event.commit(); + } // Call after last event on thread EVENT_THREAD_EXIT(this); @@ -3454,7 +3458,8 @@ assert (Universe::is_fully_initialized(), "not initialized"); if (VerifyDuringStartup) { - VM_Verify verify_op(false /* silent */); // make sure we're starting with a clean slate + // Make sure we're starting with a clean slate. + VM_Verify verify_op; VMThread::execute(&verify_op); } @@ -3654,8 +3659,8 @@ // Notify JVMTI agents that VM initialization is complete - nop if no agents. 
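The task.cpp hunks above rely on a small reentrancy idiom: the scoped locker is handed NULL when the current thread already owns PeriodicTask_lock, so enroll() and disenroll() work both from code that holds the lock and from code that does not. A sketch of that idiom; OwnedMutex and ConditionalLocker are invented stand-ins for HotSpot's Monitor and MutexLockerEx, and the owner bookkeeping is deliberately simplified:

#include <mutex>
#include <thread>

class OwnedMutex {
  std::mutex _m;
  std::thread::id _owner;   // only compared against the current thread; sketch-level precision
 public:
  void lock()   { _m.lock(); _owner = std::this_thread::get_id(); }
  void unlock() { _owner = std::thread::id(); _m.unlock(); }
  bool owned_by_self() const { return _owner == std::this_thread::get_id(); }
};

class ConditionalLocker {          // like MutexLockerEx given a NULL mutex
  OwnedMutex* _m;
 public:
  explicit ConditionalLocker(OwnedMutex* m) : _m(m) { if (_m) _m->lock(); }
  ~ConditionalLocker() { if (_m) _m->unlock(); }
};

OwnedMutex periodic_task_lock;

void enroll() {
  // take the lock only when this thread does not already hold it
  ConditionalLocker ml(periodic_task_lock.owned_by_self() ? nullptr
                                                          : &periodic_task_lock);
  // ... add the task to the table ...
}

int main() { enroll(); }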
JvmtiExport::post_vm_initialized(); - if (!TRACE_START()) { - vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); + if (TRACE_START() != JNI_OK) { + vm_exit_during_initialization("Failed to start tracing backend."); } if (CleanChunkPoolAsync) { diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/thread.hpp --- a/src/share/vm/runtime/thread.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/thread.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -47,7 +47,8 @@ #include "services/memRecorder.hpp" #endif // INCLUDE_NMT -#include "trace/tracing.hpp" +#include "trace/traceBackend.hpp" +#include "trace/traceMacros.hpp" #include "utilities/exceptions.hpp" #include "utilities/top.hpp" #if INCLUDE_ALL_GCS @@ -258,7 +259,7 @@ jlong _allocated_bytes; // Cumulative number of bytes allocated on // the Java heap - TRACE_BUFFER _trace_buffer; // Thread-local buffer for tracing + TRACE_DATA _trace_data; // Thread-local data for tracing int _vm_operation_started_count; // VM_Operation support int _vm_operation_completed_count; // VM_Operation support @@ -449,8 +450,7 @@ return allocated_bytes; } - TRACE_BUFFER trace_buffer() { return _trace_buffer; } - void set_trace_buffer(TRACE_BUFFER buf) { _trace_buffer = buf; } + TRACE_DATA* trace_data() { return &_trace_data; } // VM operation support int vm_operation_ticket() { return ++_vm_operation_started_count; } @@ -638,9 +638,6 @@ jint _hashStateZ ; void * _schedctl ; - intptr_t _ScratchA, _ScratchB ; // Scratch locations for fast-path sync code - static ByteSize ScratchA_offset() { return byte_offset_of(Thread, _ScratchA ); } - static ByteSize ScratchB_offset() { return byte_offset_of(Thread, _ScratchB ); } volatile jint rng [4] ; // RNG for spin loop @@ -1075,11 +1072,11 @@ #if INCLUDE_NMT // native memory tracking inline MemRecorder* get_recorder() const { return (MemRecorder*)_recorder; } - inline void set_recorder(MemRecorder* rc) { _recorder = (volatile MemRecorder*)rc; } + inline void set_recorder(MemRecorder* rc) { _recorder = rc; } private: // per-thread memory recorder - volatile MemRecorder* _recorder; + MemRecorder* volatile _recorder; #endif // INCLUDE_NMT // Suspend/resume support for JavaThread diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/timer.cpp --- a/src/share/vm/runtime/timer.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/timer.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
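The one-line thread.hpp change from volatile MemRecorder* to MemRecorder* volatile is easy to misread, so the distinction deserves an explicit example: only the second form makes the pointer itself volatile, which is what a per-thread slot read and swapped by other threads needs. A compilable illustration:

struct MemRecorder { int depth; };

volatile MemRecorder* p1;   // pointer to volatile data:
                            //   *p1 is volatile, p1 itself is an ordinary pointer
MemRecorder* volatile p2;   // volatile pointer to ordinary data:
                            //   p2 itself must be re-read on every access

int main() {
  static MemRecorder r{0};
  p1 = &r;
  p2 = &r;
  return p2 ? 0 : 1;        // the load of p2 cannot be cached away
}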
* * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,11 @@ # include "os_bsd.inline.hpp" #endif +double TimeHelper::counter_to_seconds(jlong counter) { + double count = (double) counter; + double freq = (double) os::elapsed_frequency(); + return counter/freq; +} void elapsedTimer::add(elapsedTimer t) { _counter += t._counter; @@ -59,9 +64,7 @@ } double elapsedTimer::seconds() const { - double count = (double) _counter; - double freq = (double) os::elapsed_frequency(); - return count/freq; + return TimeHelper::counter_to_seconds(_counter); } jlong elapsedTimer::milliseconds() const { @@ -90,9 +93,7 @@ double TimeStamp::seconds() const { assert(is_updated(), "must not be clear"); jlong new_count = os::elapsed_counter(); - double count = (double) new_count - _counter; - double freq = (double) os::elapsed_frequency(); - return count/freq; + return TimeHelper::counter_to_seconds(new_count - _counter); } jlong TimeStamp::milliseconds() const { @@ -110,19 +111,15 @@ } TraceTime::TraceTime(const char* title, - bool doit, - bool print_cr, - outputStream* logfile) { + bool doit) { _active = doit; _verbose = true; - _print_cr = print_cr; - _logfile = (logfile != NULL) ? logfile : tty; if (_active) { _accum = NULL; - _logfile->stamp(PrintGCTimeStamps); - _logfile->print("[%s", title); - _logfile->flush(); + tty->stamp(PrintGCTimeStamps); + tty->print("[%s", title); + tty->flush(); _t.start(); } } @@ -130,17 +127,14 @@ TraceTime::TraceTime(const char* title, elapsedTimer* accumulator, bool doit, - bool verbose, - outputStream* logfile) { + bool verbose) { _active = doit; _verbose = verbose; - _print_cr = true; - _logfile = (logfile != NULL) ? logfile : tty; if (_active) { if (_verbose) { - _logfile->stamp(PrintGCTimeStamps); - _logfile->print("[%s", title); - _logfile->flush(); + tty->stamp(PrintGCTimeStamps); + tty->print("[%s", title); + tty->flush(); } _accum = accumulator; _t.start(); @@ -152,12 +146,8 @@ _t.stop(); if (_accum!=NULL) _accum->add(_t); if (_verbose) { - if (_print_cr) { - _logfile->print_cr(", %3.7f secs]", _t.seconds()); - } else { - _logfile->print(", %3.7f secs]", _t.seconds()); - } - _logfile->flush(); + tty->print_cr(", %3.7f secs]", _t.seconds()); + tty->flush(); } } } diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/timer.hpp --- a/src/share/vm/runtime/timer.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/timer.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
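The simplified TraceTime above is a textbook RAII timer: the constructor prints the title and starts the clock, the destructor reports the elapsed seconds, so every exit path from the timed scope is covered. A self-contained sketch in the same spirit; std::chrono replaces elapsedTimer, stdout replaces tty, and the format string mirrors the one in timer.cpp:

#include <chrono>
#include <cstdio>

class TraceTime {
  const char* _title;
  std::chrono::steady_clock::time_point _start;
  bool _active;
 public:
  explicit TraceTime(const char* title, bool doit = true)
      : _title(title), _start(std::chrono::steady_clock::now()), _active(doit) {
    if (_active) std::printf("[%s", _title);
  }
  ~TraceTime() {                                   // runs on every scope exit
    if (_active) {
      std::chrono::duration<double> secs =
          std::chrono::steady_clock::now() - _start;
      std::printf(", %3.7f secs]\n", secs.count());
    }
  }
};

void generate_stubs() {
  TraceTime timer("StubRoutines generation 1");    // reported when the scope ends
  // ... work being timed ...
}

int main() { generate_stubs(); }

Dropping the logfile and print_cr parameters, as the patch does, is the usual RAII trade: fewer knobs in exchange for a constructor/destructor pair that cannot be forgotten or mismatched.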
* * This code is free software; you can redistribute it and/or modify it @@ -82,21 +82,16 @@ private: bool _active; // do timing bool _verbose; // report every timing - bool _print_cr; // add a CR to the end of the timer report elapsedTimer _t; // timer elapsedTimer* _accum; // accumulator - outputStream* _logfile; // output log file public: - // Constuctors + // Constructors TraceTime(const char* title, - bool doit = true, - bool print_cr = true, - outputStream *logfile = NULL); + bool doit = true); TraceTime(const char* title, elapsedTimer* accumulator, bool doit = true, - bool verbose = false, - outputStream *logfile = NULL ); + bool verbose = false); ~TraceTime(); // Accessors @@ -125,4 +120,9 @@ ~TraceCPUTime(); }; +class TimeHelper { + public: + static double counter_to_seconds(jlong counter); +}; + #endif // SHARE_VM_RUNTIME_TIMER_HPP diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/unhandledOops.hpp --- a/src/share/vm/runtime/unhandledOops.hpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/unhandledOops.hpp Wed Jun 19 10:45:56 2013 +0200 @@ -48,7 +48,7 @@ class oop; class Thread; -class UnhandledOopEntry { +class UnhandledOopEntry : public CHeapObj { friend class UnhandledOops; private: oop* _oop_ptr; @@ -62,7 +62,7 @@ }; -class UnhandledOops { +class UnhandledOops : public CHeapObj { friend class Thread; private: Thread* _thread; diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/virtualspace.cpp --- a/src/share/vm/runtime/virtualspace.cpp Tue Jun 18 14:23:29 2013 -0700 +++ b/src/share/vm/runtime/virtualspace.cpp Wed Jun 19 10:45:56 2013 +0200 @@ -60,72 +60,6 @@ initialize(size, alignment, large, NULL, 0, executable); } -char * -ReservedSpace::align_reserved_region(char* addr, const size_t len, - const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align) -{ - assert(addr != NULL, "sanity"); - const size_t required_size = prefix_size + suffix_size; - assert(len >= required_size, "len too small"); - - const size_t s = size_t(addr); - const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1); - const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs; - - if (len < beg_delta + required_size) { - return NULL; // Cannot do proper alignment. 
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/virtualspace.cpp
--- a/src/share/vm/runtime/virtualspace.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/runtime/virtualspace.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -60,72 +60,6 @@
   initialize(size, alignment, large, NULL, 0, executable);
 }

-char *
-ReservedSpace::align_reserved_region(char* addr, const size_t len,
-                                     const size_t prefix_size,
-                                     const size_t prefix_align,
-                                     const size_t suffix_size,
-                                     const size_t suffix_align)
-{
-  assert(addr != NULL, "sanity");
-  const size_t required_size = prefix_size + suffix_size;
-  assert(len >= required_size, "len too small");
-
-  const size_t s = size_t(addr);
-  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
-  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
-
-  if (len < beg_delta + required_size) {
-    return NULL; // Cannot do proper alignment.
-  }
-  const size_t end_delta = len - (beg_delta + required_size);
-
-  if (beg_delta != 0) {
-    os::release_memory(addr, beg_delta);
-  }
-
-  if (end_delta != 0) {
-    char* release_addr = (char*) (s + beg_delta + required_size);
-    os::release_memory(release_addr, end_delta);
-  }
-
-  return (char*) (s + beg_delta);
-}
-
-char* ReservedSpace::reserve_and_align(const size_t reserve_size,
-                                       const size_t prefix_size,
-                                       const size_t prefix_align,
-                                       const size_t suffix_size,
-                                       const size_t suffix_align)
-{
-  assert(reserve_size > prefix_size + suffix_size, "should not be here");
-
-  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
-  if (raw_addr == NULL) return NULL;
-
-  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
-                                       prefix_align, suffix_size,
-                                       suffix_align);
-  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
-    fatal("os::release_memory failed");
-  }
-
-#ifdef ASSERT
-  if (result != NULL) {
-    const size_t raw = size_t(raw_addr);
-    const size_t res = size_t(result);
-    assert(res >= raw, "alignment decreased start addr");
-    assert(res + prefix_size + suffix_size <= raw + reserve_size,
-           "alignment increased end addr");
-    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
-    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
-           "bad alignment of suffix");
-  }
-#endif
-
-  return result;
-}
-
 // Helper method.
 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                            const size_t size, bool special)
@@ -155,92 +89,6 @@
   return true;
 }

-ReservedSpace::ReservedSpace(const size_t suffix_size,
-                             const size_t suffix_align,
-                             char* requested_address,
-                             const size_t noaccess_prefix)
-{
-  assert(suffix_size != 0, "sanity");
-  assert(suffix_align != 0, "sanity");
-  assert((suffix_size & (suffix_align - 1)) == 0,
-         "suffix_size not divisible by suffix_align");
-
-  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
-  // Add in noaccess_prefix to prefix
-  const size_t adjusted_prefix_size = noaccess_prefix;
-  const size_t size = adjusted_prefix_size + suffix_size;
-
-  // On systems where the entire region has to be reserved and committed up
-  // front, the compound alignment normally done by this method is unnecessary.
-  const bool try_reserve_special = UseLargePages &&
-    suffix_align == os::large_page_size();
-  if (!os::can_commit_large_page_memory() && try_reserve_special) {
-    initialize(size, suffix_align, true, requested_address, noaccess_prefix,
-               false);
-    return;
-  }
-
-  _base = NULL;
-  _size = 0;
-  _alignment = 0;
-  _special = false;
-  _noaccess_prefix = 0;
-  _executable = false;
-
-  // Optimistically try to reserve the exact size needed.
-  char* addr;
-  if (requested_address != 0) {
-    requested_address -= noaccess_prefix; // adjust address
-    assert(requested_address != NULL, "huge noaccess prefix?");
-    addr = os::attempt_reserve_memory_at(size, requested_address);
-    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
-      // OS ignored requested address. Try different address.
-      addr = NULL;
-    }
-  } else {
-    addr = os::reserve_memory(size, NULL, suffix_align);
-  }
-  if (addr == NULL) return;
-
-  // Check whether the result has the needed alignment
-  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
-  if (ofs != 0) {
-    // Wrong alignment.  Release, allocate more space and do manual alignment.
-    //
-    // On most operating systems, another allocation with a somewhat larger size
-    // will return an address "close to" that of the previous allocation.  The
-    // result is often the same address (if the kernel hands out virtual
-    // addresses from low to high), or an address that is offset by the increase
-    // in size.  Exploit that to minimize the amount of extra space requested.
-    if (!os::release_memory(addr, size)) {
-      fatal("os::release_memory failed");
-    }
-
-    const size_t extra = MAX2(ofs, suffix_align - ofs);
-    addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align,
-                             suffix_size, suffix_align);
-    if (addr == NULL) {
-      // Try an even larger region.  If this fails, address space is exhausted.
-      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
-                               suffix_align, suffix_size, suffix_align);
-    }
-
-    if (requested_address != 0 &&
-        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
-      // As a result of the alignment constraints, the allocated addr differs
-      // from the requested address. Return back to the caller who can
-      // take remedial action (like try again without a requested address).
-      assert(_base == NULL, "should be");
-      return;
-    }
-  }
-
-  _base = addr;
-  _size = size;
-  _alignment = suffix_align;
-  _noaccess_prefix = noaccess_prefix;
-}
-
 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                                char* requested_address,
                                const size_t noaccess_prefix,
@@ -476,20 +324,6 @@
   protect_noaccess_prefix(size);
 }

-ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size,
-                                     const size_t alignment,
-                                     char* requested_address) :
-  ReservedSpace(heap_space_size, alignment,
-                requested_address,
-                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
-                 Universe::narrow_oop_use_implicit_null_checks()) ?
-                  lcm(os::vm_page_size(), alignment) : 0) {
-  if (base() > 0) {
-    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
-  }
-  protect_noaccess_prefix(heap_space_size);
-}
-
 // Reserve space for code segment.  Same as Java heap only we mark this as
 // executable.
 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/virtualspace.hpp
--- a/src/share/vm/runtime/virtualspace.hpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/runtime/virtualspace.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -47,28 +47,6 @@
                   const size_t noaccess_prefix,
                   bool executable);

-  // Release parts of an already-reserved memory region [addr, addr + len) to
-  // get a new region that has "compound alignment."  Return the start of the
-  // resulting region, or NULL on failure.
-  //
-  // The region is logically divided into a prefix and a suffix.  The prefix
-  // starts at the result address, which is aligned to prefix_align.  The suffix
-  // starts at result address + prefix_size, which is aligned to suffix_align.
-  // The total size of the result region is size prefix_size + suffix_size.
-  char* align_reserved_region(char* addr, const size_t len,
-                              const size_t prefix_size,
-                              const size_t prefix_align,
-                              const size_t suffix_size,
-                              const size_t suffix_align);
-
-  // Reserve memory, call align_reserved_region() to alignment it and return the
-  // result.
-  char* reserve_and_align(const size_t reserve_size,
-                          const size_t prefix_size,
-                          const size_t prefix_align,
-                          const size_t suffix_size,
-                          const size_t suffix_align);
-
 protected:
  // Create protection page at the beginning of the space.
  void protect_noaccess_prefix(const size_t size);
@@ -79,9 +57,6 @@
   ReservedSpace(size_t size, size_t alignment, bool large,
                 char* requested_address = NULL,
                 const size_t noaccess_prefix = 0);
-  ReservedSpace(const size_t suffix_size, const size_t suffix_align,
-                char* requested_address,
-                const size_t noaccess_prefix = 0);
   ReservedSpace(size_t size, size_t alignment, bool large, bool executable);

   // Accessors
@@ -128,8 +103,6 @@
   // Constructor
   ReservedHeapSpace(size_t size, size_t forced_base_alignment,
                     bool large, char* requested_address);
-  ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align,
-                    char* requested_address);
 };

 // Class encapsulating behavior specific memory space for Code
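Although the compound-alignment path is deleted here, the mask arithmetic it relied on is worth keeping in mind when reading the surviving initialize() path. A distilled, standalone version of the align-up computation (alignment must be a power of two):

    #include <cassert>
    #include <cstddef>

    // How far must a raw reservation be shifted so that (addr + prefix_size)
    // lands on a suffix_align boundary? This mirrors the beg_ofs/beg_delta
    // computation in the removed align_reserved_region().
    size_t align_shift(size_t addr, size_t prefix_size, size_t suffix_align) {
      assert((suffix_align & (suffix_align - 1)) == 0, "power of two only");
      size_t beg_ofs = (addr + prefix_size) & (suffix_align - 1); // misalignment
      return beg_ofs == 0 ? 0 : suffix_align - beg_ofs;           // shift to fix it
    }

    // e.g. align_shift(0x7400, 0x1000, 0x4000) == 0x3C00, because
    // 0x7400 + 0x3C00 + 0x1000 == 0xC000, a 0x4000-aligned address.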
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/vmStructs.cpp
--- a/src/share/vm/runtime/vmStructs.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/runtime/vmStructs.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -60,6 +60,7 @@
 #include "memory/generationSpec.hpp"
 #include "memory/heap.hpp"
 #include "memory/metablock.hpp"
+#include "memory/referenceType.hpp"
 #include "memory/space.hpp"
 #include "memory/tenuredGeneration.hpp"
 #include "memory/universe.hpp"
@@ -77,6 +78,7 @@
 #include "oops/klass.hpp"
 #include "oops/markOop.hpp"
 #include "oops/methodData.hpp"
+#include "oops/methodCounters.hpp"
 #include "oops/method.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.hpp"
@@ -291,10 +293,8 @@
   nonstatic_field(InstanceKlass, _transitive_interfaces, Array<Klass*>*) \
   nonstatic_field(InstanceKlass, _fields, Array<u2>*) \
   nonstatic_field(InstanceKlass, _java_fields_count, u2) \
-  nonstatic_field(InstanceKlass, _constants, ConstantPool*) \
+  nonstatic_field(InstanceKlass, _constants, ConstantPool*) \
   nonstatic_field(InstanceKlass, _class_loader_data, ClassLoaderData*) \
-  nonstatic_field(InstanceKlass, _protection_domain, oop) \
-  nonstatic_field(InstanceKlass, _signers, objArrayOop) \
   nonstatic_field(InstanceKlass, _source_file_name, Symbol*) \
   nonstatic_field(InstanceKlass, _source_debug_extension, char*) \
   nonstatic_field(InstanceKlass, _inner_classes, Array<jushort>*) \
@@ -348,16 +348,17 @@
   nonstatic_field(MethodData, _arg_local, intx) \
   nonstatic_field(MethodData, _arg_stack, intx) \
   nonstatic_field(MethodData, _arg_returned, intx) \
-  nonstatic_field(Method, _constMethod, ConstMethod*) \
-  nonstatic_field(Method, _method_data, MethodData*) \
-  nonstatic_field(Method, _interpreter_invocation_count, int) \
+  nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \
+  nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \
+  nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \
+  nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \
+  nonstatic_field(MethodCounters, _backedge_counter, InvocationCounter) \
+  nonstatic_field(Method, _constMethod, ConstMethod*) \
+  nonstatic_field(Method, _method_data, MethodData*) \
+  nonstatic_field(Method, _method_counters, MethodCounters*) \
   nonstatic_field(Method, _access_flags, AccessFlags) \
   nonstatic_field(Method, _vtable_index, int) \
   nonstatic_field(Method, _method_size, u2) \
-  nonstatic_field(Method, _interpreter_throwout_count, u2) \
-  nonstatic_field(Method, _number_of_breakpoints, u2) \
-  nonstatic_field(Method, _invocation_counter, InvocationCounter) \
-  nonstatic_field(Method, _backedge_counter, InvocationCounter) \
   nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \
   volatile_nonstatic_field(Method, _code, nmethod*) \
   nonstatic_field(Method, _i2i_entry, address) \
@@ -826,6 +827,7 @@
   nonstatic_field(nmethod, _lock_count, jint) \
   nonstatic_field(nmethod, _stack_traversal_mark, long) \
   nonstatic_field(nmethod, _compile_id, int) \
+  nonstatic_field(nmethod, _comp_level, int) \
   nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \
   nonstatic_field(nmethod, _marked_for_deoptimization, bool) \
   \
@@ -1054,6 +1056,7 @@
   c2_nonstatic_field(Compile, _save_argument_registers, const bool) \
   c2_nonstatic_field(Compile, _subsume_loads, const bool) \
   c2_nonstatic_field(Compile, _do_escape_analysis, const bool) \
+  c2_nonstatic_field(Compile, _eliminate_boxing, const bool) \
   c2_nonstatic_field(Compile, _ilt, InlineTree*) \
   \
   c2_nonstatic_field(InlineTree, _caller_jvms, JVMState*) \
@@ -1115,7 +1118,6 @@
   c2_nonstatic_field(PhaseChaitin, _lo_stk_degree, uint) \
   c2_nonstatic_field(PhaseChaitin, _hi_degree, uint) \
   c2_nonstatic_field(PhaseChaitin, _simplified, uint) \
-  c2_nonstatic_field(PhaseChaitin, _maxlrg, uint) \
   \
   c2_nonstatic_field(Block, _nodes, Node_List) \
   c2_nonstatic_field(Block, _succs, Block_Array) \
@@ -1382,6 +1384,7 @@
   declare_type(ConstantPoolCache, MetaspaceObj) \
   declare_type(MethodData, Metadata) \
   declare_type(Method, Metadata) \
+  declare_type(MethodCounters, MetaspaceObj) \
   declare_type(ConstMethod, MetaspaceObj) \
   \
   declare_toplevel_type(Symbol) \
@@ -3116,15 +3119,15 @@
   // Search for the base type by peeling off const and *
   size_t len = strlen(typeName);
   if (typeName[len-1] == '*') {
-    char * s = new char[len];
+    char * s = NEW_C_HEAP_ARRAY(char, len, mtInternal);
     strncpy(s, typeName, len - 1);
     s[len-1] = '\0';
     // tty->print_cr("checking \"%s\" for \"%s\"", s, typeName);
     if (recursiveFindType(origtypes, s, true) == 1) {
-      delete [] s;
+      FREE_C_HEAP_ARRAY(char, s, mtInternal);
       return 1;
     }
-    delete [] s;
+    FREE_C_HEAP_ARRAY(char, s, mtInternal);
   }
   const char* start = NULL;
   if (strstr(typeName, "GrowableArray<") == typeName) {
@@ -3135,15 +3138,15 @@
   if (start != NULL) {
     const char * end = strrchr(typeName, '>');
     int len = end - start + 1;
-    char * s = new char[len];
+    char * s = NEW_C_HEAP_ARRAY(char, len, mtInternal);
     strncpy(s, start, len - 1);
     s[len-1] = '\0';
     // tty->print_cr("checking \"%s\" for \"%s\"", s, typeName);
     if (recursiveFindType(origtypes, s, true) == 1) {
-      delete [] s;
+      FREE_C_HEAP_ARRAY(char, s, mtInternal);
       return 1;
     }
-    delete [] s;
+    FREE_C_HEAP_ARRAY(char, s, mtInternal);
   }
   if (strstr(typeName, "const ") == typeName) {
     const char * s = typeName + strlen("const ");
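The recursiveFindType() hunks swap raw new[]/delete[] for the NMT-tracked allocation macros. A standalone sketch of the usage pattern, with stub macros standing in for the real ones from memory/allocation.hpp (the stubs discard the memflags tag, which the real macros record):

    #include <cstdlib>
    #include <cstring>

    // Simplified stand-ins; mtInternal is discarded by the preprocessor here,
    // so it never needs to be a defined symbol in this sketch.
    #define NEW_C_HEAP_ARRAY(type, size, memflags) \
        ((type*) malloc((size) * sizeof(type)))
    #define FREE_C_HEAP_ARRAY(type, old, memflags) free((void*)(old))

    // Mirrors the patched code: copy a type name minus its trailing '*',
    // look it up, and release the scratch buffer on every path.
    bool matches_base_type(const char* typeName, size_t len) {
      char* s = NEW_C_HEAP_ARRAY(char, len, mtInternal);
      strncpy(s, typeName, len - 1);
      s[len - 1] = '\0';
      bool found = (strcmp(s, "Method") == 0);  // hypothetical lookup
      FREE_C_HEAP_ARRAY(char, s, mtInternal);
      return found;
    }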
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/vmThread.cpp
--- a/src/share/vm/runtime/vmThread.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -35,6 +35,7 @@
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/runtimeService.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/xmlstream.hpp"
@@ -293,7 +294,7 @@
     os::check_heap();
     // Silent verification so as not to pollute normal output,
     // unless we really asked for it.
-    Universe::verify(!(PrintGCDetails || Verbose));
+    Universe::verify(!(PrintGCDetails || Verbose) || VerifySilently);
   }

   CompileBroker::set_should_block();
@@ -365,7 +366,23 @@
                      (char *) op->name(), strlen(op->name()),
                      op->evaluation_mode());
 #endif /* USDT2 */
+
+  EventExecuteVMOperation event;
+
   op->evaluate();
+
+  if (event.should_commit()) {
+    bool is_concurrent = op->evaluate_concurrently();
+    event.set_operation(op->type());
+    event.set_safepoint(op->evaluate_at_safepoint());
+    event.set_blocking(!is_concurrent);
+    // Only write caller thread information for non-concurrent vm operations.
+    // For concurrent vm operations, the thread id is set to 0 indicating thread is unknown.
+    // This is because the caller thread could have exited already.
+    event.set_caller(is_concurrent ? 0 : op->calling_thread()->osthread()->thread_id());
+    event.commit();
+  }
+
 #ifndef USDT2
   HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()),
                    op->evaluation_mode());
@@ -601,7 +618,7 @@
   {
     VMOperationQueue_lock->lock_without_safepoint_check();
     bool ok = _vm_queue->add(op);
-    op->set_timestamp(os::javaTimeMillis());
+    op->set_timestamp(os::javaTimeMillis());
     VMOperationQueue_lock->notify();
     VMOperationQueue_lock->unlock();
     // VM_Operation got skipped
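The vmThread.cpp change brackets op->evaluate() with an event object that is committed only when the tracing framework is listening. A standalone sketch of that guard pattern (StubEvent and its members are hypothetical; the real EventExecuteVMOperation comes from the trace framework's generated headers):

    static bool tracing_on = false;          // toggled by the tracing runtime
    static void do_operation() { /* the VM operation being measured */ }

    struct StubEvent {
      bool should_commit() const { return tracing_on; }  // cheap check
      void set_operation(int type) { /* record a payload field */ }
      void commit()                { /* write the record to a trace buffer */ }
    };

    static void evaluate_with_event(int op_type) {
      StubEvent event;               // construction captures the start time
      do_operation();
      if (event.should_commit()) {   // payload is filled in only if needed
        event.set_operation(op_type);
        event.commit();              // commit captures the end time
      }
    }

The design point is that the fast path pays only for one boolean test when tracing is off.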
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/vm_operations.cpp
--- a/src/share/vm/runtime/vm_operations.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/runtime/vm_operations.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/threadService.hpp"
+#include "trace/tracing.hpp"

 #define VM_OP_NAME_INITIALIZE(name) #name,

@@ -62,19 +63,21 @@
   }
 }

+const char* VM_Operation::mode_to_string(Mode mode) {
+  switch(mode) {
+    case _safepoint      : return "safepoint";
+    case _no_safepoint   : return "no safepoint";
+    case _concurrent     : return "concurrent";
+    case _async_safepoint: return "async safepoint";
+    default              : return "unknown";
+  }
+}
 // Called by fatal error handler.
 void VM_Operation::print_on_error(outputStream* st) const {
   st->print("VM_Operation (" PTR_FORMAT "): ", this);
   st->print("%s", name());

-  const char* mode;
-  switch(evaluation_mode()) {
-    case _safepoint      : mode = "safepoint";       break;
-    case _no_safepoint   : mode = "no safepoint";    break;
-    case _concurrent     : mode = "concurrent";      break;
-    case _async_safepoint: mode = "async safepoint"; break;
-    default              : mode = "unknown";         break;
-  }
+  const char* mode = mode_to_string(evaluation_mode());
   st->print(", mode: %s", mode);

   if (calling_thread()) {
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/runtime/vm_operations.hpp
--- a/src/share/vm/runtime/vm_operations.hpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/runtime/vm_operations.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -178,6 +178,8 @@
            evaluation_mode() == _async_safepoint;
   }

+  static const char* mode_to_string(Mode mode);
+
   // Debugging
   void print_on_error(outputStream* st) const;
   const char* name() const { return _names[type()]; }
@@ -303,7 +305,7 @@
  private:
   bool _silent;
  public:
-  VM_Verify(bool silent) : _silent(silent) {}
+  VM_Verify(bool silent = VerifySilently) : _silent(silent) {}
   VMOp_Type type() const { return VMOp_Verify; }
   void doit();
 };
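The vm_operations.cpp refactoring hoists the mode-to-name switch out of print_on_error() so other callers (such as the new VM-operation trace event) can reuse it. The distilled lookup, runnable on its own:

    #include <cstdio>

    enum Mode { _safepoint, _no_safepoint, _concurrent, _async_safepoint };

    const char* mode_to_string(Mode mode) {
      switch (mode) {
        case _safepoint      : return "safepoint";
        case _no_safepoint   : return "no safepoint";
        case _concurrent     : return "concurrent";
        case _async_safepoint: return "async safepoint";
        default              : return "unknown";
      }
    }

    int main() {
      printf("mode: %s\n", mode_to_string(_concurrent));  // prints "concurrent"
      return 0;
    }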
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/services/attachListener.cpp
--- a/src/share/vm/services/attachListener.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/services/attachListener.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -157,7 +157,7 @@
   Thread* THREAD = Thread::current();
   // All the supplied jcmd arguments are stored as a single
   // string (op->arg(0)). This is parsed by the Dcmd framework.
-  DCmd::parse_and_execute(out, op->arg(0), ' ', THREAD);
+  DCmd::parse_and_execute(DCmd_Source_AttachAPI, out, op->arg(0), ' ', THREAD);
   if (HAS_PENDING_EXCEPTION) {
     java_lang_Throwable::print(PENDING_EXCEPTION, out);
     out->cr();
@@ -227,7 +227,7 @@
     }
     live_objects_only = strcmp(arg0, "-live") == 0;
   }
-  VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */,
-                              true /* need_prologue */);
+  VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */);
   VMThread::execute(&heapop);
   return JNI_OK;
 }
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/services/diagnosticArgument.cpp
--- a/src/share/vm/services/diagnosticArgument.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/services/diagnosticArgument.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
 #include "runtime/thread.hpp"
 #include "services/diagnosticArgument.hpp"

@@ -86,9 +87,18 @@

 template <> void DCmdArgument<jlong>::parse_value(const char* str,
                                                   size_t len, TRAPS) {
-  if (str == NULL || sscanf(str, JLONG_FORMAT, &_value) != 1) {
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-      "Integer parsing error in diagnostic command arguments\n");
+  int scanned = -1;
+  if (str == NULL
+      || sscanf(str, JLONG_FORMAT"%n", &_value, &scanned) != 1
+      || (size_t)scanned != len)
+  {
+    ResourceMark rm;
+
+    char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
+    strncpy(buf, str, len);
+    buf[len] = '\0';
+    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
+      "Integer parsing error in command argument '%s'. Could not parse: %s.", _name, buf);
   }
 }

@@ -96,7 +106,7 @@
   if (has_default()) {
     this->parse_value(_default_string, strlen(_default_string), THREAD);
     if (HAS_PENDING_EXCEPTION) {
-      fatal("Default string must be parsable");
+      fatal("Default string must be parseable");
     }
   } else {
     set_value(0);
@@ -116,8 +126,13 @@
   } else if (len == strlen("false") &&
              strncasecmp(str, "false", len) == 0) {
     set_value(false);
   } else {
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-      "Boolean parsing error in diagnostic command arguments");
+    ResourceMark rm;
+
+    char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
+    strncpy(buf, str, len);
+    buf[len] = '\0';
+    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
+      "Boolean parsing error in command argument '%s'. Could not parse: %s.", _name, buf);
     }
   }
 }

@@ -168,7 +183,7 @@
                                                  size_t len, TRAPS) {
   if (str == NULL) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-              "Integer parsing error nanotime value: syntax error");
+              "Integer parsing error nanotime value: syntax error, value is null");
   }

   int argc = sscanf(str, JLONG_FORMAT, &_value._time);
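The new jlong parser above rejects trailing garbage by asking sscanf to report, via %n, how many characters the numeric conversion consumed; anything short of the full argument length now fails. A portable sketch of the same check ("%lld" stands in for HotSpot's JLONG_FORMAT, which is an assumption about the target format string):

    #include <cstdio>
    #include <cstring>

    static bool parse_strict_jlong(const char* str, size_t len, long long* out) {
      int scanned = -1;
      if (str == NULL
          || sscanf(str, "%lld%n", out, &scanned) != 1
          || (size_t)scanned != len) {
        return false;  // reject: not a number, or number followed by junk
      }
      return true;
    }

    // parse_strict_jlong("123", 3, &v)    -> true,  v == 123
    // parse_strict_jlong("123abc", 6, &v) -> false (only 3 chars consumed)
    // parse_strict_jlong("12 3", 4, &v)   -> false (conversion stops at space)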
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/services/diagnosticCommand.cpp
--- a/src/share/vm/services/diagnosticCommand.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/services/diagnosticCommand.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -34,26 +34,33 @@

 void DCmdRegistrant::register_dcmds(){
   // Registration of the diagnostic commands
-  // First boolean argument specifies if the command is enabled
-  // Second boolean argument specifies if the command is hidden
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HelpDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VersionDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CommandLineDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintSystemPropertiesDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintVMFlagsDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(true, false));
+  // First argument specifies which interfaces will export the command
+  // Second argument specifies if the command is enabled
+  // Third argument specifies if the command is hidden
+  uint32_t full_export = DCmd_Source_Internal | DCmd_Source_AttachAPI
+                         | DCmd_Source_MBean;
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HelpDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VersionDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CommandLineDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintSystemPropertiesDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintVMFlagsDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(full_export, true, false));
 #if INCLUDE_SERVICES // Heap dumping/inspection supported
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassStatsDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassStatsDCmd>(full_export, true, false));
 #endif // INCLUDE_SERVICES
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(true, false));
-  //Enhanced JMX Agent Support
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartRemoteDCmd>(true,false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartLocalDCmd>(true,false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStopRemoteDCmd>(true,false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(full_export, true, false));
+
+  // Enhanced JMX Agent Support
+  // These commands won't be exported via the DiagnosticCommandMBean until an
+  // appropriate permission is created for them
+  uint32_t jmx_agent_export_flags = DCmd_Source_Internal | DCmd_Source_AttachAPI;
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartRemoteDCmd>(jmx_agent_export_flags, true,false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartLocalDCmd>(jmx_agent_export_flags, true,false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStopRemoteDCmd>(jmx_agent_export_flags, true,false));

 }

@@ -72,29 +79,37 @@
   _dcmdparser.add_dcmd_argument(&_cmd);
 };

-void HelpDCmd::execute(TRAPS) {
+void HelpDCmd::execute(DCmdSource source, TRAPS) {
   if (_all.value()) {
-    GrowableArray<const char*>* cmd_list = DCmdFactory::DCmd_list();
+    GrowableArray<const char*>* cmd_list = DCmdFactory::DCmd_list(source);
     for (int i = 0; i < cmd_list->length(); i++) {
-      DCmdFactory* factory = DCmdFactory::factory(cmd_list->at(i),
+      DCmdFactory* factory = DCmdFactory::factory(source, cmd_list->at(i),
                                                   strlen(cmd_list->at(i)));
-      if (!factory->is_hidden()) {
-        output()->print_cr("%s%s", factory->name(),
-                           factory->is_enabled() ? "" : " [disabled]");
-        output()->print_cr("\t%s", factory->description());
-        output()->cr();
-      }
+      output()->print_cr("%s%s", factory->name(),
+                         factory->is_enabled() ? "" : " [disabled]");
+      output()->print_cr("\t%s", factory->description());
+      output()->cr();
       factory = factory->next();
     }
   } else if (_cmd.has_value()) {
     DCmd* cmd = NULL;
-    DCmdFactory* factory = DCmdFactory::factory(_cmd.value(),
+    DCmdFactory* factory = DCmdFactory::factory(source, _cmd.value(),
                                                 strlen(_cmd.value()));
     if (factory != NULL) {
       output()->print_cr("%s%s", factory->name(),
                          factory->is_enabled() ? "" : " [disabled]");
       output()->print_cr(factory->description());
       output()->print_cr("\nImpact: %s", factory->impact());
+      JavaPermission p = factory->permission();
+      if(p._class != NULL) {
+        if(p._action != NULL) {
+          output()->print_cr("\nPermission: %s(%s, %s)",
+                  p._class, p._name == NULL ? "null" : p._name, p._action);
+        } else {
+          output()->print_cr("\nPermission: %s(%s)",
+                  p._class, p._name == NULL ? "null" : p._name);
+        }
+      }
       output()->cr();
       cmd = factory->create_resource_instance(output());
       if (cmd != NULL) {
@@ -106,14 +121,12 @@
     }
   } else {
     output()->print_cr("The following commands are available:");
-    GrowableArray<const char*>* cmd_list = DCmdFactory::DCmd_list();
+    GrowableArray<const char*>* cmd_list = DCmdFactory::DCmd_list(source);
     for (int i = 0; i < cmd_list->length(); i++) {
-      DCmdFactory* factory = DCmdFactory::factory(cmd_list->at(i),
+      DCmdFactory* factory = DCmdFactory::factory(source, cmd_list->at(i),
                                                   strlen(cmd_list->at(i)));
-      if (!factory->is_hidden()) {
-        output()->print_cr("%s%s", factory->name(),
-                           factory->is_enabled() ? "" : " [disabled]");
-      }
+      output()->print_cr("%s%s", factory->name(),
+                         factory->is_enabled() ? "" : " [disabled]");
       factory = factory->_next;
     }
     output()->print_cr("\nFor more information about a specific command use 'help <command>'.");
   }
 }
@@ -131,7 +144,7 @@
   }
 }

-void VersionDCmd::execute(TRAPS) {
+void VersionDCmd::execute(DCmdSource source, TRAPS) {
   output()->print_cr("%s version %s", Abstract_VM_Version::vm_name(),
                      Abstract_VM_Version::vm_release());
   JDK_Version jdk_version = JDK_Version::current();
@@ -150,7 +163,7 @@
   _dcmdparser.add_dcmd_option(&_all);
 }

-void PrintVMFlagsDCmd::execute(TRAPS) {
+void PrintVMFlagsDCmd::execute(DCmdSource source, TRAPS) {
   if (_all.value()) {
     CommandLineFlags::printFlags(output(), true);
   } else {
@@ -169,7 +182,7 @@
   }
 }

-void PrintSystemPropertiesDCmd::execute(TRAPS) {
+void PrintSystemPropertiesDCmd::execute(DCmdSource source, TRAPS) {
   // load sun.misc.VMSupport
   Symbol* klass = vmSymbols::sun_misc_VMSupport();
   Klass* k = SystemDictionary::resolve_or_fail(klass, true, CHECK);
@@ -219,7 +232,7 @@
   _dcmdparser.add_dcmd_option(&_date);
 }

-void VMUptimeDCmd::execute(TRAPS) {
+void VMUptimeDCmd::execute(DCmdSource source, TRAPS) {
   if (_date.value()) {
     output()->date_stamp(true, "", ": ");
   }
@@ -239,11 +252,15 @@
   }
 }

-void SystemGCDCmd::execute(TRAPS) {
-  Universe::heap()->collect(GCCause::_java_lang_system_gc);
+void SystemGCDCmd::execute(DCmdSource source, TRAPS) {
+  if (!DisableExplicitGC) {
+    Universe::heap()->collect(GCCause::_java_lang_system_gc);
+  } else {
+    output()->print_cr("Explicit GC is disabled, no GC has been performed.");
+  }
 }

-void RunFinalizationDCmd::execute(TRAPS) {
+void RunFinalizationDCmd::execute(DCmdSource source, TRAPS) {
   Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(),
                                                true, CHECK);
   instanceKlassHandle klass(THREAD, k);
@@ -263,7 +280,7 @@
   _dcmdparser.add_dcmd_argument(&_filename);
 }

-void HeapDumpDCmd::execute(TRAPS) {
+void HeapDumpDCmd::execute(DCmdSource source, TRAPS) {
   // Request a full GC before heap dump if _all is false
   // This helps reduces the amount of unreachable objects in the dump
   // and makes it easier to browse.
@@ -301,10 +318,9 @@
   _dcmdparser.add_dcmd_option(&_all);
 }

-void ClassHistogramDCmd::execute(TRAPS) {
+void ClassHistogramDCmd::execute(DCmdSource source, TRAPS) {
   VM_GC_HeapInspection heapop(output(),
-                              !_all.value() /* request full gc if false */,
-                              true /* need_prologue */);
+                              !_all.value() /* request full gc if false */);
   VMThread::execute(&heapop);
 }

@@ -337,15 +353,14 @@
   _dcmdparser.add_dcmd_argument(&_columns);
 }

-void ClassStatsDCmd::execute(TRAPS) {
+void ClassStatsDCmd::execute(DCmdSource source, TRAPS) {
   if (!UnlockDiagnosticVMOptions) {
     output()->print_cr("GC.class_stats command requires -XX:+UnlockDiagnosticVMOptions");
     return;
   }
   VM_GC_HeapInspection heapop(output(),
-                              true, /* request_full_gc */
-                              true /* need_prologue */);
+                              true /* request_full_gc */);
   heapop.set_csv_format(_csv.value());
   heapop.set_print_help(_help.value());
   heapop.set_print_class_stats(true);
@@ -384,7 +399,7 @@
   _dcmdparser.add_dcmd_option(&_locks);
 }

-void ThreadDumpDCmd::execute(TRAPS) {
+void ThreadDumpDCmd::execute(DCmdSource source, TRAPS) {
   // thread stacks
   VM_PrintThreads op1(output(), _locks.value());
   VMThread::execute(&op1);
@@ -526,7 +541,8 @@
   }
 }

-void JMXStartRemoteDCmd::execute(TRAPS) {
+
+void JMXStartRemoteDCmd::execute(DCmdSource source, TRAPS) {
   ResourceMark rm(THREAD);
   HandleMark hm(THREAD);

@@ -593,7 +609,7 @@
   // do nothing
 }

-void JMXStartLocalDCmd::execute(TRAPS) {
+void JMXStartLocalDCmd::execute(DCmdSource source, TRAPS) {
   ResourceMark rm(THREAD);
   HandleMark hm(THREAD);

@@ -611,7 +627,7 @@
 }


-void JMXStopRemoteDCmd::execute(TRAPS) {
+void JMXStopRemoteDCmd::execute(DCmdSource source, TRAPS) {
   ResourceMark rm(THREAD);
   HandleMark hm(THREAD);
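Note: the template arguments of DCmdFactoryImpl above were stripped in transmission; the class names restored here follow the declaration order in diagnosticCommand.hpp below and should be read as a best-effort reconstruction. The registration rework keys each factory to the interfaces that may see it, and membership tests are plain bitmask ANDs. A runnable sketch (names and values mirror the DCmdSource enum that appears later in this patch; the FactorySketch type is illustrative only):

    #include <cstdint>
    #include <cstdio>

    static const uint32_t Src_Internal  = 0x01U;  // invoked from inside the JVM
    static const uint32_t Src_AttachAPI = 0x02U;  // invoked via jcmd / attach API
    static const uint32_t Src_MBean     = 0x04U;  // invoked via the MBean

    struct FactorySketch {
      const char* name;
      uint32_t    export_flags;
      bool visible_to(uint32_t source) const {
        return (export_flags & source) != 0;
      }
    };

    int main() {
      // The JMX agent commands carry Internal|AttachAPI but not the MBean
      // bit, so they stay invisible to the DiagnosticCommandMBean:
      FactorySketch f = { "ManagementAgent.start", Src_Internal | Src_AttachAPI };
      printf("via jcmd:  %s\n", f.visible_to(Src_AttachAPI) ? "yes" : "no"); // yes
      printf("via MBean: %s\n", f.visible_to(Src_MBean)     ? "yes" : "no"); // no
      return 0;
    }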
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/services/diagnosticCommand.hpp
--- a/src/share/vm/services/diagnosticCommand.hpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/services/diagnosticCommand.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -51,7 +51,7 @@
   }
   static const char* impact() { return "Low"; }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 class VersionDCmd : public DCmd {
@@ -62,8 +62,13 @@
     return "Print JVM version information.";
   }
   static const char* impact() { return "Low"; }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.util.PropertyPermission",
+                        "java.vm.version", "read"};
+    return p;
+  }
   static int num_arguments() { return 0; }
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 class CommandLineDCmd : public DCmd {
@@ -74,8 +79,13 @@
     return "Print the command line used to start this VM instance.";
   }
   static const char* impact() { return "Low"; }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments() { return 0; }
-  virtual void execute(TRAPS) {
+  virtual void execute(DCmdSource source, TRAPS) {
     Arguments::print_on(_output);
   }
 };
@@ -91,8 +101,13 @@
   static const char* impact() {
     return "Low";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.util.PropertyPermission",
+                        "*", "read"};
+    return p;
+  }
   static int num_arguments() { return 0; }
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 // See also: print_flag in attachListener.cpp
@@ -108,8 +123,13 @@
   static const char* impact() {
     return "Low";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 class VMUptimeDCmd : public DCmdWithParser {
@@ -125,7 +145,7 @@
     return "Low";
   }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 class SystemGCDCmd : public DCmd {
@@ -139,7 +159,7 @@
     return "Medium: Depends on Java heap size and content.";
   }
   static int num_arguments() { return 0; }
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 class RunFinalizationDCmd : public DCmd {
@@ -153,7 +173,7 @@
     return "Medium: Depends on Java content.";
   }
   static int num_arguments() { return 0; }
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 #if INCLUDE_SERVICES // Heap dumping supported
@@ -174,8 +194,13 @@
     return "High: Depends on Java heap size and content. "
            "Request a full GC unless the '-all' option is specified.";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 #endif // INCLUDE_SERVICES

@@ -194,8 +219,13 @@
   static const char* impact() {
     return "High: Depends on Java heap size and content.";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 class ClassStatsDCmd : public DCmdWithParser {
@@ -216,7 +246,7 @@
     return "High: Depends on Java heap size and content.";
   }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 // See also: thread_dump in attachListener.cpp
@@ -232,8 +262,13 @@
   static const char* impact() {
     return "Medium: Depends on the number of threads.";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 // Enhanced JMX Agent support
@@ -281,7 +316,7 @@

   static int num_arguments();

-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);

 };

@@ -302,7 +337,7 @@
     return "Start local management agent.";
   }

-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);

 };

@@ -321,7 +356,7 @@
     return "Stop remote management agent.";
   }

-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };

 #endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_HPP
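The shape every command class takes under this scheme: a static permission() describing the Java permission the MBean layer should check, plus the source-aware execute(). A sketch with a hypothetical command (the struct mirrors JavaPermission from diagnosticFramework.hpp later in this patch; ExampleInfoDCmd does not exist in HotSpot):

    #include <cstddef>

    struct JavaPermissionSketch {
      const char* _class;   // permission class name; must outlive the JDK call
      const char* _name;    // permission target, may be NULL
      const char* _action;  // permission action, may be NULL
    };

    class ExampleInfoDCmd /* : public DCmd */ {
     public:
      static const char* impact() { return "Low"; }
      static JavaPermissionSketch permission() {
        // Only string literals here: the strings are handed to JDK native
        // code, per the lifetime warning in the framework header below.
        JavaPermissionSketch p = {"java.lang.management.ManagementPermission",
                                  "monitor", NULL};
        return p;
      }
      // void execute(DCmdSource source, TRAPS);  // source-aware entry point
    };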
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/services/diagnosticFramework.cpp
--- a/src/share/vm/services/diagnosticFramework.cpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/services/diagnosticFramework.cpp	Wed Jun 19 10:45:56 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -359,7 +359,7 @@
   while (arg != NULL) {
     array->append(new DCmdArgumentInfo(arg->name(), arg->description(),
                   arg->type(), arg->default_string(), arg->is_mandatory(),
-                  false, idx));
+                  false, arg->allow_multiple(), idx));
     idx++;
     arg = arg->next();
   }
@@ -367,32 +367,42 @@
   while (arg != NULL) {
     array->append(new DCmdArgumentInfo(arg->name(), arg->description(),
                   arg->type(), arg->default_string(), arg->is_mandatory(),
-                  true));
+                  true, arg->allow_multiple()));
     arg = arg->next();
   }
   return array;
 }

 DCmdFactory* DCmdFactory::_DCmdFactoryList = NULL;
+bool DCmdFactory::_has_pending_jmx_notification = false;

-void DCmd::parse_and_execute(outputStream* out, const char* cmdline,
-                             char delim, TRAPS) {
+void DCmd::parse_and_execute(DCmdSource source, outputStream* out,
+                             const char* cmdline, char delim, TRAPS) {

   if (cmdline == NULL) return; // Nothing to do!
   DCmdIter iter(cmdline, '\n');

+  int count = 0;
   while (iter.has_next()) {
+    if(source == DCmd_Source_MBean && count > 0) {
+      // When diagnostic commands are invoked via JMX, each command line
+      // must contains one and only one command because of the Permission
+      // checks performed by the DiagnosticCommandMBean
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+                "Invalid syntax");
+    }
     CmdLine line = iter.next();
     if (line.is_stop()) {
       break;
     }
     if (line.is_executable()) {
-      DCmd* command = DCmdFactory::create_local_DCmd(line, out, CHECK);
+      DCmd* command = DCmdFactory::create_local_DCmd(source, line, out, CHECK);
       assert(command != NULL, "command error must be handled before this line");
       DCmdMark mark(command);
       command->parse(&line, delim, CHECK);
-      command->execute(CHECK);
+      command->execute(source, CHECK);
     }
+    count++;
   }
 }

@@ -420,15 +430,78 @@
   return _dcmdparser.argument_info_array();
 }

-Mutex* DCmdFactory::_dcmdFactory_lock = new Mutex(Mutex::leaf, "DCmdFactory", true);
+void DCmdFactory::push_jmx_notification_request() {
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  _has_pending_jmx_notification = true;
+  Service_lock->notify_all();
+}
+
+void DCmdFactory::send_notification(TRAPS) {
+  DCmdFactory::send_notification_internal(THREAD);
+  // Clearing pending exception to avoid premature termination of
+  // the service thread
+  if (HAS_PENDING_EXCEPTION) {
+    CLEAR_PENDING_EXCEPTION;
+  }
+}
+void DCmdFactory::send_notification_internal(TRAPS) {
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  bool notif = false;
+  {
+    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+    notif = _has_pending_jmx_notification;
+    _has_pending_jmx_notification = false;
+  }
+  if (notif) {
+
+    Klass* k = Management::sun_management_ManagementFactoryHelper_klass(CHECK);
+    instanceKlassHandle mgmt_factory_helper_klass(THREAD, k);

-DCmdFactory* DCmdFactory::factory(const char* name, size_t len) {
+    JavaValue result(T_OBJECT);
+    JavaCalls::call_static(&result,
+                           mgmt_factory_helper_klass,
+                           vmSymbols::getDiagnosticCommandMBean_name(),
+                           vmSymbols::getDiagnosticCommandMBean_signature(),
+                           CHECK);
+
+    instanceOop m = (instanceOop) result.get_jobject();
+    instanceHandle dcmd_mbean_h(THREAD, m);
+
+    Klass* k2 = Management::sun_management_DiagnosticCommandImpl_klass(CHECK);
+    instanceKlassHandle dcmd_mbean_klass(THREAD, k2);
+
+    if (!dcmd_mbean_h->is_a(k2)) {
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+                "ManagementFactory.getDiagnosticCommandMBean didn't return a DiagnosticCommandMBean instance");
+    }
+
+    JavaValue result2(T_VOID);
+    JavaCallArguments args2(dcmd_mbean_h);
+
+    JavaCalls::call_virtual(&result2,
+                            dcmd_mbean_klass,
+                            vmSymbols::createDiagnosticFrameworkNotification_name(),
+                            vmSymbols::void_method_signature(),
+                            &args2,
+                            CHECK);
+  }
+}
+
+Mutex* DCmdFactory::_dcmdFactory_lock = new Mutex(Mutex::leaf, "DCmdFactory", true);
+bool DCmdFactory::_send_jmx_notification = false;
+
+DCmdFactory* DCmdFactory::factory(DCmdSource source, const char* name, size_t len) {
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   DCmdFactory* factory = _DCmdFactoryList;
   while (factory != NULL) {
     if (strlen(factory->name()) == len &&
         strncmp(name, factory->name(), len) == 0) {
-      return factory;
+      if(factory->export_flags() & source) {
+        return factory;
+      } else {
+        return NULL;
+      }
     }
     factory = factory->_next;
   }
@@ -439,11 +512,16 @@
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   factory->_next = _DCmdFactoryList;
   _DCmdFactoryList = factory;
+  if (_send_jmx_notification && !factory->_hidden
+      && (factory->_export_flags & DCmd_Source_MBean)) {
+    DCmdFactory::push_jmx_notification_request();
+  }
   return 0; // Actually, there's no checks for duplicates
 }

-DCmd* DCmdFactory::create_global_DCmd(CmdLine &line, outputStream* out, TRAPS) {
-  DCmdFactory* f = factory(line.cmd_addr(), line.cmd_len());
+DCmd* DCmdFactory::create_global_DCmd(DCmdSource source, CmdLine &line,
+                                      outputStream* out, TRAPS) {
+  DCmdFactory* f = factory(source, line.cmd_addr(), line.cmd_len());
   if (f != NULL) {
     if (f->is_enabled()) {
       THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(),
@@ -455,8 +533,9 @@
                "Unknown diagnostic command");
 }

-DCmd* DCmdFactory::create_local_DCmd(CmdLine &line, outputStream* out, TRAPS) {
-  DCmdFactory* f = factory(line.cmd_addr(), line.cmd_len());
+DCmd* DCmdFactory::create_local_DCmd(DCmdSource source, CmdLine &line,
+                                     outputStream* out, TRAPS) {
+  DCmdFactory* f = factory(source, line.cmd_addr(), line.cmd_len());
   if (f != NULL) {
     if (!f->is_enabled()) {
       THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(),
@@ -468,12 +547,12 @@
                "Unknown diagnostic command");
 }

-GrowableArray<const char*>* DCmdFactory::DCmd_list() {
+GrowableArray<const char*>* DCmdFactory::DCmd_list(DCmdSource source) {
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   GrowableArray<const char*>* array = new GrowableArray<const char*>();
   DCmdFactory* factory = _DCmdFactoryList;
   while (factory != NULL) {
-    if (!factory->is_hidden()) {
+    if (!factory->is_hidden() && (factory->export_flags() & source)) {
       array->append(factory->name());
     }
     factory = factory->next();
   }
@@ -481,15 +560,16 @@
   return array;
 }

-GrowableArray<DCmdInfo*>* DCmdFactory::DCmdInfo_list() {
+GrowableArray<DCmdInfo*>* DCmdFactory::DCmdInfo_list(DCmdSource source ) {
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   GrowableArray<DCmdInfo*>* array = new GrowableArray<DCmdInfo*>();
   DCmdFactory* factory = _DCmdFactoryList;
   while (factory != NULL) {
-    if (!factory->is_hidden()) {
+    if (!factory->is_hidden() && (factory->export_flags() & source)) {
       array->append(new DCmdInfo(factory->name(),
                     factory->description(), factory->impact(),
-                    factory->num_arguments(), factory->is_enabled()));
+                    factory->permission(), factory->num_arguments(),
+                    factory->is_enabled()));
     }
     factory = factory->next();
   }
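The JMX notification plumbing above is a flag handoff: registration code (which must not call into Java) merely sets a flag under a lock and pokes the service thread, which later performs the Java upcall. A portable sketch of that handoff using the standard library in place of HotSpot's Service_lock (an assumption; the real code uses MutexLockerEx and notify_all on Service_lock):

    #include <condition_variable>
    #include <mutex>

    class NotificationRequest {
      std::mutex _lock;
      std::condition_variable _cv;
      bool _pending = false;
     public:
      void push() {                 // cf. push_jmx_notification_request()
        std::lock_guard<std::mutex> g(_lock);
        _pending = true;
        _cv.notify_all();           // wake the service thread
      }
      bool consume() {              // cf. the flag check in send_notification_internal()
        std::lock_guard<std::mutex> g(_lock);
        bool was_pending = _pending;
        _pending = false;
        return was_pending;         // caller then performs the expensive upcall
      }
    };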
diff -r e0fb8a213650 -r 836a62f43af9 src/share/vm/services/diagnosticFramework.hpp
--- a/src/share/vm/services/diagnosticFramework.hpp	Tue Jun 18 14:23:29 2013 -0700
+++ b/src/share/vm/services/diagnosticFramework.hpp	Wed Jun 19 10:45:56 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,22 @@
 #include "utilities/ostream.hpp"

+enum DCmdSource {
+  DCmd_Source_Internal  = 0x01U,  // invocation from the JVM
+  DCmd_Source_AttachAPI = 0x02U,  // invocation via the attachAPI
+  DCmd_Source_MBean     = 0x04U   // invocation via a MBean
+};
+
+// Warning: strings referenced by the JavaPermission struct are passed to
+// the native part of the JDK. Avoid use of dynamically allocated strings
+// that could be de-allocated before the JDK native code had time to
+// convert them into Java Strings.
+struct JavaPermission {
+  const char* _class;
+  const char* _name;
+  const char* _action;
+};
+
 // CmdLine is the class used to handle a command line containing a single
 // diagnostic command and its arguments. It provides methods to access the
 // command name and the beginning of the arguments. The class is also
@@ -113,26 +129,30 @@
 // used to export the description to the JMX interface of the framework.
 class DCmdInfo : public ResourceObj {
 protected:
-  const char* _name;
-  const char* _description;
-  const char* _impact;
-  int _num_arguments;
-  bool _is_enabled;
+  const char* _name;           /* Name of the diagnostic command */
+  const char* _description;    /* Short description */
+  const char* _impact;         /* Impact on the JVM */
+  JavaPermission _permission;  /* Java Permission required to execute this command if any */
+  int _num_arguments;          /* Number of supported options or arguments */
+  bool _is_enabled;            /* True if the diagnostic command can be invoked, false otherwise */
 public:
   DCmdInfo(const char* name,
            const char* description,
            const char* impact,
+           JavaPermission permission,
            int num_arguments,
            bool enabled) {
     this->_name = name;
     this->_description = description;
     this->_impact = impact;
+    this->_permission = permission;
     this->_num_arguments = num_arguments;
     this->_is_enabled = enabled;
   }
   const char* name() const { return _name; }
   const char* description() const { return _description; }
   const char* impact() const { return _impact; }
+  JavaPermission permission() const { return _permission; }
   int num_arguments() const { return _num_arguments; }
   bool is_enabled() const { return _is_enabled; }

@@ -144,16 +164,20 @@
 // framework.
 class DCmdArgumentInfo : public ResourceObj {
 protected:
-  const char* _name;
-  const char* _description;
-  const char* _type;
-  const char* _default_string;
-  bool _mandatory;
-  bool _option;
-  int _position;
+  const char* _name;            /* Option/Argument name*/
+  const char* _description;     /* Short description */
+  const char* _type;            /* Type: STRING, BOOLEAN, etc. */
+  const char* _default_string;  /* Default value in a parsable string */
+  bool _mandatory;              /* True if the option/argument is mandatory */
+  bool _option;                 /* True if it is an option, false if it is an argument */
+                                /* (see diagnosticFramework.hpp for option/argument definitions) */
+  bool _multiple;               /* True is the option can be specified several time */
+  int _position;                /* Expected position for this argument (this field is */
+                                /* meaningless for options) */
 public:
   DCmdArgumentInfo(const char* name, const char* description, const char* type,
-                   const char* default_string, bool mandatory, bool option) {
+                   const char* default_string, bool mandatory, bool option,
+                   bool multiple) {
     this->_name = name;
     this->_description = description;
     this->_type = type;
@@ -161,11 +185,12 @@
     this->_option = option;
     this->_mandatory = mandatory;
     this->_option = option;
+    this->_multiple = multiple;
     this->_position = -1;
   }
   DCmdArgumentInfo(const char* name, const char* description, const char* type,
                    const char* default_string, bool mandatory, bool option,
-                   int position) {
+                   bool multiple, int position) {
     this->_name = name;
     this->_description = description;
     this->_type = type;
@@ -173,6 +198,7 @@
     this->_option = option;
     this->_mandatory = mandatory;
     this->_option = option;
+    this->_multiple = multiple;
     this->_position = position;
   }
   const char* name() const { return _name; }
@@ -181,11 +207,29 @@
   const char* default_string() const { return _default_string; }
   bool is_mandatory() const { return _mandatory; }
   bool is_option() const { return _option; }
+  bool is_multiple() const { return _multiple; }
   int position() const { return _position; }
 };

 // The DCmdParser class can be used to create an argument parser for a
 // diagnostic command. It is not mandatory to use it to parse arguments.
+// The DCmdParser parses a CmdLine instance according to the parameters that
+// have been declared by its associated diagnostic command. A parameter can
+// either be an option or an argument. Options are identified by the option name
+// while arguments are identified by their position in the command line. The
+// position of an argument is defined relative to all arguments passed on the
+// command line, options are not considered when defining an argument position.
+// The generic syntax of a diagnostic command is:
+//
+// <command name> [<option>=<value>] [<argument_value>]
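The section above cuts off inside this comment block; the final syntax line is reconstructed from context and should be treated as a best-effort reading. The option/argument split it describes can be illustrated with a toy classifier (this is not HotSpot's DCmdParser, just the rule the comment states):

    #include <cstdio>
    #include <cstring>

    // A token containing '=' is an option (matched by name); anything else is
    // an argument (matched by its position among arguments only, so options
    // never shift argument positions).
    void classify(const char* tokens[], int n) {
      int arg_position = 0;
      for (int i = 0; i < n; i++) {
        if (strchr(tokens[i], '=') != NULL) {
          printf("option:   %s\n", tokens[i]);
        } else {
          printf("argument: %s (position %d)\n", tokens[i], arg_position++);
        }
      }
    }

    int main() {
      const char* line[] = { "filename.hprof", "-all=true" };  // hypothetical input
      classify(line, 2);  // argument at position 0, then one option
      return 0;
    }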