# HG changeset patch
# User acorn
# Date 1367860818 25200
# Node ID b7f3bf2ba33bcfa84be6d787804db3901334c4a1
# Parent  d9b08d62b95ea8578eaf2fa3a520597a5d813bde
# Parent  800078be49d2f3c22d3470df74181b8d3ba68794
Merge

diff -r d9b08d62b95e -r b7f3bf2ba33b .hgtags
--- a/.hgtags	Thu May 02 10:58:04 2013 -0400
+++ b/.hgtags	Mon May 06 10:20:18 2013 -0700
@@ -337,3 +337,5 @@
 d4c2667846607042370760e23f64c3ab9350e60d jdk8-b87
 01d5f04e64dc2d64625b2db2056f5ed4de918a45 hs25-b29
 c4af77d2045476c56fbf3f914b336bb1b7cd18af hs25-b30
+8482058e74bc8c1a890e6f3be3eff192dba6ce67 jdk8-b88
+4ec91349972255650f97bedfd07e6423e02428cf hs25-b31

diff -r d9b08d62b95e -r b7f3bf2ba33b agent/doc/c2replay.html
--- a/agent/doc/c2replay.html	Thu May 02 10:58:04 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-<html>
-<head>
-<title>
-C2 Replay
-</title>
-</head>
-<body>
-
-<h1>C2 compiler replay</h1>
-<p>
-The C2 compiler replay is a function to repeat the compiling process from a crashed java process in compiled method<br>
-This function only exists in debug version of VM
-</p>
-<h2>Usage</h2>
-<pre>
-First, use SA to attach to the core file, if suceeded, do
-       clhsdb>dumpreplaydata <address> | -a | <thread_id> [> replay.txt]
-       create file replay.txt, address is address of Method, or nmethod(CodeBlob)
-       clhsdb>buildreplayjars [all | boot | app]
-       create files:
-         all:
-           app.jar, boot.jar
-         boot:
-           boot.jar
-         app:
-           app.jar
-       exit SA now.
-Second, use the obtained replay text file, replay.txt and jar files, app.jar and boot.jar, using debug version of java
-       java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=<datafile> -XX:+ReplayCompiles ....
-       This will replay the compiling process.
-
-       With ReplayCompiles, the replay will recompile all the methods in app.jar, and in boot.jar to emulate the process in java app.
-
-notes:
-       1) Most time, we don't need the boot.jar which is the classes loaded from JDK. It will be only modified when an agent(JVMDI) is running and modifies the classes.
-       2) If encounter error as "<flag>" not found, that means the SA is using a VMStructs which is different from the one with corefile. In this case, SA has a utility tool vmstructsdump which is located at agent/src/os/<os>/proc/<os_platform>
-
-       Use this tool to dump VM type library:
-       vmstructsdump libjvm.so > <type_name>.db
-
-       set env SA_TYPEDB=<type_name>.db (refer different shell for set envs)

diff -r d9b08d62b95e -r b7f3bf2ba33b agent/doc/cireplay.html
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/doc/cireplay.html	Mon May 06 10:20:18 2013 -0700
@@ -0,0 +1,41 @@
+<html>
+<head>
+<title>
+Replay
+</title>
+</head>
+<body>
+

+<h1>Compiler replay</h1>
+<p>
+The compiler replay is a function to repeat the compiling process of a method that was being compiled when a java process crashed.<br>
+This function only exists in the debug version of the VM.
+</p>
+<h2>Usage</h2>
+<pre>
+First, use SA to attach to the core file. If that succeeds, do
+       hsdb> dumpreplaydata <address> | -a | <thread_id> [> replay.txt]
+       creates the file replay.txt; <address> is the address of a Method or an nmethod (CodeBlob)
+       hsdb> buildreplayjars [all | boot | app]
+       create files:
+         all:
+           app.jar, boot.jar
+         boot:
+           boot.jar
+         app:
+           app.jar
+       exit SA now.
+Second, use the replay text file replay.txt and the jar files app.jar and boot.jar with a debug version of java:
+       java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=<datafile> -XX:+ReplayCompiles ....
+       This will replay the compiling process.
+
+       With ReplayCompiles, the replay will recompile all the methods in app.jar and boot.jar, to emulate the process in the java app.
+
+notes:
+       1) Most of the time, we don't need boot.jar, which contains the classes loaded from the JDK; it is only modified when an agent (JVMDI) is running and modifies the classes.
+       2) If you encounter an error such as "<flag>" not found, the SA is using a VMStructs that differs from the one in the core file. In this case, SA has a utility tool, vmstructsdump, located at agent/src/os/<os>/proc/<os_platform>.
+
+       Use this tool to dump the VM type library:
+       vmstructsdump libjvm.so > <type_name>.db
+
+       set the environment variable SA_TYPEDB=<type_name>.db (the syntax for setting environment variables differs between shells)
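For concreteness, a complete replay session might look like the following minimal sketch, assuming the SA tool classes are on the class path as in a standard JDK layout; the java executable path, the core file name core.12345, and the type-database name jvm.db are hypothetical placeholders:

       $ java -cp $JAVA_HOME/lib/sa-jdi.jar sun.jvm.hotspot.CLHSDB
       hsdb> attach /usr/java/bin/java core.12345
       hsdb> dumpreplaydata -a > replay.txt
       hsdb> buildreplayjars all
       hsdb> quit
       $ java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=replay.txt -XX:+ReplayCompiles ....

       (if "<flag>" not found errors appear, dump the type library first)
       $ vmstructsdump libjvm.so > jvm.db
       $ export SA_TYPEDB=jvm.db      (bash syntax; other shells differ)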
diff -r d9b08d62b95e -r b7f3bf2ba33b agent/doc/clhsdb.html
--- a/agent/doc/clhsdb.html	Thu May 02 10:58:04 2013 -0400
+++ b/agent/doc/clhsdb.html	Mon May 06 10:20:18 2013 -0700
@@ -15,7 +15,7 @@
 

 <p>
 There is also JavaScript based SA command line interface called jsdb. But, CLHSDB
 supports Unix shell-like (or dbx/gdb-like) command line interface with
-support for output redirection/appending (familiar >, >>), command history and so on. 
+support for output redirection/appending (familiar >, >>), command history and so on.
 Each CLHSDB command can have zero or more arguments and optionally end with output
 redirection (or append) to a file. Commands may be stored in a file and run using
 source command. help command prints usage message for all supported commands (or a specific command)
@@ -49,7 +49,7 @@
    dumpheap [ file ]  dump heap in hprof binary format
    dumpideal -a | id  dump ideal graph like debug flag -XX:+PrintIdeal
    dumpilt -a | id    dump inline tree for C2 compilation
-   dumpreplaydata <address> | -a | <thread_id> [>replay.txt]  dump replay data into a file
+   dumpreplaydata &lt;address&gt; | -a | &lt;thread_id&gt; [&gt;replay.txt]  dump replay data into a file
    echo [ true | false ]  turn on/off command echo mode
    examine [ address/count ] | [ address,address]  show contents of memory from given address
    field [ type [ name fieldtype isStatic offset address ] ]  print info about a field of HotSpot type
@@ -96,11 +96,11 @@

 </pre>
 <h3>JavaScript integration</h3>
 <p>
-Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set 
+Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set
 by implementing more commands in a JavaScript file and by loading it by jsload command. jseval
 command may be used to evaluate arbitrary JavaScript expression from a string. Any JavaScript
 function may be exposed as a CLHSDB command by registering it using JavaScript registerCommand
-function. This function accepts command name, usage and name of the JavaScript implementation function 
+function. This function accepts command name, usage and name of the JavaScript implementation function
 as arguments.
 </p>
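The registerCommand hook described in the hunk above is easiest to see in a small script. A minimal sketch, assuming the script is loaded with jsload and that println is available to jsdb scripts as in the bundled sa.js; the command name hint, the function jhint, and the file name mycmds.js are made-up examples:

       function jhint(name) {
          // echo a canned pointer to the SA docs for the given command name
          println("see agent/doc/clhsdb.html for help on " + name);
       }
       registerCommand("hint", "hint name", "jhint");

After hsdb> jsload mycmds.js, entering hint dumpreplaydata invokes jhint with "dumpreplaydata" as its argument.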
@@ -127,11 +127,11 @@
-<h3>C2 Compilation Replay</h3>
+<h3>Compilation Replay</h3>
 <p>
 When a java process crashes in compiled method, usually a core file is saved.
-The C2 replay function can reproduce the compiling process in the core.
-<a href="c2replay.html">c2replay.html</a>
+The replay function can reproduce the compiling process in the core.
+<a href="cireplay.html">cireplay.html</a>
 </p>

diff -r d9b08d62b95e -r b7f3bf2ba33b agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java
--- a/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java	Thu May 02 10:58:04 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java	Mon May 06 10:20:18 2013 -0700
@@ -93,10 +93,11 @@
     CompileTask task = task();
     Method method = task.method();
     int entryBci = task.osrBci();
+    int compLevel = task.compLevel();
     Klass holder = method.getMethodHolder();
     out.println("compile " + holder.getName().asString() + " " +
                 OopUtilities.escapeString(method.getName().asString()) + " " +
                 method.getSignature().asString() + " " +
-                entryBci);
+                entryBci + " " + compLevel);
   }
 }

diff -r d9b08d62b95e -r b7f3bf2ba33b agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java
--- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Thu May 02 10:58:04 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Mon May 06 10:20:18 2013 -0700
@@ -78,6 +78,8 @@
      current sweep traversal index. */
   private static CIntegerField stackTraversalMarkField;
 
+  private static CIntegerField compLevelField;
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -113,7 +115,7 @@
     osrEntryPointField          = type.getAddressField("_osr_entry_point");
     lockCountField              = type.getJIntField("_lock_count");
     stackTraversalMarkField     = type.getCIntegerField("_stack_traversal_mark");
-
+    compLevelField              = type.getCIntegerField("_comp_level");
     pcDescSize = db.lookupType("PcDesc").getSize();
   }
 
@@ -530,7 +532,7 @@
     out.println("compile " + holder.getName().asString() + " " +
                 OopUtilities.escapeString(method.getName().asString()) + " " +
                 method.getSignature().asString() + " " +
-                getEntryBCI());
+                getEntryBCI() + " " + getCompLevel());
   }
 
@@ -551,4 +553,5 @@
   private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); }
   private int getNulChkTableOffset()  { return (int) nulChkTableOffsetField .getValue(addr); }
   private int getNMethodEndOffset()   { return (int) nmethodEndOffsetField  .getValue(addr); }
+  private int getCompLevel()          { return (int) compLevelField         .getValue(addr); }
 }

diff -r d9b08d62b95e -r b7f3bf2ba33b agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java
--- a/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java	Thu May 02 10:58:04 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java	Mon May 06 10:20:18 2013 -0700
@@ -46,10 +46,12 @@
     Type type = db.lookupType("CompileTask");
     methodField = type.getAddressField("_method");
     osrBciField = new CIntField(type.getCIntegerField("_osr_bci"), 0);
+    compLevelField = new CIntField(type.getCIntegerField("_comp_level"), 0);
   }
 
   private static AddressField methodField;
   private static CIntField osrBciField;
+  private static CIntField compLevelField;
 
   public CompileTask(Address addr) {
     super(addr);
@@ -63,4 +65,8 @@
   public int osrBci() {
     return (int)osrBciField.getValue(getAddress());
   }
+
+  public int compLevel() {
+    return (int)compLevelField.getValue(getAddress());
+  }
 }

diff -r d9b08d62b95e -r b7f3bf2ba33b agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java	Thu May 02 10:58:04 2013 -0400
+++ 
b/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java Mon May 06 10:20:18 2013 -0700 @@ -117,8 +117,6 @@ mode = MODE_HEAP_SUMMARY; } else if (modeFlag.equals("-histo")) { mode = MODE_HISTOGRAM; - } else if (modeFlag.equals("-permstat")) { - mode = MODE_CLSTATS; } else if (modeFlag.equals("-clstats")) { mode = MODE_CLSTATS; } else if (modeFlag.equals("-finalizerinfo")) { diff -r d9b08d62b95e -r b7f3bf2ba33b make/hotspot_version --- a/make/hotspot_version Thu May 02 10:58:04 2013 -0400 +++ b/make/hotspot_version Mon May 06 10:20:18 2013 -0700 @@ -35,7 +35,7 @@ HS_MAJOR_VER=25 HS_MINOR_VER=0 -HS_BUILD_NUMBER=31 +HS_BUILD_NUMBER=32 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 diff -r d9b08d62b95e -r b7f3bf2ba33b make/windows/makefiles/compile.make --- a/make/windows/makefiles/compile.make Thu May 02 10:58:04 2013 -0400 +++ b/make/windows/makefiles/compile.make Mon May 06 10:20:18 2013 -0700 @@ -52,7 +52,7 @@ # improving the quality of crash log stack traces involving jvm.dll. # These are always used in all compiles -CXX_FLAGS=/nologo /W3 /WX +CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3 /WX # Let's add debug information when Full Debug Symbols is enabled !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1" diff -r d9b08d62b95e -r b7f3bf2ba33b make/windows/makefiles/defs.make --- a/make/windows/makefiles/defs.make Thu May 02 10:58:04 2013 -0400 +++ b/make/windows/makefiles/defs.make Mon May 06 10:20:18 2013 -0700 @@ -193,7 +193,7 @@ MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER) endif -NMAKE= MAKEFLAGS= MFLAGS= nmake -NOLOGO +NMAKE= MAKEFLAGS= MFLAGS= EXTRA_CFLAGS="$(EXTRA_CFLAGS)" nmake -NOLOGO ifndef SYSTEM_UNAME SYSTEM_UNAME := $(shell uname) export SYSTEM_UNAME diff -r d9b08d62b95e -r b7f3bf2ba33b src/cpu/sparc/vm/compiledIC_sparc.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/sparc/vm/compiledIC_sparc.cpp Mon May 06 10:20:18 2013 -0700 @@ -0,0 +1,193 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/compiledIC.hpp" +#include "code/icBuffer.hpp" +#include "code/nmethod.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/safepoint.hpp" +#ifdef COMPILER2 +#include "opto/matcher.hpp" +#endif + +// Release the CompiledICHolder* associated with this call site is there is one. +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. 
+ NativeCall* call = nativeCall_at(call_site->addr()); + if (is_icholder_entry(call->destination())) { + NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); + } +} + +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + return is_icholder_entry(call->destination()); +} + +//----------------------------------------------------------------------------- +// High-level access to an inline cache. Guaranteed to be MT-safe. + +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +// ---------------------------------------------------------------------------- + +#define __ _masm. +void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { +#ifdef COMPILER2 + // Stub is fixed up when the corresponding call is converted from calling + // compiled code to calling interpreted code. + // set (empty), G5 + // jmp -1 + + address mark = cbuf.insts_mark(); // Get mark within main instrs section. + + MacroAssembler _masm(&cbuf); + + address base = + __ start_a_stub(to_interp_stub_size()*2); + if (base == NULL) return; // CodeBuffer::expand failed. + + // Static stub relocation stores the instruction address of the call. + __ relocate(static_stub_Relocation::spec(mark)); + + __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode())); + + __ set_inst_mark(); + AddressLiteral addrlit(-1); + __ JUMP(addrlit, G3, 0); + + __ delayed()->nop(); + + // Update current stubs pointer and restore code_end. + __ end_a_stub(); +#else + ShouldNotReachHere(); +#endif +} +#undef __ + +int CompiledStaticCall::to_interp_stub_size() { + // This doesn't need to be accurate but it must be larger or equal to + // the real size of the stub. + return (NativeMovConstReg::instruction_size + // sethi/setlo; + NativeJump::instruction_size + // sethi; jmp; nop + (TraceJumps ? 20 * BytesPerInstWord : 0) ); +} + +// Relocation entries for call stub, compiled java to interpreter. +int CompiledStaticCall::reloc_to_interp_stub() { + return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call +} + +void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { + address stub = find_stub(); + guarantee(stub != NULL, "stub not found"); + + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", + instruction_address(), + callee->name_and_sig_as_C_string()); + } + + // Creation also verifies the object. 
+ NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), + "a) MT-unsafe modification of inline cache"); + assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, + "b) MT-unsafe modification of inline cache"); + + // Update stub. + method_holder->set_data((intptr_t)callee()); + jump->set_jump_destination(entry); + + // Update jump to call. + set_destination_mt_safe(stub); +} + +void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); + // Reset stub. + address stub = static_stub->addr(); + assert(stub != NULL, "stub not found"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + method_holder->set_data(0); + jump->set_jump_destination((address)-1); +} + +//----------------------------------------------------------------------------- +// Non-product mode code +#ifndef PRODUCT + +void CompiledStaticCall::verify() { + // Verify call. + NativeCall::verify(); + if (os::is_MP()) { + verify_alignment(); + } + + // Verify stub. + address stub = find_stub(); + assert(stub != NULL, "no stub found for static call"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + // Verify state. + assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); +} + +#endif // !PRODUCT diff -r d9b08d62b95e -r b7f3bf2ba33b src/cpu/sparc/vm/sparc.ad --- a/src/cpu/sparc/vm/sparc.ad Thu May 02 10:58:04 2013 -0400 +++ b/src/cpu/sparc/vm/sparc.ad Mon May 06 10:20:18 2013 -0700 @@ -1656,53 +1656,6 @@ } //============================================================================= - -// emit call stub, compiled java to interpretor -void emit_java_to_interp(CodeBuffer &cbuf ) { - - // Stub is fixed up when the corresponding call is converted from calling - // compiled code to calling interpreted code. - // set (empty), G5 - // jmp -1 - - address mark = cbuf.insts_mark(); // get mark within main instrs section - - MacroAssembler _masm(&cbuf); - - address base = - __ start_a_stub(Compile::MAX_stubs_size); - if (base == NULL) return; // CodeBuffer::expand failed - - // static stub relocation stores the instruction address of the call - __ relocate(static_stub_Relocation::spec(mark)); - - __ set_metadata(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode())); - - __ set_inst_mark(); - AddressLiteral addrlit(-1); - __ JUMP(addrlit, G3, 0); - - __ delayed()->nop(); - - // Update current stubs pointer and restore code_end. - __ end_a_stub(); -} - -// size of call stub, compiled java to interpretor -uint size_java_to_interp() { - // This doesn't need to be accurate but it must be larger or equal to - // the real size of the stub. - return (NativeMovConstReg::instruction_size + // sethi/setlo; - NativeJump::instruction_size + // sethi; jmp; nop - (TraceJumps ? 
20 * BytesPerInstWord : 0) ); -} -// relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() { - return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call -} - - -//============================================================================= #ifndef PRODUCT void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { st->print_cr("\nUEP:"); @@ -2576,15 +2529,15 @@ enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine // who we intended to call. - if ( !_method ) { + if (!_method) { emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type); } else if (_optimized_virtual) { emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type); } else { emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type); } - if( _method ) { // Emit stub for static call - emit_java_to_interp(cbuf); + if (_method) { // Emit stub for static call. + CompiledStaticCall::emit_to_interp_stub(cbuf); } %} diff -r d9b08d62b95e -r b7f3bf2ba33b src/cpu/x86/vm/compiledIC_x86.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/x86/vm/compiledIC_x86.cpp Mon May 06 10:20:18 2013 -0700 @@ -0,0 +1,180 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/compiledIC.hpp" +#include "code/icBuffer.hpp" +#include "code/nmethod.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/safepoint.hpp" + +// Release the CompiledICHolder* associated with this call site is there is one. +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + if (is_icholder_entry(call->destination())) { + NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); + } +} + +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + return is_icholder_entry(call->destination()); +} + +//----------------------------------------------------------------------------- +// High-level access to an inline cache. Guaranteed to be MT-safe. 
+ +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +// ---------------------------------------------------------------------------- + +#define __ _masm. +void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { + // Stub is fixed up when the corresponding call is converted from + // calling compiled code to calling interpreted code. + // movq rbx, 0 + // jmp -5 # to self + + address mark = cbuf.insts_mark(); // Get mark within main instrs section. + + // Note that the code buffer's insts_mark is always relative to insts. + // That's why we must use the macroassembler to generate a stub. + MacroAssembler _masm(&cbuf); + + address base = + __ start_a_stub(to_interp_stub_size()*2); + if (base == NULL) return; // CodeBuffer::expand failed. + // Static stub relocation stores the instruction address of the call. + __ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand); + // Static stub relocation also tags the Method* in the code-stream. + __ mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time. + // This is recognized as unresolved by relocs/nativeinst/ic code. + __ jump(RuntimeAddress(__ pc())); + + // Update current stubs pointer and restore insts_end. + __ end_a_stub(); +} +#undef __ + +int CompiledStaticCall::to_interp_stub_size() { + return NOT_LP64(10) // movl; jmp + LP64_ONLY(15); // movq (1+1+8); jmp (1+4) +} + +// Relocation entries for call stub, compiled java to interpreter. +int CompiledStaticCall::reloc_to_interp_stub() { + return 4; // 3 in emit_to_interp_stub + 1 in emit_call +} + +void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { + address stub = find_stub(); + guarantee(stub != NULL, "stub not found"); + + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", + instruction_address(), + callee->name_and_sig_as_C_string()); + } + + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), + "a) MT-unsafe modification of inline cache"); + assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, + "b) MT-unsafe modification of inline cache"); + + // Update stub. + method_holder->set_data((intptr_t)callee()); + jump->set_jump_destination(entry); + + // Update jump to call. + set_destination_mt_safe(stub); +} + +void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); + // Reset stub. 
+ address stub = static_stub->addr(); + assert(stub != NULL, "stub not found"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + method_holder->set_data(0); + jump->set_jump_destination((address)-1); +} + +//----------------------------------------------------------------------------- +// Non-product mode code +#ifndef PRODUCT + +void CompiledStaticCall::verify() { + // Verify call. + NativeCall::verify(); + if (os::is_MP()) { + verify_alignment(); + } + + // Verify stub. + address stub = find_stub(); + assert(stub != NULL, "no stub found for static call"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + // Verify state. + assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); +} + +#endif // !PRODUCT diff -r d9b08d62b95e -r b7f3bf2ba33b src/cpu/x86/vm/x86_32.ad --- a/src/cpu/x86/vm/x86_32.ad Thu May 02 10:58:04 2013 -0400 +++ b/src/cpu/x86/vm/x86_32.ad Mon May 06 10:20:18 2013 -0700 @@ -1257,43 +1257,6 @@ } //============================================================================= - -// emit call stub, compiled java to interpreter -void emit_java_to_interp(CodeBuffer &cbuf ) { - // Stub is fixed up when the corresponding call is converted from calling - // compiled code to calling interpreted code. - // mov rbx,0 - // jmp -1 - - address mark = cbuf.insts_mark(); // get mark within main instrs section - - // Note that the code buffer's insts_mark is always relative to insts. - // That's why we must use the macroassembler to generate a stub. - MacroAssembler _masm(&cbuf); - - address base = - __ start_a_stub(Compile::MAX_stubs_size); - if (base == NULL) return; // CodeBuffer::expand failed - // static stub relocation stores the instruction address of the call - __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32); - // static stub relocation also tags the Method* in the code-stream. - __ mov_metadata(rbx, (Metadata*)NULL); // method is zapped till fixup time - // This is recognized as unresolved by relocs/nativeInst/ic code - __ jump(RuntimeAddress(__ pc())); - - __ end_a_stub(); - // Update current stubs pointer and restore insts_end. -} -// size of call stub, compiled java to interpretor -uint size_java_to_interp() { - return 10; // movl; jmp -} -// relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() { - return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call -} - -//============================================================================= #ifndef PRODUCT void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { st->print_cr( "CMP EAX,[ECX+4]\t# Inline cache check"); @@ -1909,8 +1872,8 @@ emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), static_call_Relocation::spec(), RELOC_IMM32 ); } - if (_method) { // Emit stub for static call - emit_java_to_interp(cbuf); + if (_method) { // Emit stub for static call. 
+ CompiledStaticCall::emit_to_interp_stub(cbuf); } %} diff -r d9b08d62b95e -r b7f3bf2ba33b src/cpu/x86/vm/x86_64.ad --- a/src/cpu/x86/vm/x86_64.ad Thu May 02 10:58:04 2013 -0400 +++ b/src/cpu/x86/vm/x86_64.ad Mon May 06 10:20:18 2013 -0700 @@ -1388,48 +1388,6 @@ } //============================================================================= - -// emit call stub, compiled java to interpreter -void emit_java_to_interp(CodeBuffer& cbuf) -{ - // Stub is fixed up when the corresponding call is converted from - // calling compiled code to calling interpreted code. - // movq rbx, 0 - // jmp -5 # to self - - address mark = cbuf.insts_mark(); // get mark within main instrs section - - // Note that the code buffer's insts_mark is always relative to insts. - // That's why we must use the macroassembler to generate a stub. - MacroAssembler _masm(&cbuf); - - address base = - __ start_a_stub(Compile::MAX_stubs_size); - if (base == NULL) return; // CodeBuffer::expand failed - // static stub relocation stores the instruction address of the call - __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64); - // static stub relocation also tags the Method* in the code-stream. - __ mov_metadata(rbx, (Metadata*) NULL); // method is zapped till fixup time - // This is recognized as unresolved by relocs/nativeinst/ic code - __ jump(RuntimeAddress(__ pc())); - - // Update current stubs pointer and restore insts_end. - __ end_a_stub(); -} - -// size of call stub, compiled java to interpretor -uint size_java_to_interp() -{ - return 15; // movq (1+1+8); jmp (1+4) -} - -// relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() -{ - return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call -} - -//============================================================================= #ifndef PRODUCT void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const { @@ -2078,8 +2036,8 @@ RELOC_DISP32); } if (_method) { - // Emit stub for static call - emit_java_to_interp(cbuf); + // Emit stub for static call. + CompiledStaticCall::emit_to_interp_stub(cbuf); } %} diff -r d9b08d62b95e -r b7f3bf2ba33b src/cpu/zero/vm/compiledIC_zero.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/zero/vm/compiledIC_zero.cpp Mon May 06 10:20:18 2013 -0700 @@ -0,0 +1,122 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "classfile/systemDictionary.hpp" +#include "code/codeCache.hpp" +#include "code/compiledIC.hpp" +#include "code/icBuffer.hpp" +#include "code/nmethod.hpp" +#include "code/vtableStubs.hpp" +#include "interpreter/interpreter.hpp" +#include "interpreter/linkResolver.hpp" +#include "memory/metadataFactory.hpp" +#include "memory/oopFactory.hpp" +#include "oops/method.hpp" +#include "oops/oop.inline.hpp" +#include "oops/symbol.hpp" +#include "runtime/icache.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/stubRoutines.hpp" +#include "utilities/events.hpp" + + +// Release the CompiledICHolder* associated with this call site is there is one. +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + if (is_icholder_entry(call->destination())) { + NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); + } +} + +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + return is_icholder_entry(call->destination()); +} + +//----------------------------------------------------------------------------- +// High-level access to an inline cache. Guaranteed to be MT-safe. + +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +// ---------------------------------------------------------------------------- + +void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +int CompiledStaticCall::to_interp_stub_size() { + ShouldNotReachHere(); // Only needed for COMPILER2. + return 0; +} + +// Relocation entries for call stub, compiled java to interpreter. +int CompiledStaticCall::reloc_to_interp_stub() { + ShouldNotReachHere(); // Only needed for COMPILER2. + return 0; +} + +void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +//----------------------------------------------------------------------------- +// Non-product mode code. +#ifndef PRODUCT + +void CompiledStaticCall::verify() { + ShouldNotReachHere(); // Only needed for COMPILER2. 
+} + +#endif // !PRODUCT diff -r d9b08d62b95e -r b7f3bf2ba33b src/os/bsd/vm/os_bsd.cpp --- a/src/os/bsd/vm/os_bsd.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/os/bsd/vm/os_bsd.cpp Mon May 06 10:20:18 2013 -0700 @@ -1230,10 +1230,6 @@ return retval; } -const char* os::get_current_directory(char *buf, int buflen) { - return getcwd(buf, buflen); -} - // check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr; diff -r d9b08d62b95e -r b7f3bf2ba33b src/os/linux/vm/os_linux.cpp --- a/src/os/linux/vm/os_linux.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/os/linux/vm/os_linux.cpp Mon May 06 10:20:18 2013 -0700 @@ -1663,10 +1663,6 @@ return retval; } -const char* os::get_current_directory(char *buf, int buflen) { - return getcwd(buf, buflen); -} - // check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr; diff -r d9b08d62b95e -r b7f3bf2ba33b src/os/posix/vm/os_posix.cpp --- a/src/os/posix/vm/os_posix.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/os/posix/vm/os_posix.cpp Mon May 06 10:20:18 2013 -0700 @@ -251,3 +251,11 @@ return true; #endif } + +const char* os::get_current_directory(char *buf, size_t buflen) { + return getcwd(buf, buflen); +} + +FILE* os::open(int fd, const char* mode) { + return ::fdopen(fd, mode); +} diff -r d9b08d62b95e -r b7f3bf2ba33b src/os/solaris/vm/os_solaris.cpp --- a/src/os/solaris/vm/os_solaris.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/os/solaris/vm/os_solaris.cpp Mon May 06 10:20:18 2013 -0700 @@ -1916,10 +1916,6 @@ return retval; } -const char* os::get_current_directory(char *buf, int buflen) { - return getcwd(buf, buflen); -} - // check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr; diff -r d9b08d62b95e -r b7f3bf2ba33b src/os/windows/vm/os_windows.cpp --- a/src/os/windows/vm/os_windows.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/os/windows/vm/os_windows.cpp Mon May 06 10:20:18 2013 -0700 @@ -1221,8 +1221,10 @@ // Needs to be in os specific directory because windows requires another // header file -const char* os::get_current_directory(char *buf, int buflen) { - return _getcwd(buf, buflen); +const char* os::get_current_directory(char *buf, size_t buflen) { + int n = static_cast(buflen); + if (buflen > INT_MAX) n = INT_MAX; + return _getcwd(buf, n); } //----------------------------------------------------------- @@ -4098,6 +4100,10 @@ return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); } +FILE* os::open(int fd, const char* mode) { + return ::_fdopen(fd, mode); +} + // Is a (classpath) directory empty? 
bool os::dir_is_empty(const char* path) { WIN32_FIND_DATA fd; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/adlc/main.cpp --- a/src/share/vm/adlc/main.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/adlc/main.cpp Mon May 06 10:20:18 2013 -0700 @@ -213,6 +213,7 @@ AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name)); AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp"); AD.addInclude(AD._CPP_file, "asm/macroAssembler.inline.hpp"); + AD.addInclude(AD._CPP_file, "code/compiledIC.hpp"); AD.addInclude(AD._CPP_file, "code/vmreg.hpp"); AD.addInclude(AD._CPP_file, "gc_interface/collectedHeap.inline.hpp"); AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp"); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/ci/ciEnv.cpp Mon May 06 10:20:18 2013 -0700 @@ -1150,23 +1150,9 @@ record_method_not_compilable("out of memory"); } -fileStream* ciEnv::_replay_data_stream = NULL; - -void ciEnv::dump_replay_data() { +void ciEnv::dump_replay_data(outputStream* out) { VM_ENTRY_MARK; MutexLocker ml(Compile_lock); - if (_replay_data_stream == NULL) { - _replay_data_stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(ReplayDataFile); - if (_replay_data_stream == NULL) { - fatal(err_msg("Can't open %s for replay data", ReplayDataFile)); - } - } - dump_replay_data(_replay_data_stream); -} - - -void ciEnv::dump_replay_data(outputStream* out) { - ASSERT_IN_VM; ResourceMark rm; #if INCLUDE_JVMTI out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables); @@ -1179,13 +1165,15 @@ for (int i = 0; i < objects->length(); i++) { objects->at(i)->dump_replay_data(out); } - Method* method = task()->method(); - int entry_bci = task()->osr_bci(); + CompileTask* task = this->task(); + Method* method = task->method(); + int entry_bci = task->osr_bci(); + int comp_level = task->comp_level(); // Klass holder = method->method_holder(); - out->print_cr("compile %s %s %s %d", + out->print_cr("compile %s %s %s %d %d", method->klass_name()->as_quoted_ascii(), method->name()->as_quoted_ascii(), method->signature()->as_quoted_ascii(), - entry_bci); + entry_bci, comp_level); out->flush(); } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/ci/ciEnv.hpp --- a/src/share/vm/ci/ciEnv.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/ci/ciEnv.hpp Mon May 06 10:20:18 2013 -0700 @@ -46,8 +46,6 @@ friend class CompileBroker; friend class Dependencies; // for get_object, during logging - static fileStream* _replay_data_stream; - private: Arena* _arena; // Alias for _ciEnv_arena except in init_shared_objects() Arena _ciEnv_arena; @@ -451,10 +449,6 @@ // RedefineClasses support void metadata_do(void f(Metadata*)) { _factory->metadata_do(f); } - // Dump the compilation replay data for this ciEnv to - // ReplayDataFile, creating the file if needed. - void dump_replay_data(); - // Dump the compilation replay data for the ciEnv to the stream. void dump_replay_data(outputStream* out); }; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/ci/ciMethod.hpp --- a/src/share/vm/ci/ciMethod.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/ci/ciMethod.hpp Mon May 06 10:20:18 2013 -0700 @@ -196,7 +196,6 @@ // Analysis and profiling. // // Usage note: liveness_at_bci and init_vars should be wrapped in ResourceMarks. 
- bool uses_monitors() const { return _uses_monitors; } // this one should go away, it has a misleading name bool has_monitor_bytecodes() const { return _uses_monitors; } bool has_balanced_monitors(); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/ci/ciReplay.cpp --- a/src/share/vm/ci/ciReplay.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/ci/ciReplay.cpp Mon May 06 10:20:18 2013 -0700 @@ -89,7 +89,7 @@ loader = Handle(thread, SystemDictionary::java_system_loader()); stream = fopen(filename, "rt"); if (stream == NULL) { - fprintf(stderr, "Can't open replay file %s\n", filename); + fprintf(stderr, "ERROR: Can't open replay file %s\n", filename); } buffer_length = 32; buffer = NEW_RESOURCE_ARRAY(char, buffer_length); @@ -327,7 +327,6 @@ if (had_error()) { tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message); tty->print_cr("%s", buffer); - assert(false, "error"); return; } pos = 0; @@ -370,11 +369,47 @@ } } - // compile + // validation of comp_level + bool is_valid_comp_level(int comp_level) { + const int msg_len = 256; + char* msg = NULL; + if (!is_compile(comp_level)) { + msg = NEW_RESOURCE_ARRAY(char, msg_len); + jio_snprintf(msg, msg_len, "%d isn't compilation level", comp_level); + } else if (!TieredCompilation && (comp_level != CompLevel_highest_tier)) { + msg = NEW_RESOURCE_ARRAY(char, msg_len); + switch (comp_level) { + case CompLevel_simple: + jio_snprintf(msg, msg_len, "compilation level %d requires Client VM or TieredCompilation", comp_level); + break; + case CompLevel_full_optimization: + jio_snprintf(msg, msg_len, "compilation level %d requires Server VM", comp_level); + break; + default: + jio_snprintf(msg, msg_len, "compilation level %d requires TieredCompilation", comp_level); + } + } + if (msg != NULL) { + report_error(msg); + return false; + } + return true; + } + + // compile void process_compile(TRAPS) { // methodHandle method; Method* method = parse_method(CHECK); int entry_bci = parse_int("entry_bci"); + const char* comp_level_label = "comp_level"; + int comp_level = parse_int(comp_level_label); + // old version w/o comp_level + if (had_error() && (error_message() == comp_level_label)) { + comp_level = CompLevel_full_optimization; + } + if (!is_valid_comp_level(comp_level)) { + return; + } Klass* k = method->method_holder(); ((InstanceKlass*)k)->initialize(THREAD); if (HAS_PENDING_EXCEPTION) { @@ -389,12 +424,12 @@ } } // Make sure the existence of a prior compile doesn't stop this one - nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, CompLevel_full_optimization, true) : method->code(); + nmethod* nm = (entry_bci != InvocationEntryBci) ? 
method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code(); if (nm != NULL) { nm->make_not_entrant(); } replay_state = this; - CompileBroker::compile_method(method, entry_bci, CompLevel_full_optimization, + CompileBroker::compile_method(method, entry_bci, comp_level, methodHandle(), 0, "replay", THREAD); replay_state = NULL; reset(); @@ -551,7 +586,7 @@ if (parsed_two_word == i) continue; default: - ShouldNotReachHere(); + fatal(err_msg_res("Unexpected tag: %d", cp->tag_at(i).value())); break; } @@ -819,6 +854,11 @@ ReplaySuppressInitializers = 1; } + if (FLAG_IS_DEFAULT(ReplayDataFile)) { + tty->print_cr("ERROR: no compiler replay data file specified (use -XX:ReplayDataFile=replay_pid12345.txt)."); + return 1; + } + // Load and parse the replay data CompileReplay rp(ReplayDataFile, THREAD); int exit_code = 0; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/classfile/classLoader.cpp --- a/src/share/vm/classfile/classLoader.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/classfile/classLoader.cpp Mon May 06 10:20:18 2013 -0700 @@ -1345,9 +1345,10 @@ tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_class_counter, buffer); // Preload all classes to get around uncommon traps // Iterate over all methods in class + int comp_level = CompilationPolicy::policy()->initial_compile_level(); for (int n = 0; n < k->methods()->length(); n++) { methodHandle m (THREAD, k->methods()->at(n)); - if (CompilationPolicy::can_be_compiled(m)) { + if (CompilationPolicy::can_be_compiled(m, comp_level)) { if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) { // Give sweeper a chance to keep up with CTW @@ -1356,7 +1357,7 @@ _codecache_sweep_counter = 0; } // Force compilation - CompileBroker::compile_method(m, InvocationEntryBci, CompilationPolicy::policy()->initial_compile_level(), + CompileBroker::compile_method(m, InvocationEntryBci, comp_level, methodHandle(), 0, "CTW", THREAD); if (HAS_PENDING_EXCEPTION) { clear_pending_exception_if_not_oom(CHECK); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/classfile/classLoaderData.cpp --- a/src/share/vm/classfile/classLoaderData.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/classfile/classLoaderData.cpp Mon May 06 10:20:18 2013 -0700 @@ -53,6 +53,7 @@ #include "classfile/metadataOnStackMark.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" +#include "memory/gcLocker.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" @@ -65,17 +66,19 @@ ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL; -ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) : +ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) : _class_loader(h_class_loader()), _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially _metaspace(NULL), _unloading(false), _klasses(NULL), _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL), - _next(NULL), _dependencies(), + _next(NULL), _dependencies(dependencies), _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) { // empty } void ClassLoaderData::init_dependencies(TRAPS) { + assert(!Universe::is_fully_initialized(), "should only be called when initializing"); + assert(is_the_null_class_loader_data(), "should only call this for the null class loader"); _dependencies.init(CHECK); } @@ -429,7 +432,7 @@ // These anonymous class loaders are to contain classes used for JSR292 
ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) { // Add a new class loader data to the graph. - return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL); + return ClassLoaderDataGraph::add(loader, true, CHECK_NULL); } const char* ClassLoaderData::loader_name() { @@ -501,19 +504,22 @@ ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL; ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL; - // Add a new class loader data node to the list. Assign the newly created // ClassLoaderData into the java/lang/ClassLoader object as a hidden field -ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) { - // Not assigned a class loader data yet. - // Create one. - ClassLoaderData* *list_head = &_head; - ClassLoaderData* next = _head; +ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) { + // We need to allocate all the oops for the ClassLoaderData before allocating the + // actual ClassLoaderData object. + ClassLoaderData::Dependencies dependencies(CHECK_NULL); - bool is_anonymous = (cld_addr == NULL); - ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous); + No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the + // ClassLoaderData in the graph since the CLD + // contains unhandled oops - if (cld_addr != NULL) { + ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies); + + + if (!is_anonymous) { + ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader()); // First, Atomically set it ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL); if (old != NULL) { @@ -525,6 +531,9 @@ // We won the race, and therefore the task of adding the data to the list of // class loader data + ClassLoaderData** list_head = &_head; + ClassLoaderData* next = _head; + do { cld->set_next(next); ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next); @@ -537,10 +546,6 @@ cld->loader_name()); tty->print_cr("]"); } - // Create dependencies after the CLD is added to the list. Otherwise, - // the GC GC will not find the CLD and the _class_loader field will - // not be updated. - cld->init_dependencies(CHECK_NULL); return cld; } next = exchanged; @@ -671,6 +676,8 @@ dead->unload(); data = data->next(); // Remove from loader list. + // This class loader data will no longer be found + // in the ClassLoaderDataGraph. if (prev != NULL) { prev->set_next(data); } else { @@ -692,6 +699,7 @@ next = purge_me->next(); delete purge_me; } + Metaspace::purge(); } // CDS support diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/classfile/classLoaderData.hpp --- a/src/share/vm/classfile/classLoaderData.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/classfile/classLoaderData.hpp Mon May 06 10:20:18 2013 -0700 @@ -62,7 +62,7 @@ // CMS support. 
static ClassLoaderData* _saved_head; - static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS); + static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); public: static ClassLoaderData* find_or_create(Handle class_loader, TRAPS); static void purge(); @@ -100,6 +100,9 @@ Thread* THREAD); public: Dependencies() : _list_head(NULL) {} + Dependencies(TRAPS) : _list_head(NULL) { + init(CHECK); + } void add(Handle dependency, TRAPS); void init(TRAPS); void oops_do(OopClosure* f); @@ -150,7 +153,7 @@ void set_next(ClassLoaderData* next) { _next = next; } ClassLoaderData* next() const { return _next; } - ClassLoaderData(Handle h_class_loader, bool is_anonymous); + ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies); ~ClassLoaderData(); void set_metaspace(Metaspace* m) { _metaspace = m; } @@ -190,7 +193,9 @@ static void init_null_class_loader_data() { assert(_the_null_class_loader_data == NULL, "cannot initialize twice"); assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice"); - _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false); + + // We explicitly initialize the Dependencies object at a later phase in the initialization + _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false, Dependencies()); ClassLoaderDataGraph::_head = _the_null_class_loader_data; assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be"); if (DumpSharedSpaces) { diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/classfile/classLoaderData.inline.hpp --- a/src/share/vm/classfile/classLoaderData.inline.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/classfile/classLoaderData.inline.hpp Mon May 06 10:20:18 2013 -0700 @@ -43,10 +43,9 @@ assert(loader() != NULL,"Must be a class loader"); // Gets the class loader data out of the java/lang/ClassLoader object, if non-null // it's already in the loader_data, so no need to add - ClassLoaderData** loader_data_addr = java_lang_ClassLoader::loader_data_addr(loader()); - ClassLoaderData* loader_data_id = *loader_data_addr; - if (loader_data_id) { - return loader_data_id; + ClassLoaderData* loader_data= java_lang_ClassLoader::loader_data(loader()); + if (loader_data) { + return loader_data; } - return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD); + return ClassLoaderDataGraph::add(loader, false, THREAD); } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/classfile/systemDictionary.cpp --- a/src/share/vm/classfile/systemDictionary.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/classfile/systemDictionary.cpp Mon May 06 10:20:18 2013 -0700 @@ -830,7 +830,7 @@ Klass *kk; { MutexLocker mu(SystemDictionary_lock, THREAD); - kk = find_class(name, ik->class_loader_data()); + kk = find_class(d_index, d_hash, name, ik->class_loader_data()); } if (kk != NULL) { // No clean up is needed if the shared class has been entered diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/classfile/vmSymbols.hpp Mon May 06 10:20:18 2013 -0700 @@ -517,13 +517,18 @@ template(sun_management_ManagementFactory, "sun/management/ManagementFactory") \ template(sun_management_Sensor, "sun/management/Sensor") \ template(sun_management_Agent, "sun/management/Agent") \ + template(sun_management_DiagnosticCommandImpl, "sun/management/DiagnosticCommandImpl") \ template(sun_management_GarbageCollectorImpl, 
"sun/management/GarbageCollectorImpl") \ + template(sun_management_ManagementFactoryHelper, "sun/management/ManagementFactoryHelper") \ + template(getDiagnosticCommandMBean_name, "getDiagnosticCommandMBean") \ + template(getDiagnosticCommandMBean_signature, "()Lcom/sun/management/DiagnosticCommandMBean;") \ template(getGcInfoBuilder_name, "getGcInfoBuilder") \ template(getGcInfoBuilder_signature, "()Lsun/management/GcInfoBuilder;") \ template(com_sun_management_GcInfo, "com/sun/management/GcInfo") \ template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \ template(createGCNotification_name, "createGCNotification") \ template(createGCNotification_signature, "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \ + template(createDiagnosticFrameworkNotification_name, "createDiagnosticFrameworkNotification") \ template(createMemoryPoolMBean_name, "createMemoryPoolMBean") \ template(createMemoryManagerMBean_name, "createMemoryManagerMBean") \ template(createGarbageCollectorMBean_name, "createGarbageCollectorMBean") \ diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/code/codeCache.cpp --- a/src/share/vm/code/codeCache.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/code/codeCache.cpp Mon May 06 10:20:18 2013 -0700 @@ -463,8 +463,10 @@ } #endif //PRODUCT - -nmethod* CodeCache::find_and_remove_saved_code(Method* m) { +/** + * Remove and return nmethod from the saved code list in order to reanimate it. + */ +nmethod* CodeCache::reanimate_saved_code(Method* m) { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); nmethod* saved = _saved_nmethods; nmethod* prev = NULL; @@ -479,7 +481,7 @@ saved->set_speculatively_disconnected(false); saved->set_saved_nmethod_link(NULL); if (PrintMethodFlushing) { - saved->print_on(tty, " ### nmethod is reconnected\n"); + saved->print_on(tty, " ### nmethod is reconnected"); } if (LogCompilation && (xtty != NULL)) { ttyLocker ttyl; @@ -496,6 +498,9 @@ return NULL; } +/** + * Remove nmethod from the saved code list in order to discard it permanently + */ void CodeCache::remove_saved_code(nmethod* nm) { // For conc swpr this will be called with CodeCache_lock taken by caller assert_locked_or_safepoint(CodeCache_lock); @@ -529,7 +534,7 @@ nm->set_saved_nmethod_link(_saved_nmethods); _saved_nmethods = nm; if (PrintMethodFlushing) { - nm->print_on(tty, " ### nmethod is speculatively disconnected\n"); + nm->print_on(tty, " ### nmethod is speculatively disconnected"); } if (LogCompilation && (xtty != NULL)) { ttyLocker ttyl; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/code/codeCache.hpp --- a/src/share/vm/code/codeCache.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/code/codeCache.hpp Mon May 06 10:20:18 2013 -0700 @@ -57,7 +57,7 @@ static int _number_of_nmethods_with_dependencies; static bool _needs_cache_clean; static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() - static nmethod* _saved_nmethods; // linked via nm->saved_nmethod_look() + static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods. 
static void verify_if_often() PRODUCT_RETURN; @@ -168,7 +168,7 @@ static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } static void clear_inline_caches(); // clear all inline caches - static nmethod* find_and_remove_saved_code(Method* m); + static nmethod* reanimate_saved_code(Method* m); static void remove_saved_code(nmethod* nm); static void speculatively_disconnect(nmethod* nm); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/code/compiledIC.cpp --- a/src/share/vm/code/compiledIC.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/code/compiledIC.cpp Mon May 06 10:20:18 2013 -0700 @@ -45,25 +45,6 @@ // Every time a compiled IC is changed or its type is being accessed, // either the CompiledIC_lock must be set or we must be at a safe point. - -// Release the CompiledICHolder* associated with this call site if there is one. -void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { - // This call site might have become stale so inspect it carefully. - NativeCall* call = nativeCall_at(call_site->addr()); - if (is_icholder_entry(call->destination())) { - NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); - InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); - } -} - - -bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { - // This call site might have become stale so inspect it carefully. - NativeCall* call = nativeCall_at(call_site->addr()); - return is_icholder_entry(call->destination()); -} - - //----------------------------------------------------------------------------- // Low-level access to an inline cache. Private, since they might not be // MT-safe to use. @@ -488,33 +469,6 @@ return (cb != NULL && cb->is_adapter_blob()); } - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // search for the ic_call at the given address - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; -} -} - - // ---------------------------------------------------------------------------- void CompiledStaticCall::set_to_clean() { @@ -549,33 +503,6 @@ return nm->stub_contains(destination()); } - -void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { - address stub=find_stub(); - guarantee(stub != NULL, "stub not found"); - - if (TraceICs) { - ResourceMark rm; - tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", - instruction_address(), - callee->name_and_sig_as_C_string()); - } - - NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object - NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - - assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache"); - assert(jump->jump_destination() == (address)-1 || jump->jump_destination() ==
entry, "b) MT-unsafe modification of inline cache"); - - // Update stub - method_holder->set_data((intptr_t)callee()); - jump->set_jump_destination(entry); - - // Update jump to call - set_destination_mt_safe(stub); -} - - void CompiledStaticCall::set(const StaticCallInfo& info) { assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); @@ -618,19 +545,6 @@ } } - -void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { - assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); - // Reset stub - address stub = static_stub->addr(); - assert(stub!=NULL, "stub not found"); - NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object - NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - method_holder->set_data(0); - jump->set_jump_destination((address)-1); -} - - address CompiledStaticCall::find_stub() { // Find reloc. information containing this call-site RelocIterator iter((nmethod*)NULL, instruction_address()); @@ -668,19 +582,16 @@ || is_optimized() || is_megamorphic(), "sanity check"); } - void CompiledIC::print() { print_compiled_ic(); tty->cr(); } - void CompiledIC::print_compiled_ic() { tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT, instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination(), is_optimized() ? NULL : cached_value()); } - void CompiledStaticCall::print() { tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address()); if (is_clean()) { @@ -693,21 +604,4 @@ tty->cr(); } -void CompiledStaticCall::verify() { - // Verify call - NativeCall::verify(); - if (os::is_MP()) { - verify_alignment(); - } - - // Verify stub - address stub = find_stub(); - assert(stub != NULL, "no stub found for static call"); - NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object - NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - - // Verify state - assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); -} - -#endif +#endif // !PRODUCT diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/code/compiledIC.hpp --- a/src/share/vm/code/compiledIC.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/code/compiledIC.hpp Mon May 06 10:20:18 2013 -0700 @@ -304,6 +304,11 @@ friend CompiledStaticCall* compiledStaticCall_at(address native_call); friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site); + // Code + static void emit_to_interp_stub(CodeBuffer &cbuf); + static int to_interp_stub_size(); + static int reloc_to_interp_stub(); + // State bool is_clean() const; bool is_call_to_compiled() const; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/compiler/compileBroker.cpp --- a/src/share/vm/compiler/compileBroker.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/compiler/compileBroker.cpp Mon May 06 10:20:18 2013 -0700 @@ -65,7 +65,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end, char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool); -#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \ +#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -77,8 +77,7 @@ signature->bytes(), 
signature->utf8_length()); \ } -#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \ - comp_name, success) \ +#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -92,7 +91,7 @@ #else /* USDT2 */ -#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \ +#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -104,8 +103,7 @@ (char *) signature->bytes(), signature->utf8_length()); \ } -#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \ - comp_name, success) \ +#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -120,8 +118,8 @@ #else // ndef DTRACE_ENABLED -#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) -#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, comp_name, success) +#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) +#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) #endif // ndef DTRACE_ENABLED @@ -1229,7 +1227,7 @@ if (method->is_not_compilable(comp_level)) return NULL; if (UseCodeCacheFlushing) { - nmethod* saved = CodeCache::find_and_remove_saved_code(method()); + nmethod* saved = CodeCache::reanimate_saved_code(method()); if (saved != NULL) { method->set_code(method, saved); return saved; @@ -1288,9 +1286,9 @@ method->jmethod_id(); } - // If the compiler is shut off due to code cache flushing or otherwise, + // If the compiler is shut off due to the code cache getting full // fail out now so blocking compiles don't hang the java thread - if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) { + if (!should_compile_new_jobs()) { CompilationPolicy::policy()->delay_compilation(method()); return NULL; } @@ -1766,8 +1764,7 @@ // Save information about this method in case of failure. set_last_compile(thread, method, is_osr, task_level); - DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler(task_level), method, - compiler_name(task_level)); + DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level)); } // Allocate a new set of JNI handles.
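The DTRACE macro cleanup above (dropping the unused compiler argument) leaves the usual conditional-compilation shape: one definition marshals the method's name and signature and fires the probe, and an empty fallback definition makes every probe site disappear when DTRACE_ENABLED is not defined. A simplified sketch of that shape, with fire_probe as a hypothetical stand-in for the real probe machinery:

    #include <cstdio>

    static void fire_probe(const char* event, const char* name) {
      std::printf("%s %s\n", event, name);  // stand-in for an actual DTrace probe
    }

    #ifdef DTRACE_ENABLED
      // Expands to a block, so a probe site behaves like one statement.
      #define METHOD_COMPILE_BEGIN(name) \
        { fire_probe("method-compile-begin", (name)); }
    #else
      // Expands to nothing: probe sites cost nothing when tracing is off.
      #define METHOD_COMPILE_BEGIN(name)
    #endif

    int main() {
      METHOD_COMPILE_BEGIN("Foo::bar()");
      return 0;
    }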
@@ -1842,13 +1839,14 @@ } } } + // simulate crash during compilation + assert(task->compile_id() != CICrashAt, "just as planned"); } pop_jni_handle_block(); methodHandle method(thread, task->method()); - DTRACE_METHOD_COMPILE_END_PROBE(compiler(task_level), method, - compiler_name(task_level), task->is_success()); + DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success()); collect_statistics(thread, time, task); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Mon May 06 10:20:18 2013 -0700 @@ -2444,8 +2444,7 @@ // initial marking in checkpointRootsInitialWork has been completed if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before initial mark: "); - Universe::verify(); + Universe::verify("Verify before initial mark: "); } { bool res = markFromRoots(false); @@ -2456,8 +2455,7 @@ case FinalMarking: if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before re-mark: "); - Universe::verify(); + Universe::verify("Verify before re-mark: "); } checkpointRootsFinal(false, clear_all_soft_refs, init_mark_was_synchronous); @@ -2468,8 +2466,7 @@ // final marking in checkpointRootsFinal has been completed if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before sweep: "); - Universe::verify(); + Universe::verify("Verify before sweep: "); } sweep(false); assert(_collectorState == Resizing, "Incorrect state"); @@ -2484,8 +2481,7 @@ // The heap has been resized. if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before reset: "); - Universe::verify(); + Universe::verify("Verify before reset: "); } reset(false); assert(_collectorState == Idling, "Collector state should " @@ -2853,8 +2849,8 @@ bool failed() { return _failed; } }; -bool CMSCollector::verify_after_remark() { - gclog_or_tty->print(" [Verifying CMS Marking... "); +bool CMSCollector::verify_after_remark(bool silent) { + if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... "); MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); static bool init = false; @@ -2915,7 +2911,7 @@ warning("Unrecognized value %d for CMSRemarkVerifyVariant", CMSRemarkVerifyVariant); } - gclog_or_tty->print(" done] "); + if (!silent) gclog_or_tty->print(" done] "); return true; } @@ -3426,8 +3422,9 @@ void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) { assert_locked_or_safepoint(Heap_lock); assert_lock_strong(freelistLock()); - // XXX Fix when compaction is implemented. - warning("Shrinking of CMS not yet implemented"); + if (PrintGCDetails && Verbose) { + warning("Shrinking of CMS not yet implemented"); + } return; } @@ -6010,26 +6007,23 @@ &cmsDrainMarkingStackClosure, NULL); } - verify_work_stacks_empty(); - } + } + + // This is the point where the entire marking should have completed. + verify_work_stacks_empty(); if (should_unload_classes()) { { TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); - // Follow SystemDictionary roots and unload classes + // Unload classes and purge the SystemDictionary. 
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); - // Follow CodeCache roots and unload any methods marked for unloading + // Unload nmethods. CodeCache::do_unloading(&_is_alive_closure, purged_class); - cmsDrainMarkingStackClosure.do_void(); - verify_work_stacks_empty(); - - // Update subklass/sibling/implementor links in KlassKlass descendants + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(&_is_alive_closure); - // Nothing should have been pushed onto the working stacks. - verify_work_stacks_empty(); } { @@ -6043,11 +6037,10 @@ // Need to check if we really scanned the StringTable. if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) { TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); - // Now clean up stale oops in StringTable + // Delete entries for dead interned strings. StringTable::unlink(&_is_alive_closure); } - verify_work_stacks_empty(); // Restore any preserved marks as a result of mark stack or // work queue overflow restore_preserved_marks_if_any(); // done single-threaded for now diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Mon May 06 10:20:18 2013 -0700 @@ -990,7 +990,7 @@ // debugging void verify(); - bool verify_after_remark(); + bool verify_after_remark(bool silent = VerifySilently); void verify_ok_to_terminate() const PRODUCT_RETURN; void verify_work_stacks_empty() const PRODUCT_RETURN; void verify_overflow_empty() const PRODUCT_RETURN; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon May 06 10:20:18 2013 -0700 @@ -1273,10 +1273,9 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(before)"); } G1CollectorPolicy* g1p = g1h->g1_policy(); @@ -1300,10 +1299,9 @@ // Verify the heap w.r.t. the previous marking bitmap. 
if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(overflow)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(overflow)"); } // Clear the marking state because we will be restarting @@ -1323,10 +1321,9 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UseNextMarking); + Universe::verify(VerifyOption_G1UseNextMarking, + " VerifyDuringGC:(after)"); } assert(!restart_for_overflow(), "sanity"); // Completely reset the marking state since marking completed @@ -1972,10 +1969,9 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(before)"); } G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); @@ -2127,10 +2123,9 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(after)"); } g1h->verify_region_sets_optional(); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon May 06 10:20:18 2013 -0700 @@ -1271,9 +1271,8 @@ if (guard && total_collections() >= VerifyGCStartAt) { double verify_start = os::elapsedTime(); HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(msg); prepare_for_verify(); - Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, msg); verify_time_ms = (os::elapsedTime() - verify_start) * 1000; } @@ -1304,7 +1303,7 @@ print_heap_before_gc(); - size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); HRSPhaseSetter x(HRSPhaseFullGC); verify_region_sets_optional(); @@ -1425,6 +1424,7 @@ // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); // Note: since we've just done a full GC, concurrent // marking is no longer active. 
Therefore we need not @@ -1955,13 +1955,6 @@ int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); assert(n_rem_sets > 0, "Invariant."); - HeapRegionRemSetIterator** iter_arr = - NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC); - for (int i = 0; i < n_queues; i++) { - iter_arr[i] = new HeapRegionRemSetIterator(); - } - _rem_set_iterator = iter_arr; - _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); @@ -5079,10 +5072,9 @@ } void -G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, - OopClosure* non_root_closure) { +G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) { CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); - SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); + SharedHeap::process_weak_roots(root_closure, &roots_in_blobs); } // Weak Reference Processing support diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon May 06 10:20:18 2013 -0700 @@ -786,9 +786,6 @@ // concurrently after the collection. DirtyCardQueueSet _dirty_card_queue_set; - // The Heap Region Rem Set Iterator. - HeapRegionRemSetIterator** _rem_set_iterator; - // The closure used to refine a single card. RefineCardTableEntryClosure* _refine_cte_cl; @@ -827,8 +824,7 @@ // Apply "blk" to all the weak roots of the system. These include // JNI weak roots, the code cache, system dictionary, symbol table, // string table, and referents of reachable weak refs. - void g1_process_weak_roots(OopClosure* root_closure, - OopClosure* non_root_closure); + void g1_process_weak_roots(OopClosure* root_closure); // Frees a non-humongous region by initializing its contents and // adding it to the free list that's passed as a parameter (this is @@ -1114,15 +1110,6 @@ G1RemSet* g1_rem_set() const { return _g1_rem_set; } ModRefBarrierSet* mr_bs() const { return _mr_bs; } - // The rem set iterator. - HeapRegionRemSetIterator* rem_set_iterator(int i) { - return _rem_set_iterator[i]; - } - - HeapRegionRemSetIterator* rem_set_iterator() { - return _rem_set_iterator[0]; - } - unsigned get_gc_time_stamp() { return _gc_time_stamp; } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/g1MarkSweep.cpp --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Mon May 06 10:20:18 2013 -0700 @@ -144,33 +144,28 @@ &GenMarkSweep::follow_stack_closure, NULL); - // Follow system dictionary roots and unload classes + + // This is the point where the entire marking should have completed. + assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed"); + + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive); - assert(GenMarkSweep::_marking_stack.is_empty(), - "stack should be empty by now"); - // Follow code cache roots (has to be done after system dictionary, - // assumes all live klasses are marked) + // Unload nmethods. CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class); - GenMarkSweep::follow_stack(); - // Update subklass/sibling/implementor links of live klasses + // Prune dead klasses from subklass/sibling/implementor lists. 
Klass::clean_weak_klass_links(&GenMarkSweep::is_alive); - assert(GenMarkSweep::_marking_stack.is_empty(), - "stack should be empty by now"); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(&GenMarkSweep::is_alive); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - assert(GenMarkSweep::_marking_stack.is_empty(), - "stack should be empty by now"); - if (VerifyDuringGC) { HandleMark hm; // handle scope COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact); - gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying "); Universe::heap()->prepare_for_verify(); // Note: we can verify only the heap here. When an object is // marked, the previous value of the mark word (including @@ -182,11 +177,13 @@ // fail. At the end of the GC, the original mark word values // (including hash values) are restored to the appropriate // objects. - Universe::heap()->verify(/* silent */ false, - /* option */ VerifyOption_G1UseMarkWord); - - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - gclog_or_tty->print_cr("]"); + if (!VerifySilently) { + gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying "); + } + Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord); + if (!VerifySilently) { + gclog_or_tty->print_cr("]"); + } } } @@ -308,17 +305,16 @@ sh->process_strong_roots(true, // activate StrongRootsScope false, // not scavenging. SharedHeap::SO_AllClasses, - &GenMarkSweep::adjust_root_pointer_closure, + &GenMarkSweep::adjust_pointer_closure, NULL, // do not touch code cache here &GenMarkSweep::adjust_klass_closure); assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity"); - g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure); + g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) - g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure, - &GenMarkSweep::adjust_pointer_closure); + g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure); GenMarkSweep::adjust_marks(); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/g1RemSet.cpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Mon May 06 10:20:18 2013 -0700 @@ -169,14 +169,13 @@ // _try_claimed || r->claim_iter() // is true: either we're supposed to work on claimed-but-not-complete // regions, or we successfully claimed the region. - HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i); - hrrs->init_iterator(iter); + HeapRegionRemSetIterator iter(hrrs); size_t card_index; // We claim cards in blocks so as to reduce the contention. The block size is determined by // the G1RSetScanBlockSize parameter.
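The claim-in-blocks comment above is worth unpacking: rather than claiming one card at a time, each worker atomically advances a shared cursor by a whole block and then scans the indices it won, so the cursor is contended once per block instead of once per card. A minimal sketch of the idea using std::atomic (illustrative only; HotSpot uses its own Atomic primitives rather than the C++ standard library):

    #include <atomic>
    #include <cstddef>

    static std::atomic<size_t> g_claim_cursor(0);

    // Each fetch_add hands out a disjoint range of 'block_size' indices,
    // so no two workers ever process the same card.
    static size_t claim_next_block(size_t block_size) {
      return g_claim_cursor.fetch_add(block_size);
    }

    void worker(size_t n_cards, size_t block_size) {
      for (size_t start = claim_next_block(block_size);
           start < n_cards;
           start = claim_next_block(block_size)) {
        size_t limit = (start + block_size < n_cards) ? start + block_size : n_cards;
        for (size_t card = start; card < limit; card++) {
          // process_card(card);  // hypothetical per-card work
        }
      }
    }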
size_t jump_to_card = hrrs->iter_claimed_next(_block_size); - for (size_t current_card = 0; iter->has_next(card_index); current_card++) { + for (size_t current_card = 0; iter.has_next(card_index); current_card++) { if (current_card >= jump_to_card + _block_size) { jump_to_card = hrrs->iter_claimed_next(_block_size); } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/g1RemSet.hpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp Mon May 06 10:20:18 2013 -0700 @@ -53,14 +53,14 @@ NumSeqTasks = 1 }; - CardTableModRefBS* _ct_bs; - SubTasksDone* _seq_task; - G1CollectorPolicy* _g1p; + CardTableModRefBS* _ct_bs; + SubTasksDone* _seq_task; + G1CollectorPolicy* _g1p; - ConcurrentG1Refine* _cg1r; + ConcurrentG1Refine* _cg1r; - size_t* _cards_scanned; - size_t _total_cards_scanned; + size_t* _cards_scanned; + size_t _total_cards_scanned; // Used for caching the closure that is responsible for scanning // references into the collection set. diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Mon May 06 10:20:18 2013 -0700 @@ -877,14 +877,9 @@ return _iter_state == Complete; } -void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const { - iter->initialize(this); -} - #ifndef PRODUCT void HeapRegionRemSet::print() const { - HeapRegionRemSetIterator iter; - init_iterator(&iter); + HeapRegionRemSetIterator iter(this); size_t card_index; while (iter.has_next(card_index)) { HeapWord* card_start = @@ -928,35 +923,23 @@ //-------------------- Iteration -------------------- -HeapRegionRemSetIterator:: -HeapRegionRemSetIterator() : - _hrrs(NULL), +HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) : + _hrrs(hrrs), _g1h(G1CollectedHeap::heap()), - _bosa(NULL), - _sparse_iter() { } - -void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) { - _hrrs = hrrs; - _coarse_map = &_hrrs->_other_regions._coarse_map; - _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions; - _bosa = _hrrs->bosa(); - - _is = Sparse; + _coarse_map(&hrrs->_other_regions._coarse_map), + _fine_grain_regions(hrrs->_other_regions._fine_grain_regions), + _bosa(hrrs->bosa()), + _is(Sparse), // Set these values so that we increment to the first region. - _coarse_cur_region_index = -1; - _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1); - - _cur_region_cur_card = 0; - - _fine_array_index = -1; - _fine_cur_prt = NULL; - - _n_yielded_coarse = 0; - _n_yielded_fine = 0; - _n_yielded_sparse = 0; - - _sparse_iter.init(&hrrs->_other_regions._sparse_table); -} + _coarse_cur_region_index(-1), + _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1), + _cur_region_cur_card(0), + _fine_array_index(-1), + _fine_cur_prt(NULL), + _n_yielded_coarse(0), + _n_yielded_fine(0), + _n_yielded_sparse(0), + _sparse_iter(&hrrs->_other_regions._sparse_table) {} bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) { if (_hrrs->_other_regions._n_coarse_entries == 0) return false; @@ -1209,8 +1192,7 @@ hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom()); // Now, does iteration yield these three? 
- HeapRegionRemSetIterator iter; - hrrs->init_iterator(&iter); + HeapRegionRemSetIterator iter(hrrs); size_t sum = 0; size_t card_index; while (iter.has_next(card_index)) { diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Mon May 06 10:20:18 2013 -0700 @@ -281,9 +281,6 @@ return (_iter_state == Unclaimed) && (_iter_claimed == 0); } - // Initialize the given iterator to iterate over this rem set. - void init_iterator(HeapRegionRemSetIterator* iter) const; - // The actual # of bytes this hr_remset takes up. size_t mem_size() { return _other_regions.mem_size() @@ -345,9 +342,9 @@ #endif }; -class HeapRegionRemSetIterator : public CHeapObj { +class HeapRegionRemSetIterator : public StackObj { - // The region over which we're iterating. + // The region RSet over which we're iterating. const HeapRegionRemSet* _hrrs; // Local caching of HRRS fields. @@ -362,8 +359,10 @@ size_t _n_yielded_coarse; size_t _n_yielded_sparse; - // If true we're iterating over the coarse table; if false the fine - // table. + // Indicates what granularity of table that we're currently iterating over. + // We start iterating over the sparse table, progress to the fine grain + // table, and then finish with the coarse table. + // See HeapRegionRemSetIterator::has_next(). enum IterState { Sparse, Fine, @@ -403,9 +402,7 @@ public: // We require an iterator to be initialized before use, so the // constructor does little. - HeapRegionRemSetIterator(); - - void initialize(const HeapRegionRemSet* hrrs); + HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs); // If there remains one or more cards to be yielded, returns true and // sets "card_index" to one of those cards (which is then considered diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/sparsePRT.cpp --- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp Mon May 06 10:20:18 2013 -0700 @@ -35,10 +35,6 @@ #define UNROLL_CARD_LOOPS 1 -void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) { - sprt_iter->init(this); -} - void SparsePRTEntry::init(RegionIdx_t region_ind) { _region_ind = region_ind; _next_index = NullEntry; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/g1/sparsePRT.hpp --- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp Mon May 06 10:20:18 2013 -0700 @@ -192,18 +192,11 @@ size_t compute_card_ind(CardIdx_t ci); public: - RSHashTableIter() : - _tbl_ind(RSHashTable::NullEntry), + RSHashTableIter(RSHashTable* rsht) : + _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0. _bl_ind(RSHashTable::NullEntry), _card_ind((SparsePRTEntry::cards_num() - 1)), - _rsht(NULL) {} - - void init(RSHashTable* rsht) { - _rsht = rsht; - _tbl_ind = -1; // So that first increment gets to 0. 
- _bl_ind = RSHashTable::NullEntry; - _card_ind = (SparsePRTEntry::cards_num() - 1); - } + _rsht(rsht) {} bool has_next(size_t& card_index); }; @@ -284,8 +277,6 @@ static void cleanup_all(); RSHashTable* cur() const { return _cur; } - void init_iterator(SparsePRTIter* sprt_iter); - static void add_to_expanded_list(SparsePRT* sprt); static SparsePRT* get_from_expanded_list(); @@ -321,9 +312,9 @@ class SparsePRTIter: public RSHashTableIter { public: - void init(const SparsePRT* sprt) { - RSHashTableIter::init(sprt->cur()); - } + SparsePRTIter(const SparsePRT* sprt) : + RSHashTableIter(sprt->cur()) {} + bool has_next(size_t& card_index) { return RSHashTableIter::has_next(card_index); } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon May 06 10:20:18 2013 -0700 @@ -138,8 +138,7 @@ if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } // Verify object start arrays @@ -177,7 +176,7 @@ size_t prev_used = heap->used(); // Capture metadata size before collection for sizing. - size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); // For PrintGCDetails size_t old_gen_prev_used = old_gen->used_in_bytes(); @@ -238,6 +237,7 @@ // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); BiasedLocking::restore_marks(); Threads::gc_epilogue(); @@ -340,8 +340,7 @@ if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } // Re-verify object start arrays @@ -518,23 +517,23 @@ is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL); } - // Follow system dictionary roots and unload classes + // This is the point where the entire marking should have completed. + assert(_marking_stack.is_empty(), "Marking should have completed"); + + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); - // Follow code cache roots + // Unload nmethods. CodeCache::do_unloading(is_alive_closure(), purged_class); - follow_stack(); // Flush marking stack - // Update subklass/sibling/implementor links of live klasses - Klass::clean_weak_klass_links(&is_alive); - assert(_marking_stack.is_empty(), "just drained"); + // Prune dead klasses from subklass/sibling/implementor lists. + Klass::clean_weak_klass_links(is_alive_closure()); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(is_alive_closure()); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - - assert(_marking_stack.is_empty(), "stack should be empty by now"); } @@ -583,28 +582,27 @@ ClassLoaderDataGraph::clear_claimed_marks(); // General strong roots. 
- Universe::oops_do(adjust_root_pointer_closure()); - JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles - CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure()); - Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL); - ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); - FlatProfiler::oops_do(adjust_root_pointer_closure()); - Management::oops_do(adjust_root_pointer_closure()); - JvmtiExport::oops_do(adjust_root_pointer_closure()); + Universe::oops_do(adjust_pointer_closure()); + JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles + CLDToOopClosure adjust_from_cld(adjust_pointer_closure()); + Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); + ObjectSynchronizer::oops_do(adjust_pointer_closure()); + FlatProfiler::oops_do(adjust_pointer_closure()); + Management::oops_do(adjust_pointer_closure()); + JvmtiExport::oops_do(adjust_pointer_closure()); // SO_AllClasses - SystemDictionary::oops_do(adjust_root_pointer_closure()); - ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true); - //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure()); + SystemDictionary::oops_do(adjust_pointer_closure()); + ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); + JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); CodeCache::oops_do(adjust_pointer_closure()); - StringTable::oops_do(adjust_root_pointer_closure()); - ref_processor()->weak_oops_do(adjust_root_pointer_closure()); - PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure()); + StringTable::oops_do(adjust_pointer_closure()); + ref_processor()->weak_oops_do(adjust_pointer_closure()); + PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); adjust_marks(); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Mon May 06 10:20:18 2013 -0700 @@ -44,7 +44,6 @@ static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; } static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; } static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; } - static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_root_pointer_closure; } static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; } static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon May 06 10:20:18 2013 -0700 @@ -787,12 +787,11 @@ void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) 
{ PSParallelCompact::KeepAliveClosure::do_oop_work(p); } -PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true); -PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false); +PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure; PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure; -void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } -void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } +void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); } +void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); } void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); } @@ -805,7 +804,7 @@ klass->oops_do(_mark_and_push_closure); } void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) { - klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure); + klass->oops_do(&PSParallelCompact::_adjust_pointer_closure); } void PSParallelCompact::post_initialize() { @@ -892,7 +891,7 @@ _heap_used = heap->used(); _young_gen_used = heap->young_gen()->used_in_bytes(); _old_gen_used = heap->old_gen()->used_in_bytes(); - _metadata_used = MetaspaceAux::used_in_bytes(); + _metadata_used = MetaspaceAux::allocated_used_bytes(); }; size_t heap_used() const { return _heap_used; } @@ -967,8 +966,7 @@ if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } // Verify object start arrays @@ -1027,6 +1025,7 @@ // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); Threads::gc_epilogue(); CodeCache::gc_epilogue(); @@ -2168,8 +2167,7 @@ if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } // Re-verify object start arrays @@ -2356,22 +2354,24 @@ } TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty); + + // This is the point where the entire marking should have completed. + assert(cm->marking_stacks_empty(), "Marking should have completed"); + // Follow system dictionary roots and unload classes. bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); - // Follow code cache roots. + // Unload nmethods. CodeCache::do_unloading(is_alive_closure(), purged_class); - cm->follow_marking_stacks(); // Flush marking stack. - - // Update subklass/sibling/implementor links of live klasses + + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(is_alive_closure()); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(is_alive_closure()); + // Clean up unreferenced symbols in symbol table. 
SymbolTable::unlink(); - - assert(cm->marking_stacks_empty(), "marking stacks should be empty"); } void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) { @@ -2398,7 +2398,7 @@ void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm, ClassLoaderData* cld) { - cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(), + cld->oops_do(PSParallelCompact::adjust_pointer_closure(), PSParallelCompact::adjust_klass_closure(), true); } @@ -2419,32 +2419,31 @@ ClassLoaderDataGraph::clear_claimed_marks(); // General strong roots. - Universe::oops_do(adjust_root_pointer_closure()); - JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles - CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure()); - Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL); - ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); - FlatProfiler::oops_do(adjust_root_pointer_closure()); - Management::oops_do(adjust_root_pointer_closure()); - JvmtiExport::oops_do(adjust_root_pointer_closure()); + Universe::oops_do(adjust_pointer_closure()); + JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles + CLDToOopClosure adjust_from_cld(adjust_pointer_closure()); + Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); + ObjectSynchronizer::oops_do(adjust_pointer_closure()); + FlatProfiler::oops_do(adjust_pointer_closure()); + Management::oops_do(adjust_pointer_closure()); + JvmtiExport::oops_do(adjust_pointer_closure()); // SO_AllClasses - SystemDictionary::oops_do(adjust_root_pointer_closure()); - ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true); + SystemDictionary::oops_do(adjust_pointer_closure()); + ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); + JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); CodeCache::oops_do(adjust_pointer_closure()); - StringTable::oops_do(adjust_root_pointer_closure()); - ref_processor()->weak_oops_do(adjust_root_pointer_closure()); + StringTable::oops_do(adjust_pointer_closure()); + ref_processor()->weak_oops_do(adjust_pointer_closure()); // Roots were visited so references into the young gen in roots // may have been scanned. Process them also. // Should the reference processor have a span that excludes // young gen objects? 
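All of the adjust_pointer_closure plumbing above feeds one primitive: the mark-compact pointer-fixup step, which loads a reference, looks up the forwarding address computed for the object during the summary phase, and rewrites the slot in place. Stripped of oop encoding and compressed pointers, the idea reduces to the following standalone sketch (Obj and forwardee are hypothetical stand-ins, not the HotSpot representation):

    #include <cstddef>

    struct Obj {
      Obj* forwardee;  // post-compaction address, filled in earlier
    };

    // Rewrite *slot to the object's new location. Null slots are left
    // alone, mirroring the is_null() guard in the real adjust_pointer.
    void adjust_pointer(Obj** slot) {
      Obj* obj = *slot;
      if (obj != NULL && obj->forwardee != NULL) {
        *slot = obj->forwardee;
      }
    }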
- PSScavenge::reference_processor()->weak_oops_do( - adjust_root_pointer_closure()); + PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); } void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Mon May 06 10:20:18 2013 -0700 @@ -799,16 +799,6 @@ virtual void do_oop(narrowOop* p); }; - // Current unused - class FollowRootClosure: public OopsInGenClosure { - private: - ParCompactionManager* _compaction_manager; - public: - FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } - virtual void do_oop(oop* p); - virtual void do_oop(narrowOop* p); - }; - class FollowStackClosure: public VoidClosure { private: ParCompactionManager* _compaction_manager; @@ -818,10 +808,7 @@ }; class AdjustPointerClosure: public OopClosure { - private: - bool _is_root; public: - AdjustPointerClosure(bool is_root) : _is_root(is_root) { } virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p); // do not walk from thread stacks to the code cache on this phase @@ -838,7 +825,6 @@ friend class AdjustPointerClosure; friend class AdjustKlassClosure; friend class FollowKlassClosure; - friend class FollowRootClosure; friend class InstanceClassLoaderKlass; friend class RefProcTaskProxy; @@ -853,7 +839,6 @@ static IsAliveClosure _is_alive_closure; static SpaceInfo _space_info[last_space_id]; static bool _print_phases; - static AdjustPointerClosure _adjust_root_pointer_closure; static AdjustPointerClosure _adjust_pointer_closure; static AdjustKlassClosure _adjust_klass_closure; @@ -889,9 +874,6 @@ static void marking_phase(ParCompactionManager* cm, bool maximum_heap_compaction); - template <class T> static inline void adjust_pointer(T* p, bool is_root); - static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } - template <class T> static inline void follow_root(ParCompactionManager* cm, T* p); @@ -1046,7 +1028,6 @@ // Closure accessors static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; } - static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; } static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; } static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } @@ -1067,6 +1048,7 @@ // Check mark and maybe push on marking stack template <class T> static inline void mark_and_push(ParCompactionManager* cm, T* p); + template <class T> static inline void adjust_pointer(T* p); static void follow_klass(ParCompactionManager* cm, Klass* klass); static void adjust_klass(ParCompactionManager* cm, Klass* klass); @@ -1151,9 +1133,6 @@ static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; } static ParallelCompactData& summary_data() { return _summary_data; } - static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); } - static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } - // Reference Processing static ReferenceProcessor* const ref_processor() { return _ref_processor; } @@ -1230,7 +1209,7 @@ } template <class T> -inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) { +inline void PSParallelCompact::adjust_pointer(T* p) { T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj =
oopDesc::decode_heap_oop_not_null(heap_oop); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Mon May 06 10:20:18 2013 -0700 @@ -314,8 +314,7 @@ if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } { @@ -638,8 +637,7 @@ if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } heap->print_heap_after_gc(); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/shared/markSweep.cpp --- a/src/share/vm/gc_implementation/shared/markSweep.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Mon May 06 10:20:18 2013 -0700 @@ -81,7 +81,7 @@ } void MarkSweep::adjust_class_loader(ClassLoaderData* cld) { - cld->oops_do(&MarkSweep::adjust_root_pointer_closure, &MarkSweep::adjust_klass_closure, true); + cld->oops_do(&MarkSweep::adjust_pointer_closure, &MarkSweep::adjust_klass_closure, true); } @@ -121,11 +121,10 @@ } } -MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true); -MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false); +MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure; -void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } -void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } +void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); } +void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); } void MarkSweep::adjust_marks() { assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(), diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/shared/markSweep.hpp --- a/src/share/vm/gc_implementation/shared/markSweep.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Mon May 06 10:20:18 2013 -0700 @@ -80,10 +80,7 @@ }; class AdjustPointerClosure: public OopsInGenClosure { - private: - bool _is_root; public: - AdjustPointerClosure(bool is_root) : _is_root(is_root) {} virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p); }; @@ -146,7 +143,6 @@ static MarkAndPushClosure mark_and_push_closure; static FollowKlassClosure follow_klass_closure; static FollowStackClosure follow_stack_closure; - static AdjustPointerClosure adjust_root_pointer_closure; static AdjustPointerClosure adjust_pointer_closure; static AdjustKlassClosure adjust_klass_closure; @@ -179,12 +175,7 @@ static void adjust_marks(); // Adjust the pointers in the preserved marks table static void restore_marks(); // Restore the marks that we saved in preserve_mark - template <class T> static inline void adjust_pointer(T* p, bool isroot); - - static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } - static void adjust_pointer(oop* p) { adjust_pointer(p, false); } - static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } - + template <class T> static inline void adjust_pointer(T* p); }; class PreservedMark VALUE_OBJ_CLASS_SPEC { diff -r d9b08d62b95e -r
b7f3bf2ba33b src/share/vm/gc_implementation/shared/markSweep.inline.hpp --- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp Mon May 06 10:20:18 2013 -0700 @@ -76,7 +76,7 @@ _objarray_stack.push(task); } -template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) { +template <class T> inline void MarkSweep::adjust_pointer(T* p) { T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/gc_implementation/shared/vmGCOperations.cpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Mon May 06 10:20:18 2013 -0700 @@ -225,7 +225,10 @@ gclog_or_tty->print_cr("\nCMS full GC for Metaspace"); } heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold); - _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); + // After a GC try to allocate without expanding. This could fail, + // in which case expansion is tried below. + _result = + _loader_data->metaspace_non_null()->allocate(_size, _mdtype); } if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) { // If still failing, allow the Metaspace to expand. diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/filemap.cpp --- a/src/share/vm/memory/filemap.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/filemap.cpp Mon May 06 10:20:18 2013 -0700 @@ -238,8 +238,8 @@ void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) { align_file_position(); - size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord; - size_t capacity = space->capacity_words(Metaspace::NonClassType) * BytesPerWord; + size_t used = space->used_bytes_slow(Metaspace::NonClassType); + size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType); struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; write_region(i, (char*)space->bottom(), used, capacity, read_only, false); } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/genCollectedHeap.cpp Mon May 06 10:20:18 2013 -0700 @@ -377,7 +377,7 @@ ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); - const size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); print_heap_before_gc(); @@ -447,8 +447,7 @@ prepare_for_verify(); prepared_for_verification = true; } - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } COMPILER2_PRESENT(DerivedPointerTable::clear()); @@ -519,8 +518,7 @@ if (VerifyAfterGC && i >= VerifyGCLevel && total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } if (PrintGCDetails) { @@ -556,6 +554,7 @@ if (complete) { // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); // Resize the metaspace capacity after full collections MetaspaceGC::compute_new_size(); update_full_collections_completed(); @@ -633,9 +632,8 @@ } void GenCollectedHeap::gen_process_weak_roots(OopClosure*
root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure) { - SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure); + CodeBlobClosure* code_roots) { + SharedHeap::process_weak_roots(root_closure, code_roots); // "Local" "weak" refs for (int i = 0; i < _n_gens; i++) { _gens[i]->ref_processor()->weak_oops_do(root_closure); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/genCollectedHeap.hpp --- a/src/share/vm/memory/genCollectedHeap.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/genCollectedHeap.hpp Mon May 06 10:20:18 2013 -0700 @@ -432,8 +432,7 @@ // JNI weak roots, the code cache, system dictionary, symbol table, // string table, and referents of reachable weak refs. void gen_process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure); + CodeBlobClosure* code_roots); // Set the saved marks of generations, if that makes sense. // In particular, if any generation might iterate over the oops diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/genMarkSweep.cpp --- a/src/share/vm/memory/genMarkSweep.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/genMarkSweep.cpp Mon May 06 10:20:18 2013 -0700 @@ -223,23 +223,23 @@ &is_alive, &keep_alive, &follow_stack_closure, NULL); } - // Follow system dictionary roots and unload classes + // This is the point where the entire marking should have completed. + assert(_marking_stack.is_empty(), "Marking should have completed"); + + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&is_alive); - // Follow code cache roots + // Unload nmethods. CodeCache::do_unloading(&is_alive, purged_class); - follow_stack(); // Flush marking stack - // Update subklass/sibling/implementor links of live klasses + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(&is_alive); - assert(_marking_stack.is_empty(), "just drained"); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(&is_alive); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - - assert(_marking_stack.is_empty(), "stack should be empty by now"); } @@ -282,11 +282,10 @@ // Need new claim bits for the pointer adjustment tracing. ClassLoaderDataGraph::clear_claimed_marks(); - // Because the two closures below are created statically, cannot + // Because the closure below is created statically, we cannot // use OopsInGenClosure constructor which takes a generation, // as the Universe has not been created when the static constructors // are run. - adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level)); adjust_pointer_closure.set_orig_generation(gch->get_gen(level)); gch->gen_process_strong_roots(level, @@ -294,18 +293,17 @@ true, // activate StrongRootsScope false, // not scavenging SharedHeap::SO_AllClasses, - &adjust_root_pointer_closure, + &adjust_pointer_closure, false, // do not walk code - &adjust_root_pointer_closure, + &adjust_pointer_closure, &adjust_klass_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) 
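The set_orig_generation call in the genMarkSweep hunk above is a two-phase initialization workaround: the closure is built by a static constructor before the heap exists, so the generation has to be patched in once the VM is far enough along, much as ClassLoaderData defers Dependencies::init. The pattern in miniature (all names hypothetical):

    #include <cstddef>

    class AdjustClosure {
      void* _gen;                          // unknown at static-init time
    public:
      AdjustClosure() : _gen(NULL) {}      // phase one: static construction
      void set_gen(void* g) { _gen = g; }  // phase two: patch in the generation
      bool is_ready() const { return _gen != NULL; }
    };

    static AdjustClosure adjust_closure;   // constructed before the heap

    void on_heap_initialized(void* gen) {
      adjust_closure.set_gen(gen);         // complete the deferred setup
    }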
CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure, /*do_marking=*/ false); - gch->gen_process_weak_roots(&adjust_root_pointer_closure, - &adjust_code_pointer_closure, - &adjust_pointer_closure); + gch->gen_process_weak_roots(&adjust_pointer_closure, + &adjust_code_pointer_closure); adjust_marks(); GenAdjustPointersClosure blk; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/metachunk.cpp --- a/src/share/vm/memory/metachunk.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/metachunk.cpp Mon May 06 10:20:18 2013 -0700 @@ -28,6 +28,7 @@ #include "utilities/copy.hpp" #include "utilities/debug.hpp" +class VirtualSpaceNode; // // Future modification // @@ -45,27 +46,30 @@ // Metachunk methods -Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) { - // Set bottom, top, and end. Allow space for the Metachunk itself - Metachunk* chunk = (Metachunk*) ptr; - - MetaWord* chunk_bottom = ptr + _overhead; - chunk->set_bottom(ptr); - chunk->set_top(chunk_bottom); - MetaWord* chunk_end = ptr + word_size; - assert(chunk_end > chunk_bottom, "Chunk must be too small"); - chunk->set_end(chunk_end); - chunk->set_next(NULL); - chunk->set_prev(NULL); - chunk->set_word_size(word_size); +Metachunk::Metachunk(size_t word_size, + VirtualSpaceNode* container) : + _word_size(word_size), + _bottom(NULL), + _end(NULL), + _top(NULL), + _next(NULL), + _prev(NULL), + _container(container) +{ + _bottom = (MetaWord*)this; + _top = (MetaWord*)this + _overhead; + _end = (MetaWord*)this + word_size; #ifdef ASSERT - size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord)); - Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize); + set_is_free(false); + size_t data_word_size = pointer_delta(end(), + top(), + sizeof(MetaWord)); + Copy::fill_to_words((HeapWord*) top(), + data_word_size, + metadata_chunk_initialize); #endif - return chunk; } - MetaWord* Metachunk::allocate(size_t word_size) { MetaWord* result = NULL; // If available, bump the pointer to allocate. diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/metachunk.hpp --- a/src/share/vm/memory/metachunk.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/metachunk.hpp Mon May 06 10:20:18 2013 -0700 @@ -41,10 +41,13 @@ // | | | | // +--------------+ <- bottom ---+ ---+ +class VirtualSpaceNode; + class Metachunk VALUE_OBJ_CLASS_SPEC { // link to support lists of chunks Metachunk* _next; Metachunk* _prev; + VirtualSpaceNode* _container; MetaWord* _bottom; MetaWord* _end; @@ -61,29 +64,20 @@ // the space. 
static size_t _overhead; - void set_bottom(MetaWord* v) { _bottom = v; } - void set_end(MetaWord* v) { _end = v; } - void set_top(MetaWord* v) { _top = v; } - void set_word_size(size_t v) { _word_size = v; } public: -#ifdef ASSERT - Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false), - _next(NULL), _prev(NULL) {} -#else - Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), - _next(NULL), _prev(NULL) {} -#endif + Metachunk(size_t word_size , VirtualSpaceNode* container); // Used to add a Metachunk to a list of Metachunks void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");} void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");} + void set_container(VirtualSpaceNode* v) { _container = v; } MetaWord* allocate(size_t word_size); - static Metachunk* initialize(MetaWord* ptr, size_t word_size); // Accessors Metachunk* next() const { return _next; } Metachunk* prev() const { return _prev; } + VirtualSpaceNode* container() const { return _container; } MetaWord* bottom() const { return _bottom; } MetaWord* end() const { return _end; } MetaWord* top() const { return _top; } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/metaspace.cpp --- a/src/share/vm/memory/metaspace.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/metaspace.cpp Mon May 06 10:20:18 2013 -0700 @@ -47,7 +47,6 @@ // the free chunk lists const bool metaspace_slow_verify = false; - // Parameters for stress mode testing const uint metadata_deallocate_a_lot_block = 10; const uint metadata_deallocate_a_lock_chunk = 3; @@ -112,6 +111,7 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { // Free list of chunks of different sizes. + // SpecializedChunk // SmallChunk // MediumChunk // HumongousChunk @@ -165,6 +165,10 @@ // for special, small, medium, and humongous chunks. static ChunkIndex list_index(size_t size); + // Remove the chunk from its freelist. It is + // expected to be on one of the _free_chunks[] lists. + void remove_chunk(Metachunk* chunk); + // Add the simple linked list of chunks to the freelist of chunks // of type index. void return_chunks(ChunkIndex index, Metachunk* chunks); @@ -215,7 +219,6 @@ void print_on(outputStream* st); }; - // Used to manage the free list of Metablocks (a block corresponds // to the allocation of a quantum of metadata). 
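The dictionary idea behind this freelist can be illustrated with a small standalone sketch (assumed names and simplified types, not the actual BlockFreelist or its _dictionary backing):

// Sketch: deallocated metadata blocks are kept keyed by word size and
// handed back out on a best-fit basis rather than being returned to the OS.
#include <cstddef>
#include <map>
#include <vector>

class SimpleBlockFreelist {
  std::map<size_t, std::vector<void*> > _blocks;  // word_size -> free blocks
 public:
  void return_block(void* p, size_t word_size) {
    _blocks[word_size].push_back(p);
  }
  void* get_block(size_t word_size) {
    // Best fit: the smallest recorded size that is >= word_size.
    for (std::map<size_t, std::vector<void*> >::iterator it = _blocks.lower_bound(word_size);
         it != _blocks.end(); ++it) {
      if (!it->second.empty()) {
        void* p = it->second.back();
        it->second.pop_back();
        return p;
      }
    }
    return NULL;  // caller falls back to carving from the current chunk
  }
};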
class BlockFreelist VALUE_OBJ_CLASS_SPEC { @@ -255,6 +258,8 @@ ReservedSpace _rs; VirtualSpace _virtual_space; MetaWord* _top; + // count of chunks contained in this VirtualSpace + uintx _container_count; // Convenience functions for logical bottom and end MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } @@ -264,10 +269,19 @@ char* low() const { return virtual_space()->low(); } char* high() const { return virtual_space()->high(); } + // The first Metachunk will be allocated at the bottom of the + // VirtualSpace + Metachunk* first_chunk() { return (Metachunk*) bottom(); } + + void inc_container_count(); +#ifdef ASSERT + uint container_count_slow(); +#endif + public: VirtualSpaceNode(size_t byte_size); - VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {} + VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {} ~VirtualSpaceNode(); // address of next available space in _virtual_space; @@ -282,15 +296,22 @@ MemRegion* reserved() { return &_reserved; } VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; } - // Returns true if "word_size" is available in the virtual space + // Returns true if "word_size" is available in the VirtualSpace bool is_available(size_t word_size) { return _top + word_size <= end(); } MetaWord* top() const { return _top; } void inc_top(size_t word_size) { _top += word_size; } + uintx container_count() { return _container_count; } + void dec_container_count(); +#ifdef ASSERT + void verify_container_count(); +#endif + // used and capacity in this single entry in the list size_t used_words_in_vs() const; size_t capacity_words_in_vs() const; + size_t free_words_in_vs() const; bool initialize(); @@ -306,6 +327,10 @@ bool expand_by(size_t words, bool pre_touch = false); bool shrink_by(size_t words); + // In preparation for deleting this node, remove all the chunks + // in the node from any freelist. + void purge(ChunkManager* chunk_manager); + #ifdef ASSERT // Debug support static void verify_virtual_space_total(); @@ -317,7 +342,7 @@ }; // byte_size is the size of the associated virtualspace. -VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) { +VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) { // align up to vm allocation granularity byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); @@ -341,6 +366,39 @@ MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); } +void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { + Metachunk* chunk = first_chunk(); + Metachunk* invalid_chunk = (Metachunk*) top(); + while (chunk < invalid_chunk ) { + assert(chunk->is_free(), "Should be marked free"); + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + chunk_manager->remove_chunk(chunk); + assert(chunk->next() == NULL && + chunk->prev() == NULL, + "Was not removed from its list"); + chunk = (Metachunk*) next; + } +} + +#ifdef ASSERT +uint VirtualSpaceNode::container_count_slow() { + uint count = 0; + Metachunk* chunk = first_chunk(); + Metachunk* invalid_chunk = (Metachunk*) top(); + while (chunk < invalid_chunk ) { + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + // Don't count the chunks on the free lists. Those are + // still part of the VirtualSpaceNode but not currently + // counted. 
+ if (!chunk->is_free()) { + count++; + } + chunk = (Metachunk*) next; + } + return count; +} +#endif + // List of VirtualSpaces for metadata allocation. // It has a _next link for singly linked list and a MemRegion // for total space in the VirtualSpace. @@ -390,6 +448,8 @@ VirtualSpaceList(size_t word_size); VirtualSpaceList(ReservedSpace rs); + size_t free_bytes(); + Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words, size_t medium_chunk_bunch); @@ -410,14 +470,14 @@ void initialize(size_t word_size); size_t virtual_space_total() { return _virtual_space_total; } - void inc_virtual_space_total(size_t v) { - Atomic::add_ptr(v, &_virtual_space_total); - } - - size_t virtual_space_count() { return _virtual_space_count; } - void inc_virtual_space_count() { - Atomic::inc_ptr(&_virtual_space_count); - } + + void inc_virtual_space_total(size_t v); + void dec_virtual_space_total(size_t v); + void inc_virtual_space_count(); + void dec_virtual_space_count(); + + // Unlink empty VirtualSpaceNodes and free them. + void purge(); // Used and capacity in the entire list of virtual spaces. // These are global values shared by all Metaspaces @@ -520,7 +580,11 @@ bool has_small_chunk_limit() { return !vs_list()->is_class(); } // Sum of all space in allocated chunks - size_t _allocation_total; + size_t _allocated_blocks_words; + + // Sum of all allocated chunks + size_t _allocated_chunks_words; + size_t _allocated_chunks_count; // Free lists of blocks are per SpaceManager since they // are assumed to be in chunks in use by the SpaceManager @@ -576,12 +640,27 @@ size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; } size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } - size_t allocation_total() const { return _allocation_total; } - void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); } + size_t allocated_blocks_words() const { return _allocated_blocks_words; } + size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; } + size_t allocated_chunks_words() const { return _allocated_chunks_words; } + size_t allocated_chunks_count() const { return _allocated_chunks_count; } + bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); } static Mutex* expand_lock() { return _expand_lock; } + // Increment the per Metaspace and global running sums for Metachunks + // by the given size. This is used when a Metachunk is added to + // the in-use list. + void inc_size_metrics(size_t words); + // Increment the per Metaspace and global running sums for Metablocks by the given + // size. This is used when a Metablock is allocated. + void inc_used_metrics(size_t words); + // Delete the portion of the running sums for this SpaceManager. That is, + // the global running sums for the Metachunks and Metablocks are + // decremented for all the Metachunks in-use by this SpaceManager. + void dec_total_from_size_metrics(); + // Set the sizes for the initial chunks.
void get_initial_chunk_sizes(Metaspace::MetaspaceType type, size_t* chunk_word_size, @@ -627,7 +706,7 @@ void verify_chunk_size(Metachunk* chunk); NOT_PRODUCT(void mangle_freed_chunks();) #ifdef ASSERT - void verify_allocation_total(); + void verify_allocated_blocks_words(); #endif }; @@ -641,6 +720,28 @@ SpaceManager::_expand_lock_name, Mutex::_allow_vm_block_flag); +void VirtualSpaceNode::inc_container_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _container_count++; + assert(_container_count == container_count_slow(), + err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT + " container_count_slow() " SIZE_FORMAT, + _container_count, container_count_slow())); +} + +void VirtualSpaceNode::dec_container_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _container_count--; +} + +#ifdef ASSERT +void VirtualSpaceNode::verify_container_count() { + assert(_container_count == container_count_slow(), + err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT + " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); +} +#endif + // BlockFreelist methods BlockFreelist::BlockFreelist() : _dictionary(NULL) {} @@ -701,6 +802,10 @@ VirtualSpaceNode::~VirtualSpaceNode() { _rs.release(); +#ifdef ASSERT + size_t word_size = sizeof(*this) / BytesPerWord; + Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1); +#endif } size_t VirtualSpaceNode::used_words_in_vs() const { @@ -712,6 +817,9 @@ return pointer_delta(end(), bottom(), sizeof(MetaWord)); } +size_t VirtualSpaceNode::free_words_in_vs() const { + return pointer_delta(end(), top(), sizeof(MetaWord)); +} // Allocates the chunk from the virtual space only. // This interface is also used internally for debugging. Not all @@ -733,8 +841,8 @@ // Take the space (bump top on the current virtual space). inc_top(chunk_word_size); - // Point the chunk at the space - Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size); + // Initialize the chunk + Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this); return result; } @@ -762,9 +870,11 @@ Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) { assert_lock_strong(SpaceManager::expand_lock()); - Metachunk* result = NULL; - - return take_from_committed(chunk_word_size); + Metachunk* result = take_from_committed(chunk_word_size); + if (result != NULL) { + inc_container_count(); + } + return result; } Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) { @@ -843,6 +953,83 @@ } } +void VirtualSpaceList::inc_virtual_space_total(size_t v) { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_total = _virtual_space_total + v; +} +void VirtualSpaceList::dec_virtual_space_total(size_t v) { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_total = _virtual_space_total - v; +} + +void VirtualSpaceList::inc_virtual_space_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_count++; +} +void VirtualSpaceList::dec_virtual_space_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_count--; +} + +void ChunkManager::remove_chunk(Metachunk* chunk) { + size_t word_size = chunk->word_size(); + ChunkIndex index = list_index(word_size); + if (index != HumongousIndex) { + free_chunks(index)->remove_chunk(chunk); + } else { + humongous_dictionary()->remove_chunk(chunk); + } + + // Chunk is being removed from the chunks free list.
+ dec_free_chunks_total(chunk->capacity_word_size()); +} + +// Walk the list of VirtualSpaceNodes and delete +// nodes with a 0 container_count. Remove Metachunks in +// the node from their respective freelists. +void VirtualSpaceList::purge() { + assert_lock_strong(SpaceManager::expand_lock()); + // Don't use a VirtualSpaceListIterator because this + // list is being changed and a straightforward use of an iterator is not safe. + VirtualSpaceNode* purged_vsl = NULL; + VirtualSpaceNode* prev_vsl = virtual_space_list(); + VirtualSpaceNode* next_vsl = prev_vsl; + while (next_vsl != NULL) { + VirtualSpaceNode* vsl = next_vsl; + next_vsl = vsl->next(); + // Don't free the current virtual space since it will likely + // be needed soon. + if (vsl->container_count() == 0 && vsl != current_virtual_space()) { + // Unlink it from the list + if (prev_vsl == vsl) { + // This is the case of the current node being the first node. + assert(vsl == virtual_space_list(), "Expected to be the first node"); + set_virtual_space_list(vsl->next()); + } else { + prev_vsl->set_next(vsl->next()); + } + + vsl->purge(chunk_manager()); + dec_virtual_space_total(vsl->reserved()->word_size()); + dec_virtual_space_count(); + purged_vsl = vsl; + delete vsl; + } else { + prev_vsl = vsl; + } + } +#ifdef ASSERT + if (purged_vsl != NULL) { + // List should be stable enough to use an iterator here. + VirtualSpaceListIterator iter(virtual_space_list()); + while (iter.repeat()) { + VirtualSpaceNode* vsl = iter.get_next(); + assert(vsl != purged_vsl, "Purge of vsl failed"); + } + } +#endif +} + size_t VirtualSpaceList::used_words_sum() { size_t allocated_by_vs = 0; VirtualSpaceListIterator iter(virtual_space_list()); @@ -907,6 +1094,10 @@ link_vs(class_entry, rs.size()/BytesPerWord); } +size_t VirtualSpaceList::free_bytes() { + return virtual_space_list()->free_words_in_vs() * BytesPerWord; +} + // Allocate another meta virtual space and add it to the list. bool VirtualSpaceList::grow_vs(size_t vs_word_size) { assert_lock_strong(SpaceManager::expand_lock()); @@ -955,8 +1146,10 @@ // Get a chunk from the chunk freelist Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); - // Allocate a chunk out of the current virtual space. - if (next == NULL) { + if (next != NULL) { + next->container()->inc_container_count(); + } else { + // Allocate a chunk out of the current virtual space. next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); } @@ -1045,9 +1238,9 @@ // // After the GC the compute_new_size() for MetaspaceGC is called to // resize the capacity of the metaspaces. The current implementation -// is based on the flags MinMetaspaceFreeRatio and MaxHeapFreeRatio used +// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used // to resize the Java heap by some GC's. New flags can be implemented -// if really needed. MinHeapFreeRatio is used to calculate how much +// if really needed. MinMetaspaceFreeRatio is used to calculate how much // free space is desirable in the metaspace capacity to decide how much // to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much // free space is desirable in the metaspace capacity before decreasing @@ -1082,7 +1275,11 @@ } bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { + + size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); // If the user wants a limit, impose one.
+ size_t max_metaspace_size_bytes = MaxMetaspaceSize; + size_t metaspace_size_bytes = MetaspaceSize; if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) && MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) { return false; @@ -1094,57 +1291,48 @@ // If this is part of an allocation after a GC, expand // unconditionally. - if(MetaspaceGC::expand_after_GC()) { + if (MetaspaceGC::expand_after_GC()) { return true; } - size_t metaspace_size_words = MetaspaceSize / BytesPerWord; + // If the capacity is below the minimum capacity, allow the // expansion. Also set the high-water-mark (capacity_until_GC) // to that minimum capacity so that a GC will not be induced // until that minimum capacity is exceeded. - if (vsl->capacity_words_sum() < metaspace_size_words || + if (committed_capacity_bytes < metaspace_size_bytes || capacity_until_GC() == 0) { - set_capacity_until_GC(metaspace_size_words); + set_capacity_until_GC(metaspace_size_bytes); return true; } else { - if (vsl->capacity_words_sum() < capacity_until_GC()) { + if (committed_capacity_bytes < capacity_until_GC()) { return true; } else { if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT " capacity_until_GC " SIZE_FORMAT - " capacity_words_sum " SIZE_FORMAT - " used_words_sum " SIZE_FORMAT - " free chunks " SIZE_FORMAT - " free chunks count %d", + " allocated_capacity_bytes " SIZE_FORMAT, word_size, capacity_until_GC(), - vsl->capacity_words_sum(), - vsl->used_words_sum(), - vsl->chunk_manager()->free_chunks_total(), - vsl->chunk_manager()->free_chunks_count()); + MetaspaceAux::allocated_capacity_bytes()); } return false; } } } -// Variables are in bytes + void MetaspaceGC::compute_new_size() { assert(_shrink_factor <= 100, "invalid shrink factor"); uint current_shrink_factor = _shrink_factor; _shrink_factor = 0; - VirtualSpaceList *vsl = Metaspace::space_list(); - - size_t capacity_after_gc = vsl->capacity_bytes_sum(); - // Check to see if these two can be calculated without walking the CLDG - size_t used_after_gc = vsl->used_bytes_sum(); - size_t capacity_until_GC = vsl->capacity_bytes_sum(); - size_t free_after_gc = capacity_until_GC - used_after_gc; + // Until a faster way of calculating the "used" quantity is implemented, + // use "capacity". + const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes(); + const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; const double maximum_used_percentage = 1.0 - minimum_free_percentage; @@ -1157,45 +1345,34 @@ MetaspaceSize); if (PrintGCDetails && Verbose) { - const double free_percentage = ((double)free_after_gc) / capacity_until_GC; gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: "); gclog_or_tty->print_cr(" " " minimum_free_percentage: %6.2f" " maximum_used_percentage: %6.2f", minimum_free_percentage, maximum_used_percentage); - double d_free_after_gc = free_after_gc / (double) K; gclog_or_tty->print_cr(" " - " free_after_gc : %6.1fK" - " used_after_gc : %6.1fK" - " capacity_after_gc : %6.1fK" - " metaspace HWM : %6.1fK", - free_after_gc / (double) K, - used_after_gc / (double) K, - capacity_after_gc / (double) K, - capacity_until_GC / (double) K); - gclog_or_tty->print_cr(" " - " free_percentage: %6.2f", - free_percentage); + " used_after_gc : %6.1fKB", + used_after_gc / (double) K); } + size_t shrink_bytes = 0; if (capacity_until_GC < minimum_desired_capacity) { // If we have less capacity below the metaspace HWM, then // increment the HWM. 
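// Worked example (illustrative numbers only): with used_after_gc = 60MB and
// MinMetaspaceFreeRatio = 40, maximum_used_percentage = 0.60, so
// minimum_desired_capacity = 60MB / 0.60 = 100MB; a HWM (capacity_until_GC)
// of 80MB would then be raised by expand_bytes = 20MB, provided that is at
// least MinMetaspaceExpansion.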
size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; // Don't expand unless it's significant if (expand_bytes >= MinMetaspaceExpansion) { - size_t expand_words = expand_bytes / BytesPerWord; - MetaspaceGC::inc_capacity_until_GC(expand_words); + MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes); } if (PrintGCDetails && Verbose) { - size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes(); + size_t new_capacity_until_GC = capacity_until_GC; gclog_or_tty->print_cr(" expanding:" - " minimum_desired_capacity: %6.1fK" - " expand_words: %6.1fK" - " MinMetaspaceExpansion: %6.1fK" - " new metaspace HWM: %6.1fK", + " minimum_desired_capacity: %6.1fKB" + " expand_bytes: %6.1fKB" + " MinMetaspaceExpansion: %6.1fKB" + " new metaspace HWM: %6.1fKB", minimum_desired_capacity / (double) K, expand_bytes / (double) K, MinMetaspaceExpansion / (double) K, @@ -1205,11 +1382,10 @@ } // No expansion, now see if we want to shrink - size_t shrink_words = 0; // We would never want to shrink more than this - size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity; - assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT, - max_shrink_words)); + size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; + assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT, + max_shrink_bytes)); // Should shrinking be considered? if (MaxMetaspaceFreeRatio < 100) { @@ -1219,17 +1395,15 @@ size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); maximum_desired_capacity = MAX2(maximum_desired_capacity, MetaspaceSize); - if (PrintGC && Verbose) { + if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr(" " " maximum_free_percentage: %6.2f" " minimum_used_percentage: %6.2f", maximum_free_percentage, minimum_used_percentage); gclog_or_tty->print_cr(" " - " capacity_until_GC: %6.1fK" - " minimum_desired_capacity: %6.1fK" - " maximum_desired_capacity: %6.1fK", - capacity_until_GC / (double) K, + " minimum_desired_capacity: %6.1fKB" + " maximum_desired_capacity: %6.1fKB", minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K); } @@ -1239,17 +1413,17 @@ if (capacity_until_GC > maximum_desired_capacity) { // Capacity too large, compute shrinking size - shrink_words = capacity_until_GC - maximum_desired_capacity; + shrink_bytes = capacity_until_GC - maximum_desired_capacity; // We don't want to shrink all the way back to initSize if people call // System.gc(), because some programs do that between "phases" and then // we'd just have to grow the heap up again for the next phase. So we // damp the shrinking: 0% on the first call, 10% on the second call, 40% // on the third call, and 100% by the fourth call. But if we recompute // size without shrinking, it goes back to 0%.
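To see the damping schedule in isolation, here is a minimal standalone sketch (assumed names; it also assumes the factor update below scales by 4x and caps at 100, consistent with the 0/10/40/100 sequence the comment describes):

// Sketch: consecutive shrink requests are scaled by a factor that grows
// across calls (0% -> 10% -> 40% -> 100%) and resets to 0% whenever a
// recomputation decides not to shrink.
#include <cstdio>
#include <cstddef>

static unsigned shrink_factor = 0;  // plays the role of _shrink_factor

size_t damped_shrink(size_t excess_bytes) {
  unsigned current = shrink_factor;
  unsigned next = (current == 0) ? 10 : current * 4;
  shrink_factor = (next > 100) ? 100 : next;
  return excess_bytes / 100 * current;  // same integer math as the code below
}

int main() {
  for (int call = 1; call <= 4; call++) {
    std::printf("call %d: shrink by %zu bytes\n",
                call, damped_shrink(1024 * 1024));
  }
  return 0;  // shrinks by 0%, then ~10%, ~40%, and 100% of the excess
}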
- shrink_words = shrink_words / 100 * current_shrink_factor; - assert(shrink_words <= max_shrink_words, + shrink_bytes = shrink_bytes / 100 * current_shrink_factor; + assert(shrink_bytes <= max_shrink_bytes, err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, - shrink_words, max_shrink_words)); + shrink_bytes, max_shrink_bytes)); if (current_shrink_factor == 0) { _shrink_factor = 10; } else { @@ -1263,11 +1437,11 @@ MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); gclog_or_tty->print_cr(" " - " shrink_words: %.1fK" + " shrink_bytes: %.1fK" " current_shrink_factor: %d" " new shrink factor: %d" " MinMetaspaceExpansion: %.1fK", - shrink_words / (double) K, + shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); @@ -1275,23 +1449,11 @@ } } - // Don't shrink unless it's significant - if (shrink_words >= MinMetaspaceExpansion) { - VirtualSpaceNode* csp = vsl->current_virtual_space(); - size_t available_to_shrink = csp->capacity_words_in_vs() - - csp->used_words_in_vs(); - shrink_words = MIN2(shrink_words, available_to_shrink); - csp->shrink_by(shrink_words); - MetaspaceGC::dec_capacity_until_GC(shrink_words); - if (PrintGCDetails && Verbose) { - size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes(); - gclog_or_tty->print_cr(" metaspace HWM: %.1fK", new_capacity_until_GC / (double) K); - } + if (shrink_bytes >= MinMetaspaceExpansion && + ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { + MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes); } - assert(used_after_gc <= vsl->capacity_bytes_sum(), - "sanity check"); - } // Metadebug methods @@ -1567,9 +1729,6 @@ } // Chunk is being removed from the chunks free list. dec_free_chunks_total(chunk->capacity_word_size()); -#ifdef ASSERT - chunk->set_is_free(false); -#endif } else { return NULL; } @@ -1578,6 +1737,11 @@ // Remove it from the links to this freelist chunk->set_next(NULL); chunk->set_prev(NULL); +#ifdef ASSERT + // Chunk is no longer on any freelist. Setting it to false makes container_count_slow() + // work. + chunk->set_is_free(false); +#endif slow_locked_verify(); return chunk; } @@ -1692,18 +1856,28 @@ } size_t SpaceManager::sum_capacity_in_chunks_in_use() const { - MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); - size_t sum = 0; - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - Metachunk* chunk = chunks_in_use(i); - while (chunk != NULL) { - // Just changed this sum += chunk->capacity_word_size(); - // sum += chunk->word_size() - Metachunk::overhead(); - sum += chunk->capacity_word_size(); - chunk = chunk->next(); + // For CMS use "allocated_chunks_words()" which does not need the + // Metaspace lock. For the other collectors sum over the + // lists. Use both methods as a check that "allocated_chunks_words()" + // is correct. That is, sum_capacity_in_chunks_in_use() is too expensive + // to use in product builds, so allocated_chunks_words() should be used, + // but still allow checking that allocated_chunks_words() returns the same + // value as sum_capacity_in_chunks_in_use(), which is the definitive + // answer.
+ if (UseConcMarkSweepGC) { + return allocated_chunks_words(); + } else { + MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); + size_t sum = 0; + for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { + Metachunk* chunk = chunks_in_use(i); + while (chunk != NULL) { + sum += chunk->capacity_word_size(); + chunk = chunk->next(); + } } + return sum; } - return sum; } size_t SpaceManager::sum_count_in_chunks_in_use() { @@ -1861,12 +2035,44 @@ SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) : _vs_list(vs_list), - _allocation_total(0), + _allocated_blocks_words(0), + _allocated_chunks_words(0), + _allocated_chunks_count(0), _lock(lock) { initialize(); } +void SpaceManager::inc_size_metrics(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + // Total of allocated Metachunks and allocated Metachunks count + // for each SpaceManager + _allocated_chunks_words = _allocated_chunks_words + words; + _allocated_chunks_count++; + // Global total of capacity in allocated Metachunks + MetaspaceAux::inc_capacity(words); + // Global total of allocated Metablocks. + // used_words_slow() includes the overhead in each + // Metachunk so include it in the used when the + // Metachunk is first added (so only added once per + // Metachunk). + MetaspaceAux::inc_used(Metachunk::overhead()); +} + +void SpaceManager::inc_used_metrics(size_t words) { + // Add to the per SpaceManager total + Atomic::add_ptr(words, &_allocated_blocks_words); + // Add to the global total + MetaspaceAux::inc_used(words); +} + +void SpaceManager::dec_total_from_size_metrics() { + MetaspaceAux::dec_capacity(allocated_chunks_words()); + MetaspaceAux::dec_used(allocated_blocks_words()); + // Also deduct the overhead per Metachunk + MetaspaceAux::dec_used(allocated_chunks_count() * Metachunk::overhead()); +} + void SpaceManager::initialize() { Metadebug::init_allocation_fail_alot_count(); for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { @@ -1887,11 +2093,13 @@ assert_lock_strong(SpaceManager::expand_lock()); Metachunk* cur = chunks; - // This return chunks one at a time. If a new + // This returns chunks one at a time. If a new // class List can be created that is a base class // of FreeList then something like FreeList::prepend() // can be used in place of this loop while (cur != NULL) { + assert(cur->container() != NULL, "Container should have been set"); + cur->container()->dec_container_count(); // Capture the next link before it is changed // by the call to return_chunk_at_head(); Metachunk* next = cur->next(); @@ -1903,7 +2111,10 @@ SpaceManager::~SpaceManager() { // This call takes this->_lock which can't be done while holding expand_lock() - const size_t in_use_before = sum_capacity_in_chunks_in_use(); + assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(), + err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT + " allocated_chunks_words() " SIZE_FORMAT, + sum_capacity_in_chunks_in_use(), allocated_chunks_words())); MutexLockerEx fcl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); @@ -1912,17 +2123,19 @@ chunk_manager->slow_locked_verify(); + dec_total_from_size_metrics(); + if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this); locked_print_chunks_in_use_on(gclog_or_tty); } - // Mangle freed memory. - NOT_PRODUCT(mangle_freed_chunks();) + // Do not mangle freed Metachunks. The chunk size inside Metachunks + // is used during the freeing of VirtualSpaceNodes.
// Have to update before the chunks_in_use lists are emptied // below. - chunk_manager->inc_free_chunks_total(in_use_before, + chunk_manager->inc_free_chunks_total(allocated_chunks_words(), sum_count_in_chunks_in_use()); // Add all the chunks in use by this space manager @@ -1978,6 +2191,7 @@ " granularity %d", humongous_chunks->word_size(), HumongousChunkGranularity)); Metachunk* next_humongous_chunks = humongous_chunks->next(); + humongous_chunks->container()->dec_container_count(); chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks); humongous_chunks = next_humongous_chunks; } @@ -1987,7 +2201,6 @@ chunk_manager->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex)); } - set_chunks_in_use(HumongousIndex, NULL); chunk_manager->slow_locked_verify(); } @@ -2067,12 +2280,17 @@ assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); } + // Add to the running sum of capacity + inc_size_metrics(new_chunk->word_size()); + assert(new_chunk->is_empty(), "Not ready for reuse"); if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print("SpaceManager::add_chunk: %d) ", sum_count_in_chunks_in_use()); new_chunk->print_on(gclog_or_tty); - vs_list()->chunk_manager()->locked_print_free_chunks(tty); + if (vs_list() != NULL) { + vs_list()->chunk_manager()->locked_print_free_chunks(tty); + } } } @@ -2143,7 +2361,7 @@ // of memory if this returns null. if (DumpSharedSpaces) { assert(current_chunk() != NULL, "should never happen"); - inc_allocation_total(word_size); + inc_used_metrics(word_size); return current_chunk()->allocate(word_size); // caller handles null result } if (current_chunk() != NULL) { @@ -2154,7 +2372,7 @@ result = grow_and_allocate(word_size); } if (result > 0) { - inc_allocation_total(word_size); + inc_used_metrics(word_size); assert(result != (MetaWord*) chunks_in_use(MediumIndex), "Head of the list is being allocated"); } @@ -2188,20 +2406,14 @@ } #ifdef ASSERT -void SpaceManager::verify_allocation_total() { +void SpaceManager::verify_allocated_blocks_words() { // Verification is only guaranteed at a safepoint. 
- if (SafepointSynchronize::is_at_safepoint()) { - gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT - " sum_used_in_chunks_in_use " SIZE_FORMAT, - this, - allocation_total(), - sum_used_in_chunks_in_use()); - } - MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); - assert(allocation_total() == sum_used_in_chunks_in_use(), + assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), + "Verification can fail if the application is running"); + assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), err_msg("allocation total is not consistent " SIZE_FORMAT " vs " SIZE_FORMAT, - allocation_total(), sum_used_in_chunks_in_use())); + allocated_blocks_words(), sum_used_in_chunks_in_use())); } #endif @@ -2257,14 +2469,65 @@ // MetaspaceAux -size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) { + +size_t MetaspaceAux::_allocated_capacity_words = 0; +size_t MetaspaceAux::_allocated_used_words = 0; + +size_t MetaspaceAux::free_bytes() { + size_t result = 0; + if (Metaspace::class_space_list() != NULL) { + result = result + Metaspace::class_space_list()->free_bytes(); + } + if (Metaspace::space_list() != NULL) { + result = result + Metaspace::space_list()->free_bytes(); + } + return result; +} + +void MetaspaceAux::dec_capacity(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + assert(words <= _allocated_capacity_words, + err_msg("About to decrement below 0: words " SIZE_FORMAT + " is greater than _allocated_capacity_words " SIZE_FORMAT, + words, _allocated_capacity_words)); + _allocated_capacity_words = _allocated_capacity_words - words; +} + +void MetaspaceAux::inc_capacity(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + // Needs to be atomic + _allocated_capacity_words = _allocated_capacity_words + words; +} + +void MetaspaceAux::dec_used(size_t words) { + assert(words <= _allocated_used_words, + err_msg("About to decrement below 0: words " SIZE_FORMAT + " is greater than _allocated_used_words " SIZE_FORMAT, + words, _allocated_used_words)); + // For CMS, deallocation of the Metaspaces occurs during the + // sweep which is a concurrent phase. Protection by the expand_lock() + // is not enough since allocation is on a per Metaspace basis + // and protected by the Metaspace lock. + jlong minus_words = (jlong) - (jlong) words; + Atomic::add_ptr(minus_words, &_allocated_used_words); +} + +void MetaspaceAux::inc_used(size_t words) { + // _allocated_used_words tracks allocations for + // each piece of metadata. Those allocations are + // generally done concurrently by different application + // threads so must be done atomically. + Atomic::add_ptr(words, &_allocated_used_words); +} + +size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { size_t used = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { Metaspace* msp = iter.get_next(); - // Sum allocation_total for each metaspace + // Sum allocated_blocks_words for each metaspace if (msp != NULL) { - used += msp->used_words(mdtype); + used += msp->used_words_slow(mdtype); } } return used * BytesPerWord; @@ -2282,13 +2545,15 @@ return free * BytesPerWord; } -size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) { - size_t capacity = free_chunks_total(mdtype); +size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { + // Don't count the space in the freelists. That space will be + // added to the capacity calculation as needed.
+ size_t capacity = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { Metaspace* msp = iter.get_next(); if (msp != NULL) { - capacity += msp->capacity_words(mdtype); + capacity += msp->capacity_words_slow(mdtype); } } return capacity * BytesPerWord; @@ -2315,23 +2580,30 @@ return free_chunks_total(mdtype) * BytesPerWord; } +size_t MetaspaceAux::free_chunks_total() { + return free_chunks_total(Metaspace::ClassType) + + free_chunks_total(Metaspace::NonClassType); +} + +size_t MetaspaceAux::free_chunks_total_in_bytes() { + return free_chunks_total() * BytesPerWord; +} + void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { gclog_or_tty->print(", [Metaspace:"); if (PrintGCDetails && Verbose) { gclog_or_tty->print(" " SIZE_FORMAT "->" SIZE_FORMAT - "(" SIZE_FORMAT "/" SIZE_FORMAT ")", + "(" SIZE_FORMAT ")", prev_metadata_used, - used_in_bytes(), - capacity_in_bytes(), + allocated_capacity_bytes(), reserved_in_bytes()); } else { gclog_or_tty->print(" " SIZE_FORMAT "K" "->" SIZE_FORMAT "K" - "(" SIZE_FORMAT "K/" SIZE_FORMAT "K)", + "(" SIZE_FORMAT "K)", prev_metadata_used / K, - used_in_bytes()/ K, - capacity_in_bytes()/K, + allocated_capacity_bytes() / K, reserved_in_bytes()/ K); } @@ -2346,23 +2618,30 @@ out->print_cr(" Metaspace total " SIZE_FORMAT "K, used " SIZE_FORMAT "K," " reserved " SIZE_FORMAT "K", - capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K); - out->print_cr(" data space " - SIZE_FORMAT "K, used " SIZE_FORMAT "K," - " reserved " SIZE_FORMAT "K", - capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K); - out->print_cr(" class space " - SIZE_FORMAT "K, used " SIZE_FORMAT "K," - " reserved " SIZE_FORMAT "K", - capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K); + allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K); +#if 0 +// The calls to capacity_bytes_slow() and used_bytes_slow() cause +// lock ordering assertion failures with some collectors. Do +// not include this code until the lock ordering is fixed. + if (PrintGCDetails && Verbose) { + out->print_cr(" data space " + SIZE_FORMAT "K, used " SIZE_FORMAT "K," + " reserved " SIZE_FORMAT "K", + capacity_bytes_slow(nct)/K, used_bytes_slow(nct)/K, reserved_in_bytes(nct)/K); + out->print_cr(" class space " + SIZE_FORMAT "K, used " SIZE_FORMAT "K," + " reserved " SIZE_FORMAT "K", + capacity_bytes_slow(ct)/K, used_bytes_slow(ct)/K, reserved_in_bytes(ct)/K); + } +#endif } // Print information for class space and data space separately. // This is almost the same as above. 
void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype); - size_t capacity_bytes = capacity_in_bytes(mdtype); - size_t used_bytes = used_in_bytes(mdtype); + size_t capacity_bytes = capacity_bytes_slow(mdtype); + size_t used_bytes = used_bytes_slow(mdtype); size_t free_bytes = free_in_bytes(mdtype); size_t used_and_free = used_bytes + free_bytes + free_chunks_capacity_bytes; @@ -2435,6 +2714,36 @@ Metaspace::class_space_list()->chunk_manager()->verify(); } +void MetaspaceAux::verify_capacity() { +#ifdef ASSERT + size_t running_sum_capacity_bytes = allocated_capacity_bytes(); + // For purposes of the running sum of capacity, verify against the slow sum + size_t capacity_in_use_bytes = capacity_bytes_slow(); + assert(running_sum_capacity_bytes == capacity_in_use_bytes, + err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT + " capacity_bytes_slow() " SIZE_FORMAT, + running_sum_capacity_bytes, capacity_in_use_bytes)); +#endif +} + +void MetaspaceAux::verify_used() { +#ifdef ASSERT + size_t running_sum_used_bytes = allocated_used_bytes(); + // For purposes of the running sum of used, verify against the slow sum + size_t used_in_use_bytes = used_bytes_slow(); + assert(allocated_used_bytes() == used_in_use_bytes, + err_msg("allocated_used_bytes() " SIZE_FORMAT + " used_bytes_slow() " SIZE_FORMAT, + allocated_used_bytes(), used_in_use_bytes)); +#endif +} + +void MetaspaceAux::verify_metrics() { + verify_capacity(); + verify_used(); +} + + // Metaspace methods size_t Metaspace::_first_chunk_word_size = 0; @@ -2584,8 +2893,8 @@ MetaWord* result; MetaspaceGC::set_expand_after_GC(true); size_t before_inc = MetaspaceGC::capacity_until_GC(); - size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size); - MetaspaceGC::inc_capacity_until_GC(delta_words); + size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord; + MetaspaceGC::inc_capacity_until_GC(delta_bytes); if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); @@ -2603,8 +2912,8 @@ return (char*)vsm()->current_chunk()->bottom(); } -size_t Metaspace::used_words(MetadataType mdtype) const { - // return vsm()->allocation_total(); +size_t Metaspace::used_words_slow(MetadataType mdtype) const { + // return vsm()->allocated_used_words(); return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() : vsm()->sum_used_in_chunks_in_use(); // includes overhead! } @@ -2619,16 +2928,24 @@ // have been made. Don't include space in the global freelist and // in the space available in the dictionary which // is already counted in some chunk. -size_t Metaspace::capacity_words(MetadataType mdtype) const { +size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { return mdtype == ClassType ?
class_vsm()->sum_capacity_in_chunks_in_use() : vsm()->sum_capacity_in_chunks_in_use(); } +size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { + return used_words_slow(mdtype) * BytesPerWord; +} + +size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { + return capacity_words_slow(mdtype) * BytesPerWord; +} + void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { if (SafepointSynchronize::is_at_safepoint()) { assert(Thread::current()->is_VM_thread(), "should be the VM thread"); // Don't take Heap_lock - MutexLocker ml(vsm()->lock()); + MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); if (word_size < TreeChunk<Metablock, FreeList>::min_size()) { // Dark matter. Too small for dictionary. #ifdef ASSERT @@ -2642,7 +2959,7 @@ vsm()->deallocate(ptr, word_size); } } else { - MutexLocker ml(vsm()->lock()); + MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); if (word_size < TreeChunk<Metablock, FreeList>::min_size()) { // Dark matter. Too small for dictionary. @@ -2716,6 +3033,13 @@ return Metablock::initialize(result, word_size); } +void Metaspace::purge() { + MutexLockerEx cl(SpaceManager::expand_lock(), + Mutex::_no_safepoint_check_flag); + space_list()->purge(); + class_space_list()->purge(); +} + void Metaspace::print_on(outputStream* out) const { // Print both class virtual space counts and metaspace. if (Verbose) { @@ -2733,7 +3057,8 @@ // aren't deleted presently. When they are, some sort of locking might // be needed. Note, locking this can cause inversion problems with the // caller in MetaspaceObj::is_metadata() function. - return space_list()->contains(ptr) || class_space_list()->contains(ptr); + return space_list()->contains(ptr) || + class_space_list()->contains(ptr); } void Metaspace::verify() { @@ -2742,10 +3067,6 @@ } void Metaspace::dump(outputStream* const out) const { - if (UseMallocOnly) { - // Just print usage for now - out->print_cr("usage %d", used_words(Metaspace::NonClassType)); - } out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm()); vsm()->dump(out); out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm()); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/metaspace.hpp --- a/src/share/vm/memory/metaspace.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/metaspace.hpp Mon May 06 10:20:18 2013 -0700 @@ -111,6 +111,10 @@ SpaceManager* _class_vsm; SpaceManager* class_vsm() const { return _class_vsm; } + // Allocate space for metadata of type mdtype. This is space + // within a Metachunk and is used by + // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS) + // which returns a Metablock.
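+ // Call layering (simplified): the static, TRAPS-taking allocate() declared
+ // below is the public entry point; it calls this method to carve word_size
+ // words out of the current Metachunk and wraps the resulting MetaWord* in a
+ // Metablock via Metablock::initialize().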
MetaWord* allocate(size_t word_size, MetadataType mdtype); // Virtual Space lists for both classes and other metadata @@ -133,11 +137,14 @@ static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } char* bottom() const; - size_t used_words(MetadataType mdtype) const; + size_t used_words_slow(MetadataType mdtype) const; size_t free_words(MetadataType mdtype) const; - size_t capacity_words(MetadataType mdtype) const; + size_t capacity_words_slow(MetadataType mdtype) const; size_t waste_words(MetadataType mdtype) const; + size_t used_bytes_slow(MetadataType mdtype) const; + size_t capacity_bytes_slow(MetadataType mdtype) const; + static Metablock* allocate(ClassLoaderData* loader_data, size_t size, bool read_only, MetadataType mdtype, TRAPS); void deallocate(MetaWord* ptr, size_t byte_size, bool is_class); @@ -150,6 +157,9 @@ static bool contains(const void *ptr); void dump(outputStream* const out) const; + // Free empty virtualspaces + static void purge(); + void print_on(outputStream* st) const; // Debugging support void verify(); @@ -158,28 +168,81 @@ class MetaspaceAux : AllStatic { // Statistics for class space and data space in metaspace. - static size_t used_in_bytes(Metaspace::MetadataType mdtype); + + // These methods iterate over the classloader data graph + // for the given Metaspace type. These are slow. + static size_t used_bytes_slow(Metaspace::MetadataType mdtype); static size_t free_in_bytes(Metaspace::MetadataType mdtype); - static size_t capacity_in_bytes(Metaspace::MetadataType mdtype); + static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype); + + // Iterates over the virtual space list. static size_t reserved_in_bytes(Metaspace::MetadataType mdtype); static size_t free_chunks_total(Metaspace::MetadataType mdtype); static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype); public: - // Total of space allocated to metadata in all Metaspaces - static size_t used_in_bytes() { - return used_in_bytes(Metaspace::ClassType) + - used_in_bytes(Metaspace::NonClassType); + // Running sum of space in all Metachunks that have been + // allocated to a Metaspace. This is used instead of + // iterating over all the classloaders + static size_t _allocated_capacity_words; + // Running sum of space in all Metachunks that is + // being used for metadata. + static size_t _allocated_used_words; + + public: + // Decrement and increment _allocated_capacity_words + static void dec_capacity(size_t words); + static void inc_capacity(size_t words); + + // Decrement and increment _allocated_used_words + static void dec_used(size_t words); + static void inc_used(size_t words); + + // Total of space allocated to metadata in all Metaspaces. + // This sums the space used in each Metachunk by + // iterating over the classloader data graph + static size_t used_bytes_slow() { + return used_bytes_slow(Metaspace::ClassType) + + used_bytes_slow(Metaspace::NonClassType); } - // Total of available space in all Metaspaces - // Total of capacity allocated to all Metaspaces. This includes - // space in Metachunks not yet allocated and in the Metachunk - // freelist.
- static size_t capacity_in_bytes() { - return capacity_in_bytes(Metaspace::ClassType) + - capacity_in_bytes(Metaspace::NonClassType); + // Used by MetaspaceCounters + static size_t free_chunks_total(); + static size_t free_chunks_total_in_bytes(); + + static size_t allocated_capacity_words() { + return _allocated_capacity_words; + } + static size_t allocated_capacity_bytes() { + return _allocated_capacity_words * BytesPerWord; + } + + static size_t allocated_used_words() { + return _allocated_used_words; + } + static size_t allocated_used_bytes() { + return _allocated_used_words * BytesPerWord; + } + + static size_t free_bytes(); + + // Total capacity in all Metaspaces + static size_t capacity_bytes_slow() { +#ifdef PRODUCT + // Use allocated_capacity_bytes() in PRODUCT instead of this function. + guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); +#endif + size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); + size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); + assert(allocated_capacity_bytes() == class_capacity + non_class_capacity, + err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT + " class_capacity + non_class_capacity " SIZE_FORMAT + " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, + allocated_capacity_bytes(), class_capacity + non_class_capacity, + class_capacity, non_class_capacity)); + + return class_capacity + non_class_capacity; } // Total space reserved in all Metaspaces @@ -198,6 +261,11 @@ static void print_waste(outputStream* out); static void dump(outputStream* out); static void verify_free_chunks(); + // Checks that the values returned by allocated_capacity_bytes() and + // capacity_bytes_slow() are the same. + static void verify_capacity(); + static void verify_used(); + static void verify_metrics(); }; // Metaspace are deallocated when their class loader are GC'ed. @@ -232,7 +300,6 @@ public: static size_t capacity_until_GC() { return _capacity_until_GC; } - static size_t capacity_until_GC_in_bytes() { return _capacity_until_GC * BytesPerWord; } static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; } static void dec_capacity_until_GC(size_t v) { _capacity_until_GC = _capacity_until_GC > v ? 
_capacity_until_GC - v : 0; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/metaspaceCounters.cpp --- a/src/share/vm/memory/metaspaceCounters.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/metaspaceCounters.cpp Mon May 06 10:20:18 2013 -0700 @@ -29,6 +29,16 @@ MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL; +size_t MetaspaceCounters::calc_total_capacity() { + // The total capacity is the sum of + // 1) capacity of Metachunks in use by all Metaspaces + // 2) unused space at the end of each Metachunk + // 3) space in the freelist + size_t total_capacity = MetaspaceAux::allocated_capacity_bytes() + + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes(); + return total_capacity; +} + MetaspaceCounters::MetaspaceCounters() : _capacity(NULL), _used(NULL), @@ -36,8 +46,8 @@ if (UsePerfData) { size_t min_capacity = MetaspaceAux::min_chunk_size(); size_t max_capacity = MetaspaceAux::reserved_in_bytes(); - size_t curr_capacity = MetaspaceAux::capacity_in_bytes(); - size_t used = MetaspaceAux::used_in_bytes(); + size_t curr_capacity = calc_total_capacity(); + size_t used = MetaspaceAux::allocated_used_bytes(); initialize(min_capacity, max_capacity, curr_capacity, used); } @@ -82,15 +92,13 @@ void MetaspaceCounters::update_capacity() { assert(UsePerfData, "Should not be called unless being used"); - assert(_capacity != NULL, "Should be initialized"); - size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes(); - _capacity->set_value(capacity_in_bytes); + size_t total_capacity = calc_total_capacity(); + _capacity->set_value(total_capacity); } void MetaspaceCounters::update_used() { assert(UsePerfData, "Should not be called unless being used"); - assert(_used != NULL, "Should be initialized"); - size_t used_in_bytes = MetaspaceAux::used_in_bytes(); + size_t used_in_bytes = MetaspaceAux::allocated_used_bytes(); _used->set_value(used_in_bytes); } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/metaspaceCounters.hpp --- a/src/share/vm/memory/metaspaceCounters.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/metaspaceCounters.hpp Mon May 06 10:20:18 2013 -0700 @@ -37,6 +37,7 @@ size_t max_capacity, size_t curr_capacity, size_t used); + size_t calc_total_capacity(); public: MetaspaceCounters(); ~MetaspaceCounters(); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/metaspaceShared.cpp --- a/src/share/vm/memory/metaspaceShared.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/metaspaceShared.cpp Mon May 06 10:20:18 2013 -0700 @@ -376,18 +376,17 @@ const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT; Metaspace* ro_space = _loader_data->ro_metaspace(); Metaspace* rw_space = _loader_data->rw_metaspace(); - const size_t BPW = BytesPerWord; // Allocated size of each space (may not be all occupied) - const size_t ro_alloced = ro_space->capacity_words(Metaspace::NonClassType) * BPW; - const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW; + const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType); + const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType); const size_t md_alloced = md_end-md_low; const size_t mc_alloced = mc_end-mc_low; const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced; // Occupied size of each space. 
- const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW; - const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW; + const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType); + const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType); const size_t md_bytes = size_t(md_top - md_low); const size_t mc_bytes = size_t(mc_top - mc_low); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/sharedHeap.cpp --- a/src/share/vm/memory/sharedHeap.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/sharedHeap.cpp Mon May 06 10:20:18 2013 -0700 @@ -218,14 +218,13 @@ static AlwaysTrueClosure always_true; void SharedHeap::process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure) { + CodeBlobClosure* code_roots) { // Global (weak) JNI handles JNIHandles::weak_oops_do(&always_true, root_closure); CodeCache::blobs_do(code_roots); - StringTable::oops_do(root_closure); - } + StringTable::oops_do(root_closure); +} void SharedHeap::set_barrier_set(BarrierSet* bs) { _barrier_set = bs; diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/sharedHeap.hpp --- a/src/share/vm/memory/sharedHeap.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/sharedHeap.hpp Mon May 06 10:20:18 2013 -0700 @@ -249,8 +249,7 @@ // JNI weak roots, the code cache, system dictionary, symbol table, // string table. void process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure); + CodeBlobClosure* code_roots); // The functions below are helper functions that a subclass of // "SharedHeap" can use in the implementation of its virtual diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/universe.cpp --- a/src/share/vm/memory/universe.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/universe.cpp Mon May 06 10:20:18 2013 -0700 @@ -1270,7 +1270,7 @@ st->print_cr("}"); } -void Universe::verify(bool silent, VerifyOption option) { +void Universe::verify(VerifyOption option, const char* prefix, bool silent) { // The use of _verify_in_progress is a temporary work around for // 6320749. 
Don't bother with creating a class to set and clear // it since it is only used in this method and the control flow is @@ -1287,11 +1287,12 @@ HandleMark hm; // Handles created during verification can be zapped _verify_count++; + if (!silent) gclog_or_tty->print(prefix); if (!silent) gclog_or_tty->print("[Verifying "); if (!silent) gclog_or_tty->print("threads "); Threads::verify(); + if (!silent) gclog_or_tty->print("heap "); heap()->verify(silent, option); - if (!silent) gclog_or_tty->print("syms "); SymbolTable::verify(); if (!silent) gclog_or_tty->print("strs "); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/memory/universe.hpp --- a/src/share/vm/memory/universe.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/memory/universe.hpp Mon May 06 10:20:18 2013 -0700 @@ -445,12 +445,12 @@ // Debugging static bool verify_in_progress() { return _verify_in_progress; } - static void verify(bool silent, VerifyOption option); - static void verify(bool silent) { - verify(silent, VerifyOption_Default /* option */); + static void verify(VerifyOption option, const char* prefix, bool silent = VerifySilently); + static void verify(const char* prefix, bool silent = VerifySilently) { + verify(VerifyOption_Default, prefix, silent); } - static void verify() { - verify(false /* silent */); + static void verify(bool silent = VerifySilently) { + verify("", silent); } static int verify_count() { return _verify_count; } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/oops/klassVtable.cpp --- a/src/share/vm/oops/klassVtable.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/oops/klassVtable.cpp Mon May 06 10:20:18 2013 -0700 @@ -519,6 +519,9 @@ // check if a method is a miranda method, given a class's methods table and its super // the caller must make sure that the method belongs to an interface implemented by the class bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) { + if (m->is_static()) { + return false; + } Symbol* name = m->name(); Symbol* signature = m->signature(); if (InstanceKlass::find_method(class_methods, name, signature) == NULL) { diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/oops/method.cpp --- a/src/share/vm/oops/method.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/oops/method.cpp Mon May 06 10:20:18 2013 -0700 @@ -877,7 +877,7 @@ debug_only(No_Safepoint_Verifier nsv;) nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code); if (code == NULL && UseCodeCacheFlushing) { - nmethod *saved_code = CodeCache::find_and_remove_saved_code(this); + nmethod *saved_code = CodeCache::reanimate_saved_code(this); if (saved_code != NULL) { methodHandle method(this); assert( !
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/opto/graphKit.cpp
--- a/src/share/vm/opto/graphKit.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/opto/graphKit.cpp	Mon May 06 10:20:18 2013 -0700
@@ -3564,7 +3564,8 @@
 
   Node* no_ctrl = NULL;
   Node* no_base = __ top();
-  Node* zero = __ ConI(0);
+  Node* zero  = __ ConI(0);
+  Node* zeroX = __ ConX(0);
 
   float likely = PROB_LIKELY(0.999);
   float unlikely = PROB_UNLIKELY(0.999);
@@ -3590,7 +3591,9 @@
 
   // if (!marking)
   __ if_then(marking, BoolTest::ne, zero); {
-    Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+    BasicType index_bt = TypeX_X->basic_type();
+    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
+    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
 
     if (do_load) {
       // load original value
@@ -3603,22 +3606,16 @@
       Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
       // is the queue for this thread full?
-      __ if_then(index, BoolTest::ne, zero, likely); {
+      __ if_then(index, BoolTest::ne, zeroX, likely); {
 
         // decrement the index
-        Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
-        Node* next_indexX = next_index;
-#ifdef _LP64
-        // We could refine the type for what it's worth
-        // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
-        next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif
+        Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
 
         // Now get the buffer location we will log the previous value into and store it
-        Node *log_addr = __ AddP(no_base, buffer, next_indexX);
+        Node *log_addr = __ AddP(no_base, buffer, next_index);
         __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
         // update the index
-        __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
+        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);
 
       } __ else_(); {
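Editor's note: this hunk and the post-barrier hunk below fix a width mismatch -- G1's PtrQueue::_index field is a size_t, but the generated barrier code used to load and store it as a 32-bit T_INT. The TypeX_X/ConX/SubX names select the machine-word-sized type, so one IR shape now serves both 32- and 64-bit builds. A minimal sketch of the idiom, reusing the kit names from this file:

    BasicType index_bt = TypeX_X->basic_type();  // T_INT on 32-bit VMs, T_LONG on 64-bit VMs
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
    Node* next  = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
    __ store(__ ctrl(), index_adr, next, index_bt, Compile::AliasIdxRaw);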
@@ -3645,26 +3642,21 @@
                                      Node* buffer,
                                      const TypeFunc* tf) {
 
-  Node* zero = __ ConI(0);
+  Node* zero  = __ ConI(0);
+  Node* zeroX = __ ConX(0);
   Node* no_base = __ top();
   BasicType card_bt = T_BYTE;
   // Smash zero into card. MUST BE ORDERED WRT THE STORE.
   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
 
   // Now do the queue work
-  __ if_then(index, BoolTest::ne, zero); {
-
-    Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
-    Node* next_indexX = next_index;
-#ifdef _LP64
-    // We could refine the type for what it's worth
-    // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
-    next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif // _LP64
-    Node* log_addr = __ AddP(no_base, buffer, next_indexX);
+  __ if_then(index, BoolTest::ne, zeroX); {
+
+    Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
+    Node* log_addr = __ AddP(no_base, buffer, next_index);
 
     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
-    __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
+    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);
 
   } __ else_(); {
     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
@@ -3725,7 +3717,7 @@
   // Now some values
   // Use ctrl to avoid hoisting these values past a safepoint, which could
   // potentially reset these fields in the JavaThread.
-  Node* index  = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
   Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
   // Convert the store obj pointer to an int prior to doing math on it
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/opto/output.cpp
--- a/src/share/vm/opto/output.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/opto/output.cpp	Mon May 06 10:20:18 2013 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "asm/assembler.inline.hpp"
+#include "code/compiledIC.hpp"
 #include "code/debugInfo.hpp"
 #include "code/debugInfoRec.hpp"
 #include "compiler/compileBroker.hpp"
@@ -41,8 +42,6 @@
 #include "runtime/handles.inline.hpp"
 #include "utilities/xmlstream.hpp"
 
-extern uint size_java_to_interp();
-extern uint reloc_java_to_interp();
 extern uint size_exception_handler();
 extern uint size_deopt_handler();
 
@@ -389,15 +388,15 @@
       MachNode *mach = nj->as_Mach();
       blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
       reloc_size += mach->reloc();
-      if( mach->is_MachCall() ) {
+      if (mach->is_MachCall()) {
         MachCallNode *mcall = mach->as_MachCall();
         // This destination address is NOT PC-relative
 
         mcall->method_set((intptr_t)mcall->entry_point());
 
-        if( mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method ) {
-          stub_size  += size_java_to_interp();
-          reloc_size += reloc_java_to_interp();
+        if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
+          stub_size  += CompiledStaticCall::to_interp_stub_size();
+          reloc_size += CompiledStaticCall::reloc_to_interp_stub();
         }
       } else if (mach->is_MachSafePoint()) {
         // If call/safepoint are adjacent, account for possible
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/prims/jvmtiClassFileReconstituter.cpp
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Mon May 06 10:20:18 2013 -0700
@@ -513,6 +513,11 @@
   AnnotationArray* param_anno = method->parameter_annotations();
   AnnotationArray* default_anno = method->annotation_default();
 
+  // skip generated default interface methods
+  if (method->is_overpass()) {
+    return;
+  }
+
   write_u2(access_flags.get_flags() & JVM_RECOGNIZED_METHOD_MODIFIERS);
   write_u2(const_method->name_index());
   write_u2(const_method->signature_index());
@@ -619,8 +624,19 @@
   HandleMark hm(thread());
   Array<Method*>* methods = ikh()->methods();
   int num_methods = methods->length();
+  int num_overpass = 0;
 
-  write_u2(num_methods);
+  // count the generated default interface methods
+  // these will not be re-created by write_method_info
+  // and should not be included in the total count
+  for (int index = 0; index < num_methods; index++) {
+    Method* method = methods->at(index);
+    if (method->is_overpass()) {
+      num_overpass++;
+    }
+  }
+
+  write_u2(num_methods - num_overpass);
   if (JvmtiExport::can_maintain_original_method_order()) {
     int index;
     int original_index;
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/prims/perf.cpp
--- a/src/share/vm/prims/perf.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/prims/perf.cpp	Mon May 06 10:20:18 2013 -0700
@@ -142,20 +142,20 @@
   }
 
   switch(variability) {
-  case 1: /* V_Constant */
+  case PerfData::V_Constant:
     pl = PerfDataManager::create_long_constant(NULL_NS, (char *)name_utf,
                                                (PerfData::Units)units, value, CHECK_NULL);
     break;
 
-  case 2: /* V_Variable */
-    pl = PerfDataManager::create_long_variable(NULL_NS, (char *)name_utf,
+  case PerfData::V_Monotonic:
+    pl = PerfDataManager::create_long_counter(NULL_NS, (char *)name_utf,
                                                (PerfData::Units)units, value, CHECK_NULL);
     break;
 
-  case 3: /* V_Monotonic Counter */
-    pl = PerfDataManager::create_long_counter(NULL_NS, (char *)name_utf,
+  case PerfData::V_Variable:
+    pl = PerfDataManager::create_long_variable(NULL_NS, (char *)name_utf,
                                                (PerfData::Units)units, value, CHECK_NULL);
     break;
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/prims/whitebox.cpp
--- a/src/share/vm/prims/whitebox.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/prims/whitebox.cpp	Mon May 06 10:20:18 2013 -0700
@@ -439,9 +439,29 @@
   instanceKlassHandle ikh = instanceKlassHandle(JNIHandles::resolve(wbclass)->klass());
   Handle loader(ikh->class_loader());
   if (loader.is_null()) {
+    ResourceMark rm;
     ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
-    jint result = env->RegisterNatives(wbclass, methods, sizeof(methods)/sizeof(methods[0]));
-    if (result == 0) {
+    bool result = true;
+    // register the natives one by one so that we can catch exceptions
+    jclass exceptionKlass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
+    for (int i = 0, n = sizeof(methods) / sizeof(methods[0]); i < n; ++i) {
+      if (env->RegisterNatives(wbclass, methods + i, 1) != 0) {
+        result = false;
+        if (env->ExceptionCheck() && env->IsInstanceOf(env->ExceptionOccurred(), exceptionKlass)) {
+          // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native;
+          // ignore the exception
+          tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature);
+          env->ExceptionClear();
+        } else {
+          // registration failed without an exception, or with an unexpected one
+          tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered", methods[i].name, methods[i].signature);
+          env->UnregisterNatives(wbclass);
+          break;
+        }
+      }
+    }
+
+    if (result) {
       WhiteBox::set_used();
     }
   }
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/arguments.cpp
--- a/src/share/vm/runtime/arguments.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/runtime/arguments.cpp	Mon May 06 10:20:18 2013 -0700
@@ -2224,6 +2224,55 @@
   return JNI_OK;
 }
 
+// Checks if name in command-line argument -agent{lib,path}:name[=options]
+// represents a valid HPROF or JDWP agent.  is_path==true denotes that we
+// are dealing with -agentpath (case where name is a path), otherwise with
+// -agentlib
+bool valid_hprof_or_jdwp_agent(char *name, bool is_path) {
+  char *_name;
+  const char *_hprof = "hprof", *_jdwp = "jdwp";
+  size_t _len_hprof, _len_jdwp, _len_prefix;
+
+  if (is_path) {
+    if ((_name = strrchr(name, (int) *os::file_separator())) == NULL) {
+      return false;
+    }
+
+    _name++;  // skip past last path separator
+    _len_prefix = strlen(JNI_LIB_PREFIX);
+
+    if (strncmp(_name, JNI_LIB_PREFIX, _len_prefix) != 0) {
+      return false;
+    }
+
+    _name += _len_prefix;
+    _len_hprof = strlen(_hprof);
+    _len_jdwp = strlen(_jdwp);
+
+    if (strncmp(_name, _hprof, _len_hprof) == 0) {
+      _name += _len_hprof;
+    }
+    else if (strncmp(_name, _jdwp, _len_jdwp) == 0) {
+      _name += _len_jdwp;
+    }
+    else {
+      return false;
+    }
+
+    if (strcmp(_name, JNI_LIB_SUFFIX) != 0) {
+      return false;
+    }
+
+    return true;
+  }
+
+  if (strcmp(name, _hprof) == 0 || strcmp(name, _jdwp) == 0) {
+    return true;
+  }
+
+  return false;
+}
+
 jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
                                        SysClassPath* scp_p,
                                        bool* scp_assembly_required_p,
@@ -2322,7 +2371,7 @@
       options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1, mtInternal), pos + 1);
     }
 #if !INCLUDE_JVMTI
-    if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
+    if (valid_hprof_or_jdwp_agent(name, is_absolute_path)) {
       jio_fprintf(defaultStream::error_stream(),
         "Profiling and debugging agents are not supported in this VM\n");
       return JNI_ERR;
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/compilationPolicy.cpp
--- a/src/share/vm/runtime/compilationPolicy.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/runtime/compilationPolicy.cpp	Mon May 06 10:20:18 2013 -0700
@@ -109,6 +109,9 @@
 
 // Returns true if m is allowed to be compiled
 bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) {
+  // allow any levels for WhiteBox
+  assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level");
+
   if (m->is_abstract()) return false;
   if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
 
@@ -122,7 +125,13 @@
     return false;
   }
   if (comp_level == CompLevel_all) {
-    return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization);
+    if (TieredCompilation) {
+      // enough to be compilable at any level for tiered
+      return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization);
+    } else {
+      // must be compilable at available level for non-tiered
+      return !m->is_not_compilable(CompLevel_highest_tier);
+    }
   } else if (is_compile(comp_level)) {
     return !m->is_not_compilable(comp_level);
   }
@@ -436,7 +445,7 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (is_compilation_enabled() && can_be_compiled(m)) {
+  if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
     nmethod* nm = m->code();
     if (nm == NULL ) {
      CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, comment, thread);
@@ -449,7 +458,7 @@
   const int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m)) {
+  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) {
     CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
   }
@@ -467,7 +476,7 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) {
+  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
     ResourceMark rm(thread);
     frame fr = thread->last_frame();
     assert(fr.is_interpreted_frame(), "must be interpreted");
@@ -505,7 +514,7 @@
   const int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m)) {
+  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) {
     CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
   }
@@ -600,7 +609,7 @@
 
       // If the caller method is too big or something then we do not want to
       // compile it just to inline a method
-      if (!can_be_compiled(next_m)) {
+      if (!can_be_compiled(next_m, CompLevel_any)) {
         msg = "caller cannot be compiled";
         break;
       }
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/globals.hpp
--- a/src/share/vm/runtime/globals.hpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/runtime/globals.hpp	Mon May 06 10:20:18 2013 -0700
@@ -2123,6 +2123,9 @@
   product(intx, PrefetchFieldsAhead, -1,                                    \
           "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
                                                                             \
+  diagnostic(bool, VerifySilently, false,                                   \
+          "Don't print the verification progress")                          \
+                                                                            \
   diagnostic(bool, VerifyDuringStartup, false,                              \
           "Verify memory system before executing any Java code "            \
           "during VM initialization")                                       \
@@ -3179,6 +3182,9 @@
   product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K,                 \
           "When less than X space left, start code cache cleaning")         \
                                                                             \
+  product(uintx, CodeCacheFlushingFraction, 2,                              \
+          "Fraction of the code cache that is flushed when full")           \
+                                                                            \
   /* interpreter debugging */                                               \
   develop(intx, BinarySwitchThreshold, 5,                                   \
           "Minimal number of lookupswitch entries for rewriting to binary " \
@@ -3223,8 +3229,9 @@
   develop(bool, ReplayCompiles, false,                                      \
           "Enable replay of compilations from ReplayDataFile")              \
                                                                             \
-  develop(ccstr, ReplayDataFile, "replay.txt",                              \
-          "file containing compilation replay information")                 \
+  product(ccstr, ReplayDataFile, NULL,                                      \
+          "File containing compilation replay information "                 \
+          "[default: ./replay_pid%p.log] (%p replaced with pid)")           \
                                                                             \
   develop(intx, ReplaySuppressInitializers, 2,                              \
           "Controls handling of class initialization during replay"        \
@@ -3237,8 +3244,8 @@
   develop(bool, ReplayIgnoreInitErrors, false,                              \
           "Ignore exceptions thrown during initialization for replay")      \
                                                                             \
-  develop(bool, DumpReplayDataOnError, true,                                \
-          "record replay data for crashing compiler threads")               \
+  product(bool, DumpReplayDataOnError, true,                                \
+          "Record replay data for crashing compiler threads")               \
                                                                             \
   product(bool, CICompilerCountPerCPU, false,                               \
           "1 compiler thread for log(N
CPUs)") \ @@ -3247,7 +3254,9 @@ "Fire OutOfMemoryErrors throughout CI for testing the compiler " \ "(non-negative value throws OOM after this many CI accesses " \ "in each compile)") \ - \ + notproduct(intx, CICrashAt, -1, \ + "id of compilation to trigger assert in compiler thread for " \ + "the purpose of testing, e.g. generation of replay data") \ notproduct(bool, CIObjectFactoryVerify, false, \ "enable potentially expensive verification in ciObjectFactory") \ \ diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/os.hpp --- a/src/share/vm/runtime/os.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/runtime/os.hpp Mon May 06 10:20:18 2013 -0700 @@ -454,6 +454,7 @@ // File i/o operations static const int default_file_open_flags(); static int open(const char *path, int oflag, int mode); + static FILE* open(int fd, const char* mode); static int close(int fd); static jlong lseek(int fd, jlong offset, int whence); static char* native_path(char *path); @@ -477,7 +478,7 @@ static const char* dll_file_extension(); static const char* get_temp_directory(); - static const char* get_current_directory(char *buf, int buflen); + static const char* get_current_directory(char *buf, size_t buflen); // Builds a platform-specific full library path given a ld path and lib name // Returns true if buffer contains full path to existing file, false otherwise diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/serviceThread.cpp --- a/src/share/vm/runtime/serviceThread.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/runtime/serviceThread.cpp Mon May 06 10:20:18 2013 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,8 @@ #include "runtime/mutexLocker.hpp" #include "prims/jvmtiImpl.hpp" #include "services/gcNotifier.hpp" +#include "services/diagnosticArgument.hpp" +#include "services/diagnosticFramework.hpp" ServiceThread* ServiceThread::_instance = NULL; @@ -83,6 +85,7 @@ bool sensors_changed = false; bool has_jvmti_events = false; bool has_gc_notification_event = false; + bool has_dcmd_notification_event = false; JvmtiDeferredEvent jvmti_event; { // Need state transition ThreadBlockInVM so that this thread @@ -98,7 +101,8 @@ MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) && !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) && - !(has_gc_notification_event = GCNotifier::has_event())) { + !(has_gc_notification_event = GCNotifier::has_event()) && + !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) { // wait until one of the sensors has pending requests, or there is a // pending JVMTI event or JMX GC notification to post Service_lock->wait(Mutex::_no_safepoint_check_flag); @@ -120,6 +124,10 @@ if(has_gc_notification_event) { GCNotifier::sendNotification(CHECK); } + + if(has_dcmd_notification_event) { + DCmdFactory::send_notification(CHECK); + } } } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/sharedRuntime.cpp --- a/src/share/vm/runtime/sharedRuntime.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/runtime/sharedRuntime.cpp Mon May 06 10:20:18 2013 -0700 @@ -1316,12 +1316,6 @@ assert(stub_frame.is_runtime_frame(), "sanity check"); frame caller_frame = stub_frame.sender(®_map); - // MethodHandle invokes don't have a CompiledIC and should always - // simply redispatch to the callee_target. 
-  address sender_pc = caller_frame.pc();
-  CodeBlob* sender_cb = caller_frame.cb();
-  nmethod* sender_nm = sender_cb->as_nmethod_or_null();
-
   if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame()) {
     Method* callee = thread->callee_target();
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/simpleThresholdPolicy.cpp
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Mon May 06 10:20:18 2013 -0700
@@ -154,9 +154,10 @@
 // Set carry flags on the counters if necessary
 void SimpleThresholdPolicy::handle_counter_overflow(Method* method) {
   MethodCounters *mcs = method->method_counters();
-  assert(mcs != NULL, "");
-  set_carry_if_necessary(mcs->invocation_counter());
-  set_carry_if_necessary(mcs->backedge_counter());
+  if (mcs != NULL) {
+    set_carry_if_necessary(mcs->invocation_counter());
+    set_carry_if_necessary(mcs->backedge_counter());
+  }
   MethodData* mdo = method->method_data();
   if (mdo != NULL) {
     set_carry_if_necessary(mdo->invocation_counter());
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/sweeper.cpp
--- a/src/share/vm/runtime/sweeper.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/runtime/sweeper.cpp	Mon May 06 10:20:18 2013 -0700
@@ -136,13 +136,12 @@
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool      NMethodSweeper::_rescan = false;
-bool      NMethodSweeper::_do_sweep = false;
-bool      NMethodSweeper::_was_full = false;
-jint      NMethodSweeper::_advise_to_sweep = 0;
-jlong     NMethodSweeper::_last_was_full = 0;
-uint      NMethodSweeper::_highest_marked = 0;
-long      NMethodSweeper::_was_full_traversal = 0;
+bool      NMethodSweeper::_resweep = false;
+jint      NMethodSweeper::_flush_token = 0;
+jlong     NMethodSweeper::_last_full_flush_time = 0;
+int       NMethodSweeper::_highest_marked = 0;
+int       NMethodSweeper::_dead_compile_ids = 0;
+long      NMethodSweeper::_last_flush_traversal_id = 0;
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
@@ -155,20 +154,16 @@
 };
 static MarkActivationClosure mark_activation_closure;
 
+bool NMethodSweeper::sweep_in_progress() {
+  return (_current != NULL);
+}
+
 void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
-  _do_sweep = true;
 
   // No need to synchronize access, since this is always executed at a
-  // safepoint.  If we aren't in the middle of scan and a rescan
-  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
-  // code cache flushing is in progress, don't skip sweeping to help make progress
-  // clearing space in the code cache.
-  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
-    _do_sweep = false;
-    return;
-  }
+  // safepoint.
 
   // Make sure CompiledIC_lock is unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.
@@ -176,7 +171,7 @@
 
   // Check for restart
   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-  if (_current == NULL) {
+  if (!sweep_in_progress() && _resweep) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
     _current     = CodeCache::first_nmethod();
@@ -187,39 +182,30 @@
     Threads::nmethods_do(&mark_activation_closure);
 
     // reset the flags since we started a scan from the beginning.
-    _rescan = false;
+    _resweep = false;
     _locked_seen = 0;
     _not_entrant_seen_on_stack = 0;
   }
 
   if (UseCodeCacheFlushing) {
-    if (!CodeCache::needs_flushing()) {
-      // scan_stacks() runs during a safepoint, no race with setters
-      _advise_to_sweep = 0;
+    // only allow new flushes after the interval is complete.
+    jlong now           = os::javaTimeMillis();
+    jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+    jlong curr_interval = now - _last_full_flush_time;
+    if (curr_interval > max_interval) {
+      _flush_token = 0;
     }
 
-    if (was_full()) {
-      // There was some progress so attempt to restart the compiler
-      jlong now           = os::javaTimeMillis();
-      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-      jlong curr_interval = now - _last_was_full;
-      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
-        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-        set_was_full(false);
-
-        // Update the _last_was_full time so we can tell how fast the
-        // code cache is filling up
-        _last_was_full = os::javaTimeMillis();
-
-        log_sweep("restart_compiler");
-      }
+    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
+      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+      log_sweep("restart_compiler");
     }
   }
 }
 
 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  if ((!MethodFlushing) || (!_do_sweep)) return;
+  if (!MethodFlushing || !sweep_in_progress()) return;
 
   if (_invocations > 0) {
     // Only one thread at a time will sweep
@@ -253,6 +239,14 @@
       tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
     }
 
+    if (!CompileBroker::should_compile_new_jobs()) {
+      // If we have turned off compilations we might as well do full sweeps
+      // in order to reach the clean state faster. Otherwise the sleeping compiler
+      // threads will slow down sweeping. After a few iterations the cache
+      // will be clean and sweeping stops (_resweep will not be set)
+      _invocations = 1;
+    }
+
     // We want to visit all nmethods after NmethodSweepFraction
     // invocations so divide the remaining number of nmethods by the
     // remaining number of invocations.  This is only an estimate since
@@ -296,7 +290,7 @@
 
   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 
-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
    // we've completed a scan without making progress but there were
    // nmethods we were unable to process either because they were
    // locked or were still on stack. We don't have to aggressively
@@ -318,6 +312,13 @@
   if (_invocations == 1) {
     log_sweep("finished");
   }
+
+  // Sweeper is the only case where memory is released,
+  // check here if it is time to restart the compiler.
+  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
+    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+    log_sweep("restart_compiler");
+  }
 }
 
 class NMethodMarker: public StackObj {
@@ -392,7 +393,7 @@
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
     }
     nm->mark_for_reclamation();
-    _rescan = true;
+    _resweep = true;
     SWEEP(nm);
   }
 } else if (nm->is_not_entrant()) {
@@ -403,7 +404,7 @@
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
     }
     nm->make_zombie();
-    _rescan = true;
+    _resweep = true;
     SWEEP(nm);
   } else {
     // Still alive, clean up its inline caches
@@ -425,16 +426,15 @@
     release_nmethod(nm);
   } else {
     nm->make_zombie();
-    _rescan = true;
+    _resweep = true;
     SWEEP(nm);
   }
 } else {
   assert(nm->is_alive(), "should be alive");
 
   if (UseCodeCacheFlushing) {
-    if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
-        (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
-        CodeCache::needs_flushing()) {
+    if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
+        (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
       // This method has not been called since the forced cleanup happened
       nm->make_not_entrant();
     }
@@ -457,41 +457,27 @@
 // _code field is restored and the Method*/nmethod
 // go back to their normal state.
 void NMethodSweeper::handle_full_code_cache(bool is_full) {
-  // Only the first one to notice can advise us to start early cleaning
-  if (!is_full){
-    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
-    if (old != 0) {
-      return;
-    }
-  }
 
   if (is_full) {
     // Since code cache is full, immediately stop new compiles
-    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
-    if (!did_set) {
-      // only the first to notice can start the cleaning,
-      // others will go back and block
-      return;
+    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
+      log_sweep("disable_compiler");
     }
-    set_was_full(true);
+  }
 
-    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
-    jlong now = os::javaTimeMillis();
-    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-    jlong curr_interval = now - _last_was_full;
-    if (curr_interval < max_interval) {
-      _rescan = true;
-      log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
-                           curr_interval/1000);
-      return;
-    }
-  }
+  // Make sure only one thread can flush
+  // The token is reset after MinCodeCacheFlushingInterval in scan_stacks();
+  // no need to check the timeout here.
+  jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
+  if (old != 0) {
+    return;
+  }
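Editor's note: the cmpxchg above is a simple claim-token idiom -- every racing thread atomically tries to swing _flush_token from 0 to 1, the call returns the previous value, and only the single thread that observed 0 proceeds. A minimal standalone sketch of the same pattern, reusing the Atomic API assumed by this file:

    static volatile jint token = 0;

    void claim_and_do_once() {
      if (Atomic::cmpxchg(1, &token, 0) != 0) {
        return;  // another thread already holds the token
      }
      // exactly one thread reaches this point until the token is reset to 0
    }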
 
   VM_HandleFullCodeCache op(is_full);
   VMThread::execute(&op);
 
-  // rescan again as soon as possible
-  _rescan = true;
+  // resweep again as soon as possible
+  _resweep = true;
 }
 
 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
@@ -500,62 +486,64 @@
 
   debug_only(jlong start = os::javaTimeMillis();)
 
-  if ((!was_full()) && (is_full)) {
-    if (!CodeCache::needs_flushing()) {
-      log_sweep("restart_compiler");
-      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-      return;
-    }
-  }
+  // Traverse the code cache trying to dump the oldest nmethods
+  int curr_max_comp_id = CompileBroker::get_compilation_id();
+  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
 
-  // Traverse the code cache trying to dump the oldest nmethods
-  uint curr_max_comp_id = CompileBroker::get_compilation_id();
-  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
   log_sweep("start_cleaning");
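Editor's note: a worked example of the flush target above, with assumed numbers. If curr_max_comp_id is 1000, _dead_compile_ids is 200 and CodeCacheFlushingFraction has its default value of 2, then flush_target = ((1000 - 200) / 2) + 200 = 600: the sweeper considers for disconnection the older half of the compile ids still present in the cache (ids below 600), rather than halving the raw id range against _highest_marked as the deleted code did.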
 
   nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
   jint disconnected = 0;
   jint made_not_entrant  = 0;
+  jint nmethod_count = 0;
+
   while ((nm != NULL)){
     int curr_comp_id = nm->compile_id();
 
     // OSR methods cannot be flushed like this. Also, don't flush native methods
     // since they are part of the JDK in most cases
-    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
-        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
+    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
+
+      // only count methods that can be speculatively disconnected
+      nmethod_count++;
 
-      if ((nm->method()->code() == nm)) {
-        // This method has not been previously considered for
-        // unloading or it was restored already
-        CodeCache::speculatively_disconnect(nm);
-        disconnected++;
-      } else if (nm->is_speculatively_disconnected()) {
-        // This method was previously considered for preemptive unloading and was not called since then
-        CompilationPolicy::policy()->delay_compilation(nm->method());
-        nm->make_not_entrant();
-        made_not_entrant++;
-      }
+      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
+        if ((nm->method()->code() == nm)) {
+          // This method has not been previously considered for
+          // unloading or it was restored already
+          CodeCache::speculatively_disconnect(nm);
+          disconnected++;
+        } else if (nm->is_speculatively_disconnected()) {
+          // This method was previously considered for preemptive unloading and was not called since then
+          CompilationPolicy::policy()->delay_compilation(nm->method());
+          nm->make_not_entrant();
+          made_not_entrant++;
+        }
 
-      if (curr_comp_id > _highest_marked) {
-        _highest_marked = curr_comp_id;
+        if (curr_comp_id > _highest_marked) {
+          _highest_marked = curr_comp_id;
+        }
       }
     }
     nm = CodeCache::alive_nmethod(CodeCache::next(nm));
   }
 
+  // remember how many compile_ids weren't seen last flush.
+  _dead_compile_ids = curr_max_comp_id - nmethod_count;
+
   log_sweep("stop_cleaning",
                        "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
                        disconnected, made_not_entrant);
 
   // Shut off compiler. Sweeper will start over with a new stack scan and
   // traversal cycle and turn it back on if it clears enough space.
-  if (was_full()) {
-    _last_was_full = os::javaTimeMillis();
-    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+  if (is_full) {
+    _last_full_flush_time = os::javaTimeMillis();
   }
 
   // After two more traversals the sweeper will get rid of unrestored nmethods
-  _was_full_traversal = _traversals;
+  _last_flush_traversal_id = _traversals;
+  _resweep = true;
 #ifdef ASSERT
   jlong end = os::javaTimeMillis();
   if(PrintMethodFlushing && Verbose) {
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/sweeper.hpp
--- a/src/share/vm/runtime/sweeper.hpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/runtime/sweeper.hpp	Mon May 06 10:20:18 2013 -0700
@@ -35,26 +35,29 @@
   static nmethod*  _current;   // Current nmethod
   static int       _seen;      // Nof. nmethod we have currently processed in current pass of CodeCache
 
-  static volatile int _invocations;   // No. of invocations left until we are completed with this pass
-  static volatile int _sweep_started; // Flag to control conc sweeper
+  static volatile int  _invocations;   // No. of invocations left until we are completed with this pass
+  static volatile int  _sweep_started; // Flag to control conc sweeper
 
-  static bool      _rescan;          // Indicates that we should do a full rescan of the
-                                     // of the code cache looking for work to do.
-  static bool      _do_sweep;        // Flag to skip the conc sweep if no stack scan happened
-  static int       _locked_seen;     // Number of locked nmethods encountered during the scan
+  // The following are reset in scan_stacks and synchronized by the safepoint
+  static bool      _resweep;         // Indicates that a change has happened and we want another sweep,
+                                     // always checked and reset at a safepoint so memory will be in sync.
+  static int       _locked_seen;     // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on stack
+  static jint      _flush_token;     // token that guards method flushing, making sure it is executed only once.
 
-  static bool      _was_full;        // remember if we did emergency unloading
-  static jint      _advise_to_sweep; // flag to indicate code cache getting full
-  static jlong     _last_was_full;   // timestamp of last emergency unloading
-  static uint      _highest_marked;  // highest compile id dumped at last emergency unloading
-  static long      _was_full_traversal; // trav number at last emergency unloading
+  // These are set during a flush, a VM-operation
+  static long      _last_flush_traversal_id; // trav number at last flush unloading
+  static jlong     _last_full_flush_time;    // timestamp of last emergency unloading
+
+  // These are synchronized by the _sweep_started token
+  static int       _highest_marked;   // highest compile id dumped at last emergency unloading
+  static int       _dead_compile_ids; // number of compile ids that were not in the cache last flush
 
   static void process_nmethod(nmethod *nm);
-  static void release_nmethod(nmethod* nm);
 
   static void log_sweep(const char* msg, const char* format = NULL, ...);
+  static bool sweep_in_progress();
 
  public:
   static long traversal_count() { return _traversals; }
@@ -71,17 +74,14 @@
   static void possibly_sweep();   // Compiler threads call this to sweep
 
   static void notify(nmethod* nm) {
-    // Perform a full scan of the code cache from the beginning.  No
+    // Request a new sweep of the code cache from the beginning. No
     // need to synchronize the setting of this flag since it only
     // changes to false at safepoint so we can never overwrite it with false.
- _rescan = true; + _resweep = true; } static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure - - static void set_was_full(bool state) { _was_full = state; } - static bool was_full() { return _was_full; } }; #endif // SHARE_VM_RUNTIME_SWEEPER_HPP diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/runtime/thread.cpp Mon May 06 10:20:18 2013 -0700 @@ -3447,7 +3447,8 @@ assert (Universe::is_fully_initialized(), "not initialized"); if (VerifyDuringStartup) { - VM_Verify verify_op(false /* silent */); // make sure we're starting with a clean slate + // Make sure we're starting with a clean slate. + VM_Verify verify_op; VMThread::execute(&verify_op); } diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/virtualspace.cpp --- a/src/share/vm/runtime/virtualspace.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/runtime/virtualspace.cpp Mon May 06 10:20:18 2013 -0700 @@ -60,72 +60,6 @@ initialize(size, alignment, large, NULL, 0, executable); } -char * -ReservedSpace::align_reserved_region(char* addr, const size_t len, - const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align) -{ - assert(addr != NULL, "sanity"); - const size_t required_size = prefix_size + suffix_size; - assert(len >= required_size, "len too small"); - - const size_t s = size_t(addr); - const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1); - const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs; - - if (len < beg_delta + required_size) { - return NULL; // Cannot do proper alignment. - } - const size_t end_delta = len - (beg_delta + required_size); - - if (beg_delta != 0) { - os::release_memory(addr, beg_delta); - } - - if (end_delta != 0) { - char* release_addr = (char*) (s + beg_delta + required_size); - os::release_memory(release_addr, end_delta); - } - - return (char*) (s + beg_delta); -} - -char* ReservedSpace::reserve_and_align(const size_t reserve_size, - const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align) -{ - assert(reserve_size > prefix_size + suffix_size, "should not be here"); - - char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align); - if (raw_addr == NULL) return NULL; - - char* result = align_reserved_region(raw_addr, reserve_size, prefix_size, - prefix_align, suffix_size, - suffix_align); - if (result == NULL && !os::release_memory(raw_addr, reserve_size)) { - fatal("os::release_memory failed"); - } - -#ifdef ASSERT - if (result != NULL) { - const size_t raw = size_t(raw_addr); - const size_t res = size_t(result); - assert(res >= raw, "alignment decreased start addr"); - assert(res + prefix_size + suffix_size <= raw + reserve_size, - "alignment increased end addr"); - assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix"); - assert(((res + prefix_size) & (suffix_align - 1)) == 0, - "bad alignment of suffix"); - } -#endif - - return result; -} - // Helper method. 
static bool failed_to_reserve_as_requested(char* base, char* requested_address, const size_t size, bool special) @@ -155,92 +89,6 @@ return true; } -ReservedSpace::ReservedSpace(const size_t suffix_size, - const size_t suffix_align, - char* requested_address, - const size_t noaccess_prefix) -{ - assert(suffix_size != 0, "sanity"); - assert(suffix_align != 0, "sanity"); - assert((suffix_size & (suffix_align - 1)) == 0, - "suffix_size not divisible by suffix_align"); - - // Assert that if noaccess_prefix is used, it is the same as prefix_align. - // Add in noaccess_prefix to prefix - const size_t adjusted_prefix_size = noaccess_prefix; - const size_t size = adjusted_prefix_size + suffix_size; - - // On systems where the entire region has to be reserved and committed up - // front, the compound alignment normally done by this method is unnecessary. - const bool try_reserve_special = UseLargePages && - suffix_align == os::large_page_size(); - if (!os::can_commit_large_page_memory() && try_reserve_special) { - initialize(size, suffix_align, true, requested_address, noaccess_prefix, - false); - return; - } - - _base = NULL; - _size = 0; - _alignment = 0; - _special = false; - _noaccess_prefix = 0; - _executable = false; - - // Optimistically try to reserve the exact size needed. - char* addr; - if (requested_address != 0) { - requested_address -= noaccess_prefix; // adjust address - assert(requested_address != NULL, "huge noaccess prefix?"); - addr = os::attempt_reserve_memory_at(size, requested_address); - if (failed_to_reserve_as_requested(addr, requested_address, size, false)) { - // OS ignored requested address. Try different address. - addr = NULL; - } - } else { - addr = os::reserve_memory(size, NULL, suffix_align); - } - if (addr == NULL) return; - - // Check whether the result has the needed alignment - const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1); - if (ofs != 0) { - // Wrong alignment. Release, allocate more space and do manual alignment. - // - // On most operating systems, another allocation with a somewhat larger size - // will return an address "close to" that of the previous allocation. The - // result is often the same address (if the kernel hands out virtual - // addresses from low to high), or an address that is offset by the increase - // in size. Exploit that to minimize the amount of extra space requested. - if (!os::release_memory(addr, size)) { - fatal("os::release_memory failed"); - } - - const size_t extra = MAX2(ofs, suffix_align - ofs); - addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align, - suffix_size, suffix_align); - if (addr == NULL) { - // Try an even larger region. If this fails, address space is exhausted. - addr = reserve_and_align(size + suffix_align, adjusted_prefix_size, - suffix_align, suffix_size, suffix_align); - } - - if (requested_address != 0 && - failed_to_reserve_as_requested(addr, requested_address, size, false)) { - // As a result of the alignment constraints, the allocated addr differs - // from the requested address. Return back to the caller who can - // take remedial action (like try again without a requested address). 
- assert(_base == NULL, "should be"); - return; - } - } - - _base = addr; - _size = size; - _alignment = suffix_align; - _noaccess_prefix = noaccess_prefix; -} - void ReservedSpace::initialize(size_t size, size_t alignment, bool large, char* requested_address, const size_t noaccess_prefix, @@ -476,20 +324,6 @@ protect_noaccess_prefix(size); } -ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size, - const size_t alignment, - char* requested_address) : - ReservedSpace(heap_space_size, alignment, - requested_address, - (UseCompressedOops && (Universe::narrow_oop_base() != NULL) && - Universe::narrow_oop_use_implicit_null_checks()) ? - lcm(os::vm_page_size(), alignment) : 0) { - if (base() > 0) { - MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap); - } - protect_noaccess_prefix(heap_space_size); -} - // Reserve space for code segment. Same as Java heap only we mark this as // executable. ReservedCodeSpace::ReservedCodeSpace(size_t r_size, diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/virtualspace.hpp --- a/src/share/vm/runtime/virtualspace.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/runtime/virtualspace.hpp Mon May 06 10:20:18 2013 -0700 @@ -47,28 +47,6 @@ const size_t noaccess_prefix, bool executable); - // Release parts of an already-reserved memory region [addr, addr + len) to - // get a new region that has "compound alignment." Return the start of the - // resulting region, or NULL on failure. - // - // The region is logically divided into a prefix and a suffix. The prefix - // starts at the result address, which is aligned to prefix_align. The suffix - // starts at result address + prefix_size, which is aligned to suffix_align. - // The total size of the result region is size prefix_size + suffix_size. - char* align_reserved_region(char* addr, const size_t len, - const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align); - - // Reserve memory, call align_reserved_region() to alignment it and return the - // result. - char* reserve_and_align(const size_t reserve_size, - const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align); - protected: // Create protection page at the beginning of the space. 
  void protect_noaccess_prefix(const size_t size);
 
@@ -79,9 +57,6 @@
   ReservedSpace(size_t size, size_t alignment, bool large,
                 char* requested_address = NULL,
                 const size_t noaccess_prefix = 0);
-  ReservedSpace(const size_t suffix_size, const size_t suffix_align,
-                char* requested_address,
-                const size_t noaccess_prefix = 0);
   ReservedSpace(size_t size, size_t alignment, bool large, bool executable);
 
   // Accessors
@@ -128,8 +103,6 @@
   // Constructor
   ReservedHeapSpace(size_t size, size_t forced_base_alignment,
                     bool large, char* requested_address);
-  ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align,
-                    char* requested_address);
 };
 
 // Class encapsulating behavior specific memory space for Code
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/vmStructs.cpp
--- a/src/share/vm/runtime/vmStructs.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/runtime/vmStructs.cpp	Mon May 06 10:20:18 2013 -0700
@@ -828,6 +828,7 @@
   nonstatic_field(nmethod, _lock_count, jint) \
   nonstatic_field(nmethod, _stack_traversal_mark, long) \
   nonstatic_field(nmethod, _compile_id, int) \
+  nonstatic_field(nmethod, _comp_level, int) \
   nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \
   nonstatic_field(nmethod, _marked_for_deoptimization, bool) \
   \
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/vmThread.cpp
--- a/src/share/vm/runtime/vmThread.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/runtime/vmThread.cpp	Mon May 06 10:20:18 2013 -0700
@@ -293,7 +293,7 @@
     os::check_heap();
     // Silent verification so as not to pollute normal output,
     // unless we really asked for it.
-    Universe::verify(!(PrintGCDetails || Verbose));
+    Universe::verify(!(PrintGCDetails || Verbose) || VerifySilently);
   }
 
   CompileBroker::set_should_block();
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/runtime/vm_operations.hpp
--- a/src/share/vm/runtime/vm_operations.hpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/runtime/vm_operations.hpp	Mon May 06 10:20:18 2013 -0700
@@ -302,7 +302,7 @@
  private:
   bool _silent;
  public:
-  VM_Verify(bool silent) : _silent(silent) {}
+  VM_Verify(bool silent = VerifySilently) : _silent(silent) {}
   VMOp_Type type() const { return VMOp_Verify; }
   void doit();
 };
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/services/attachListener.cpp
--- a/src/share/vm/services/attachListener.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/services/attachListener.cpp	Mon May 06 10:20:18 2013 -0700
@@ -157,7 +157,7 @@
   Thread* THREAD = Thread::current();
   // All the supplied jcmd arguments are stored as a single
   // string (op->arg(0)). This is parsed by the DCmd framework.
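Editor's note: every entry point into the diagnostic command framework now identifies its origin, and that source is threaded through parsing, permission filtering and execution. A minimal sketch of an internal invocation under the new signature (the command string "GC.run" is an assumed example; any registered command would do, and a TRAPS-style context providing THREAD is assumed):

    // From VM code: run one diagnostic command, attributed to the internal source.
    DCmd::parse_and_execute(DCmd_Source_Internal, tty, "GC.run", ' ', THREAD);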
- DCmd::parse_and_execute(out, op->arg(0), ' ', THREAD); + DCmd::parse_and_execute(DCmd_Source_AttachAPI, out, op->arg(0), ' ', THREAD); if (HAS_PENDING_EXCEPTION) { java_lang_Throwable::print(PENDING_EXCEPTION, out); out->cr(); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/services/diagnosticCommand.cpp --- a/src/share/vm/services/diagnosticCommand.cpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/services/diagnosticCommand.cpp Mon May 06 10:20:18 2013 -0700 @@ -34,26 +34,33 @@ void DCmdRegistrant::register_dcmds(){ // Registration of the diagnostic commands - // First boolean argument specifies if the command is enabled - // Second boolean argument specifies if the command is hidden - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); + // First argument specifies which interfaces will export the command + // Second argument specifies if the command is enabled + // Third argument specifies if the command is hidden + uint32_t full_export = DCmd_Source_Internal | DCmd_Source_AttachAPI + | DCmd_Source_MBean; + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); #if INCLUDE_SERVICES // Heap dumping/inspection supported - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); #endif // INCLUDE_SERVICES - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true, false)); - //Enhanced JMX Agent Support - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true,false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true,false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(true,false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + + // Enhanced JMX Agent Support + // These commands won't be exported via the DiagnosticCommandMBean until an + // appropriate permission is created for them + uint32_t jmx_agent_export_flags = DCmd_Source_Internal | DCmd_Source_AttachAPI; + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(jmx_agent_export_flags, true,false)); + DCmdFactory::register_DCmdFactory(new 
DCmdFactoryImpl(jmx_agent_export_flags, true,false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(jmx_agent_export_flags, true,false)); } @@ -72,29 +79,37 @@ _dcmdparser.add_dcmd_argument(&_cmd); }; -void HelpDCmd::execute(TRAPS) { +void HelpDCmd::execute(DCmdSource source, TRAPS) { if (_all.value()) { - GrowableArray* cmd_list = DCmdFactory::DCmd_list(); + GrowableArray* cmd_list = DCmdFactory::DCmd_list(source); for (int i = 0; i < cmd_list->length(); i++) { - DCmdFactory* factory = DCmdFactory::factory(cmd_list->at(i), + DCmdFactory* factory = DCmdFactory::factory(source, cmd_list->at(i), strlen(cmd_list->at(i))); - if (!factory->is_hidden()) { - output()->print_cr("%s%s", factory->name(), - factory->is_enabled() ? "" : " [disabled]"); - output()->print_cr("\t%s", factory->description()); - output()->cr(); - } + output()->print_cr("%s%s", factory->name(), + factory->is_enabled() ? "" : " [disabled]"); + output()->print_cr("\t%s", factory->description()); + output()->cr(); factory = factory->next(); } } else if (_cmd.has_value()) { DCmd* cmd = NULL; - DCmdFactory* factory = DCmdFactory::factory(_cmd.value(), + DCmdFactory* factory = DCmdFactory::factory(source, _cmd.value(), strlen(_cmd.value())); if (factory != NULL) { output()->print_cr("%s%s", factory->name(), factory->is_enabled() ? "" : " [disabled]"); output()->print_cr(factory->description()); output()->print_cr("\nImpact: %s", factory->impact()); + JavaPermission p = factory->permission(); + if(p._class != NULL) { + if(p._action != NULL) { + output()->print_cr("\nPermission: %s(%s, %s)", + p._class, p._name == NULL ? "null" : p._name, p._action); + } else { + output()->print_cr("\nPermission: %s(%s)", + p._class, p._name == NULL ? "null" : p._name); + } + } output()->cr(); cmd = factory->create_resource_instance(output()); if (cmd != NULL) { @@ -106,14 +121,12 @@ } } else { output()->print_cr("The following commands are available:"); - GrowableArray* cmd_list = DCmdFactory::DCmd_list(); + GrowableArray* cmd_list = DCmdFactory::DCmd_list(source); for (int i = 0; i < cmd_list->length(); i++) { - DCmdFactory* factory = DCmdFactory::factory(cmd_list->at(i), + DCmdFactory* factory = DCmdFactory::factory(source, cmd_list->at(i), strlen(cmd_list->at(i))); - if (!factory->is_hidden()) { - output()->print_cr("%s%s", factory->name(), - factory->is_enabled() ? "" : " [disabled]"); - } + output()->print_cr("%s%s", factory->name(), + factory->is_enabled() ? 
"" : " [disabled]"); factory = factory->_next; } output()->print_cr("\nFor more information about a specific command use 'help '."); @@ -131,7 +144,7 @@ } } -void VersionDCmd::execute(TRAPS) { +void VersionDCmd::execute(DCmdSource source, TRAPS) { output()->print_cr("%s version %s", Abstract_VM_Version::vm_name(), Abstract_VM_Version::vm_release()); JDK_Version jdk_version = JDK_Version::current(); @@ -150,7 +163,7 @@ _dcmdparser.add_dcmd_option(&_all); } -void PrintVMFlagsDCmd::execute(TRAPS) { +void PrintVMFlagsDCmd::execute(DCmdSource source, TRAPS) { if (_all.value()) { CommandLineFlags::printFlags(output(), true); } else { @@ -169,7 +182,7 @@ } } -void PrintSystemPropertiesDCmd::execute(TRAPS) { +void PrintSystemPropertiesDCmd::execute(DCmdSource source, TRAPS) { // load sun.misc.VMSupport Symbol* klass = vmSymbols::sun_misc_VMSupport(); Klass* k = SystemDictionary::resolve_or_fail(klass, true, CHECK); @@ -219,7 +232,7 @@ _dcmdparser.add_dcmd_option(&_date); } -void VMUptimeDCmd::execute(TRAPS) { +void VMUptimeDCmd::execute(DCmdSource source, TRAPS) { if (_date.value()) { output()->date_stamp(true, "", ": "); } @@ -239,11 +252,15 @@ } } -void SystemGCDCmd::execute(TRAPS) { - Universe::heap()->collect(GCCause::_java_lang_system_gc); +void SystemGCDCmd::execute(DCmdSource source, TRAPS) { + if (!DisableExplicitGC) { + Universe::heap()->collect(GCCause::_java_lang_system_gc); + } else { + output()->print_cr("Explicit GC is disabled, no GC has been performed."); + } } -void RunFinalizationDCmd::execute(TRAPS) { +void RunFinalizationDCmd::execute(DCmdSource source, TRAPS) { Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK); instanceKlassHandle klass(THREAD, k); @@ -263,7 +280,7 @@ _dcmdparser.add_dcmd_argument(&_filename); } -void HeapDumpDCmd::execute(TRAPS) { +void HeapDumpDCmd::execute(DCmdSource source, TRAPS) { // Request a full GC before heap dump if _all is false // This helps reduces the amount of unreachable objects in the dump // and makes it easier to browse. 
@@ -301,7 +318,7 @@ _dcmdparser.add_dcmd_option(&_all); } -void ClassHistogramDCmd::execute(TRAPS) { +void ClassHistogramDCmd::execute(DCmdSource source, TRAPS) { VM_GC_HeapInspection heapop(output(), !_all.value() /* request full gc if false */, true /* need_prologue */); @@ -337,7 +354,7 @@ _dcmdparser.add_dcmd_argument(&_columns); } -void ClassStatsDCmd::execute(TRAPS) { +void ClassStatsDCmd::execute(DCmdSource source, TRAPS) { if (!UnlockDiagnosticVMOptions) { output()->print_cr("GC.class_stats command requires -XX:+UnlockDiagnosticVMOptions"); return; @@ -384,7 +401,7 @@ _dcmdparser.add_dcmd_option(&_locks); } -void ThreadDumpDCmd::execute(TRAPS) { +void ThreadDumpDCmd::execute(DCmdSource source, TRAPS) { // thread stacks VM_PrintThreads op1(output(), _locks.value()); VMThread::execute(&op1); @@ -526,7 +543,8 @@ } } -void JMXStartRemoteDCmd::execute(TRAPS) { + +void JMXStartRemoteDCmd::execute(DCmdSource source, TRAPS) { ResourceMark rm(THREAD); HandleMark hm(THREAD); @@ -593,7 +611,7 @@ // do nothing } -void JMXStartLocalDCmd::execute(TRAPS) { +void JMXStartLocalDCmd::execute(DCmdSource source, TRAPS) { ResourceMark rm(THREAD); HandleMark hm(THREAD); @@ -611,7 +629,7 @@ } -void JMXStopRemoteDCmd::execute(TRAPS) { +void JMXStopRemoteDCmd::execute(DCmdSource source, TRAPS) { ResourceMark rm(THREAD); HandleMark hm(THREAD); diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/services/diagnosticCommand.hpp --- a/src/share/vm/services/diagnosticCommand.hpp Thu May 02 10:58:04 2013 -0400 +++ b/src/share/vm/services/diagnosticCommand.hpp Mon May 06 10:20:18 2013 -0700 @@ -51,7 +51,7 @@ } static const char* impact() { return "Low"; } static int num_arguments(); - virtual void execute(TRAPS); + virtual void execute(DCmdSource source, TRAPS); }; class VersionDCmd : public DCmd { @@ -62,8 +62,13 @@ return "Print JVM version information."; } static const char* impact() { return "Low"; } + static const JavaPermission permission() { + JavaPermission p = {"java.util.PropertyPermission", + "java.vm.version", "read"}; + return p; + } static int num_arguments() { return 0; } - virtual void execute(TRAPS); + virtual void execute(DCmdSource source, TRAPS); }; class CommandLineDCmd : public DCmd { @@ -74,8 +79,13 @@ return "Print the command line used to start this VM instance."; } static const char* impact() { return "Low"; } + static const JavaPermission permission() { + JavaPermission p = {"java.lang.management.ManagementPermission", + "monitor", NULL}; + return p; + } static int num_arguments() { return 0; } - virtual void execute(TRAPS) { + virtual void execute(DCmdSource source, TRAPS) { Arguments::print_on(_output); } }; @@ -91,8 +101,13 @@ static const char* impact() { return "Low"; } + static const JavaPermission permission() { + JavaPermission p = {"java.util.PropertyPermission", + "*", "read"}; + return p; + } static int num_arguments() { return 0; } - virtual void execute(TRAPS); + virtual void execute(DCmdSource source, TRAPS); }; // See also: print_flag in attachListener.cpp @@ -108,8 +123,13 @@ static const char* impact() { return "Low"; } + static const JavaPermission permission() { + JavaPermission p = {"java.lang.management.ManagementPermission", + "monitor", NULL}; + return p; + } static int num_arguments(); - virtual void execute(TRAPS); + virtual void execute(DCmdSource source, TRAPS); }; class VMUptimeDCmd : public DCmdWithParser { @@ -125,7 +145,7 @@ return "Low"; } static int num_arguments(); - virtual void execute(TRAPS); + virtual void execute(DCmdSource source, TRAPS); }; class 
 
 class SystemGCDCmd : public DCmd {
@@ -139,7 +159,7 @@
     return "Medium: Depends on Java heap size and content.";
   }
   static int num_arguments() { return 0; }
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 class RunFinalizationDCmd : public DCmd {
@@ -153,7 +173,7 @@
     return "Medium: Depends on Java content.";
   }
   static int num_arguments() { return 0; }
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 #if INCLUDE_SERVICES // Heap dumping supported
@@ -174,8 +194,13 @@
     return "High: Depends on Java heap size and content. "
            "Request a full GC unless the '-all' option is specified.";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 #endif // INCLUDE_SERVICES
@@ -194,8 +219,13 @@
   static const char* impact() {
     return "High: Depends on Java heap size and content.";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 class ClassStatsDCmd : public DCmdWithParser {
@@ -216,7 +246,7 @@
     return "High: Depends on Java heap size and content.";
   }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 // See also: thread_dump in attachListener.cpp
@@ -232,8 +262,13 @@
   static const char* impact() {
     return "Medium: Depends on the number of threads.";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 // Enhanced JMX Agent support
@@ -281,7 +316,7 @@
 
   static int num_arguments();
 
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 
 };
 
@@ -302,7 +337,7 @@
     return "Start local management agent.";
   }
 
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 
 };
 
@@ -321,7 +356,7 @@
     return "Stop remote management agent.";
   }
 
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 #endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_HPP
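Taken together, the updated header means any new diagnostic command supplies the two-argument execute() and, if it is meant to be callable over JMX, a permission() triple. A hypothetical command written against this interface might look as follows (illustrative sketch only, not part of this changeset; it assumes the DCmd(outputStream*, bool) constructor used by the classes above):

    class ExampleDCmd : public DCmd {
    public:
      ExampleDCmd(outputStream* output, bool heap) : DCmd(output, heap) { }
      static const char* name()        { return "Example.print"; }  // hypothetical command name
      static const char* description() { return "Print an example line."; }
      static const char* impact()      { return "Low"; }
      static const JavaPermission permission() {
        // Checked by the DiagnosticCommandMBean before a JMX invocation.
        JavaPermission p = {"java.lang.management.ManagementPermission",
                            "monitor", NULL};
        return p;
      }
      static int num_arguments() { return 0; }
      virtual void execute(DCmdSource source, TRAPS) {
        // 'source' tells the command whether it was invoked internally,
        // through the attach API (jcmd), or through the MBean.
        output()->print_cr("example executed");
      }
    };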
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/services/diagnosticFramework.cpp
--- a/src/share/vm/services/diagnosticFramework.cpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/services/diagnosticFramework.cpp	Mon May 06 10:20:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -359,7 +359,7 @@
   while (arg != NULL) {
     array->append(new DCmdArgumentInfo(arg->name(), arg->description(),
                   arg->type(), arg->default_string(), arg->is_mandatory(),
-                  false, idx));
+                  false, arg->allow_multiple(), idx));
     idx++;
     arg = arg->next();
   }
@@ -367,32 +367,42 @@
   while (arg != NULL) {
     array->append(new DCmdArgumentInfo(arg->name(), arg->description(),
                   arg->type(), arg->default_string(), arg->is_mandatory(),
-                  true));
+                  true, arg->allow_multiple()));
     arg = arg->next();
   }
   return array;
 }
 
 DCmdFactory* DCmdFactory::_DCmdFactoryList = NULL;
+bool DCmdFactory::_has_pending_jmx_notification = false;
 
-void DCmd::parse_and_execute(outputStream* out, const char* cmdline,
-                             char delim, TRAPS) {
+void DCmd::parse_and_execute(DCmdSource source, outputStream* out,
+                             const char* cmdline, char delim, TRAPS) {
   if (cmdline == NULL) return; // Nothing to do!
   DCmdIter iter(cmdline, '\n');
+  int count = 0;
   while (iter.has_next()) {
+    if(source == DCmd_Source_MBean && count > 0) {
+      // When diagnostic commands are invoked via JMX, each command line
+      // must contain one and only one command because of the Permission
+      // checks performed by the DiagnosticCommandMBean
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+                "Invalid syntax");
+    }
     CmdLine line = iter.next();
     if (line.is_stop()) {
       break;
     }
     if (line.is_executable()) {
-      DCmd* command = DCmdFactory::create_local_DCmd(line, out, CHECK);
+      DCmd* command = DCmdFactory::create_local_DCmd(source, line, out, CHECK);
       assert(command != NULL, "command error must be handled before this line");
       DCmdMark mark(command);
       command->parse(&line, delim, CHECK);
-      command->execute(CHECK);
+      command->execute(source, CHECK);
     }
+    count++;
   }
 }
@@ -420,15 +430,78 @@
   return _dcmdparser.argument_info_array();
 }
 
-Mutex* DCmdFactory::_dcmdFactory_lock = new Mutex(Mutex::leaf, "DCmdFactory", true);
+void DCmdFactory::push_jmx_notification_request() {
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  _has_pending_jmx_notification = true;
+  Service_lock->notify_all();
+}
+
+void DCmdFactory::send_notification(TRAPS) {
+  DCmdFactory::send_notification_internal(THREAD);
+  // Clearing pending exception to avoid premature termination of
+  // the service thread
+  if (HAS_PENDING_EXCEPTION) {
+    CLEAR_PENDING_EXCEPTION;
+  }
+}
+void DCmdFactory::send_notification_internal(TRAPS) {
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  bool notif = false;
+  {
+    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+    notif = _has_pending_jmx_notification;
+    _has_pending_jmx_notification = false;
+  }
+  if (notif) {
+
+    Klass* k = Management::sun_management_ManagementFactoryHelper_klass(CHECK);
+    instanceKlassHandle mgmt_factory_helper_klass(THREAD, k);
 
-DCmdFactory* DCmdFactory::factory(const char* name, size_t len) {
+    JavaValue result(T_OBJECT);
+    JavaCalls::call_static(&result,
+                           mgmt_factory_helper_klass,
+                           vmSymbols::getDiagnosticCommandMBean_name(),
+                           vmSymbols::getDiagnosticCommandMBean_signature(),
+                           CHECK);
+
+    instanceOop m = (instanceOop) result.get_jobject();
+    instanceHandle dcmd_mbean_h(THREAD, m);
+
+    Klass* k2 = Management::sun_management_DiagnosticCommandImpl_klass(CHECK);
+    instanceKlassHandle dcmd_mbean_klass(THREAD, k2);
+
+    if (!dcmd_mbean_h->is_a(k2)) {
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+                "ManagementFactory.getDiagnosticCommandMBean didn't return a DiagnosticCommandMBean instance");
+    }
+
+    JavaValue result2(T_VOID);
+    JavaCallArguments args2(dcmd_mbean_h);
+
+    JavaCalls::call_virtual(&result2,
+                            dcmd_mbean_klass,
+                            vmSymbols::createDiagnosticFrameworkNotification_name(),
+                            vmSymbols::void_method_signature(),
+                            &args2,
+                            CHECK);
+  }
+}
+
+Mutex* DCmdFactory::_dcmdFactory_lock = new Mutex(Mutex::leaf, "DCmdFactory", true);
+bool DCmdFactory::_send_jmx_notification = false;
+
+DCmdFactory* DCmdFactory::factory(DCmdSource source, const char* name, size_t len) {
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   DCmdFactory* factory = _DCmdFactoryList;
   while (factory != NULL) {
     if (strlen(factory->name()) == len &&
         strncmp(name, factory->name(), len) == 0) {
-      return factory;
+      if(factory->export_flags() & source) {
+        return factory;
+      } else {
+        return NULL;
+      }
     }
     factory = factory->_next;
   }
@@ -439,11 +512,16 @@
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   factory->_next = _DCmdFactoryList;
   _DCmdFactoryList = factory;
+  if (_send_jmx_notification && !factory->_hidden
+      && (factory->_export_flags & DCmd_Source_MBean)) {
+    DCmdFactory::push_jmx_notification_request();
+  }
   return 0; // Actually, there's no checks for duplicates
 }
 
-DCmd* DCmdFactory::create_global_DCmd(CmdLine &line, outputStream* out, TRAPS) {
-  DCmdFactory* f = factory(line.cmd_addr(), line.cmd_len());
+DCmd* DCmdFactory::create_global_DCmd(DCmdSource source, CmdLine &line,
+                                      outputStream* out, TRAPS) {
+  DCmdFactory* f = factory(source, line.cmd_addr(), line.cmd_len());
   if (f != NULL) {
     if (f->is_enabled()) {
       THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(),
@@ -455,8 +533,9 @@
              "Unknown diagnostic command");
 }
 
-DCmd* DCmdFactory::create_local_DCmd(CmdLine &line, outputStream* out, TRAPS) {
-  DCmdFactory* f = factory(line.cmd_addr(), line.cmd_len());
+DCmd* DCmdFactory::create_local_DCmd(DCmdSource source, CmdLine &line,
+                                     outputStream* out, TRAPS) {
+  DCmdFactory* f = factory(source, line.cmd_addr(), line.cmd_len());
   if (f != NULL) {
     if (!f->is_enabled()) {
       THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(),
@@ -468,12 +547,12 @@
              "Unknown diagnostic command");
 }
 
-GrowableArray<const char*>* DCmdFactory::DCmd_list() {
+GrowableArray<const char*>* DCmdFactory::DCmd_list(DCmdSource source) {
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   GrowableArray<const char*>* array = new GrowableArray<const char*>();
   DCmdFactory* factory = _DCmdFactoryList;
   while (factory != NULL) {
-    if (!factory->is_hidden()) {
+    if (!factory->is_hidden() && (factory->export_flags() & source)) {
       array->append(factory->name());
     }
     factory = factory->next();
@@ -481,15 +560,16 @@
   return array;
 }
 
-GrowableArray<DCmdInfo*>* DCmdFactory::DCmdInfo_list() {
+GrowableArray<DCmdInfo*>* DCmdFactory::DCmdInfo_list(DCmdSource source) {
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   GrowableArray<DCmdInfo*>* array = new GrowableArray<DCmdInfo*>();
   DCmdFactory* factory = _DCmdFactoryList;
   while (factory != NULL) {
-    if (!factory->is_hidden()) {
+    if (!factory->is_hidden() && (factory->export_flags() & source)) {
       array->append(new DCmdInfo(factory->name(),
                     factory->description(), factory->impact(),
-                    factory->num_arguments(), factory->is_enabled()));
+                    factory->permission(), factory->num_arguments(),
+                    factory->is_enabled()));
     }
     factory = factory->next();
   }
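The key change in DCmdFactory::factory() above is that a lookup now succeeds only when the factory's export mask contains the bit of the requesting source, so a command can be visible to jcmd but hidden from JMX, or the other way around. A standalone model of that filtering (a sketch, not HotSpot code; Factory stands in for DCmdFactory and the command names are illustrative):

    #include <cstdio>
    #include <cstring>

    // Same bit values as the DCmdSource enum added in diagnosticFramework.hpp below.
    enum DCmdSource {
      DCmd_Source_Internal  = 0x01U,
      DCmd_Source_AttachAPI = 0x02U,
      DCmd_Source_MBean     = 0x04U
    };

    struct Factory {
      const char* name;
      unsigned    export_flags;  // OR-ed DCmdSource bits
      Factory*    next;
    };

    // Mirrors the filtered lookup in DCmdFactory::factory(): a name match is
    // returned only if the command is exported to the requesting source.
    static Factory* find(Factory* list, DCmdSource source, const char* name) {
      for (Factory* f = list; f != NULL; f = f->next) {
        if (std::strcmp(f->name, name) == 0) {
          return (f->export_flags & source) ? f : NULL;
        }
      }
      return NULL;
    }

    int main() {
      Factory jmx_only   = {"Example.jmx_only", DCmd_Source_MBean, NULL};
      Factory everywhere = {"VM.version",
                            DCmd_Source_Internal | DCmd_Source_AttachAPI | DCmd_Source_MBean,
                            &jmx_only};
      std::printf("%s\n", find(&everywhere, DCmd_Source_AttachAPI, "VM.version") ? "found" : "hidden");        // found
      std::printf("%s\n", find(&everywhere, DCmd_Source_AttachAPI, "Example.jmx_only") ? "found" : "hidden");  // hidden
      return 0;
    }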
diff -r d9b08d62b95e -r b7f3bf2ba33b src/share/vm/services/diagnosticFramework.hpp
--- a/src/share/vm/services/diagnosticFramework.hpp	Thu May 02 10:58:04 2013 -0400
+++ b/src/share/vm/services/diagnosticFramework.hpp	Mon May 06 10:20:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,22 @@
 #include "utilities/ostream.hpp"
 
+enum DCmdSource {
+  DCmd_Source_Internal  = 0x01U,  // invocation from the JVM
+  DCmd_Source_AttachAPI = 0x02U,  // invocation via the attachAPI
+  DCmd_Source_MBean     = 0x04U   // invocation via an MBean
+};
+
+// Warning: strings referenced by the JavaPermission struct are passed to
+// the native part of the JDK. Avoid use of dynamically allocated strings
+// that could be de-allocated before the JDK native code had time to
+// convert them into Java Strings.
+struct JavaPermission {
+  const char* _class;
+  const char* _name;
+  const char* _action;
+};
+
 // CmdLine is the class used to handle a command line containing a single
 // diagnostic command and its arguments. It provides methods to access the
 // command name and the beginning of the arguments. The class is also
@@ -113,26 +129,30 @@
 // used to export the description to the JMX interface of the framework.
 class DCmdInfo : public ResourceObj {
 protected:
-  const char* _name;
-  const char* _description;
-  const char* _impact;
-  int _num_arguments;
-  bool _is_enabled;
+  const char* _name;          /* Name of the diagnostic command */
+  const char* _description;   /* Short description */
+  const char* _impact;        /* Impact on the JVM */
+  JavaPermission _permission; /* Java Permission required to execute this command, if any */
+  int _num_arguments;         /* Number of supported options or arguments */
+  bool _is_enabled;           /* True if the diagnostic command can be invoked, false otherwise */
 public:
   DCmdInfo(const char* name,
            const char* description,
            const char* impact,
+           JavaPermission permission,
            int num_arguments,
            bool enabled) {
     this->_name = name;
     this->_description = description;
     this->_impact = impact;
+    this->_permission = permission;
     this->_num_arguments = num_arguments;
     this->_is_enabled = enabled;
   }
   const char* name() const { return _name; }
   const char* description() const { return _description; }
   const char* impact() const { return _impact; }
+  JavaPermission permission() const { return _permission; }
   int num_arguments() const { return _num_arguments; }
   bool is_enabled() const { return _is_enabled; }
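The lifetime warning on JavaPermission above is worth making concrete: the struct stores raw const char* fields that the JDK side converts to Java Strings asynchronously, so only storage that outlives the call, in practice string literals, is safe. A standalone sketch (not HotSpot code) of the hazard:

    #include <cstdio>
    #include <string>

    struct JavaPermission {
      const char* _class;
      const char* _name;
      const char* _action;
    };

    // BROKEN: _name points into a std::string that is destroyed on return,
    // exactly the hazard the warning above describes.
    static JavaPermission broken() {
      std::string name = "java.vm.version";  // dies at end of scope
      JavaPermission p = {"java.util.PropertyPermission", name.c_str(), "read"};
      return p;                              // p._name dangles from here on
    }

    // CORRECT: string literals have static storage duration, so the pointers
    // stay valid for the life of the process.
    static JavaPermission correct() {
      JavaPermission p = {"java.util.PropertyPermission", "java.vm.version", "read"};
      return p;
    }

    int main() {
      JavaPermission ok = correct();
      std::printf("%s %s %s\n", ok._class, ok._name, ok._action);
      (void)broken();  // compiles, but its result must never be read
      return 0;
    }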
@@ -144,16 +164,20 @@
 // framework.
 class DCmdArgumentInfo : public ResourceObj {
 protected:
-  const char* _name;
-  const char* _description;
-  const char* _type;
-  const char* _default_string;
-  bool _mandatory;
-  bool _option;
-  int _position;
+  const char* _name;           /* Option/Argument name */
+  const char* _description;    /* Short description */
+  const char* _type;           /* Type: STRING, BOOLEAN, etc. */
+  const char* _default_string; /* Default value in a parsable string */
+  bool _mandatory;             /* True if the option/argument is mandatory */
+  bool _option;                /* True if it is an option, false if it is an argument */
+                               /* (see diagnosticFramework.hpp for option/argument definitions) */
+  bool _multiple;              /* True if the option can be specified several times */
+  int _position;               /* Expected position for this argument (this field is */
+                               /* meaningless for options) */
 public:
   DCmdArgumentInfo(const char* name, const char* description, const char* type,
-                   const char* default_string, bool mandatory, bool option) {
+                   const char* default_string, bool mandatory, bool option,
+                   bool multiple) {
     this->_name = name;
     this->_description = description;
     this->_type = type;
@@ -161,11 +185,12 @@
     this->_option = option;
     this->_mandatory = mandatory;
     this->_option = option;
+    this->_multiple = multiple;
     this->_position = -1;
   }
   DCmdArgumentInfo(const char* name, const char* description, const char* type,
                    const char* default_string, bool mandatory, bool option,
-                   int position) {
+                   bool multiple, int position) {
     this->_name = name;
     this->_description = description;
     this->_type = type;
@@ -173,6 +198,7 @@
     this->_option = option;
     this->_mandatory = mandatory;
     this->_option = option;
+    this->_multiple = multiple;
     this->_position = position;
   }
   const char* name() const { return _name; }
@@ -181,11 +207,29 @@
   const char* default_string() const { return _default_string; }
   bool is_mandatory() const { return _mandatory; }
   bool is_option() const { return _option; }
+  bool is_multiple() const { return _multiple; }
   int position() const { return _position; }
 };
 
 // The DCmdParser class can be used to create an argument parser for a
 // diagnostic command. It is not mandatory to use it to parse arguments.
+// The DCmdParser parses a CmdLine instance according to the parameters that
+// have been declared by its associated diagnostic command. A parameter can
+// either be an option or an argument. Options are identified by the option name
+// while arguments are identified by their position in the command line. The
+// position of an argument is defined relative to all arguments passed on the
+// command line, options are not considered when defining an argument position.
+// The generic syntax of a diagnostic command is:
+//
+//   <command name> [<option>=<value>] [<argument_value>]
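To make the option/argument distinction above concrete: in `VM.uptime -date=true`, `-date` is an option identified by name, while in `help VM.version` the trailing `VM.version` is an argument identified by its position (0). A hedged sketch of how the VM-internal entry point added in this changeset would be fed such a line (HotSpot-internal call, shown for illustration only; it assumes a caller with a THREAD context for the CHECK macro):

    // Runs one diagnostic command line through the new source-aware entry point.
    // 'tty' is HotSpot's default output stream; ' ' is the option delimiter.
    DCmd::parse_and_execute(DCmd_Source_Internal, tty, "VM.uptime -date=true", ' ', CHECK);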