# HG changeset patch
# User lana
# Date 1392146765 28800
# Node ID b2fee789d23f3cdabb3db4e51af43038e5692d3a
# Parent ad29d183df7c1cee5b0b1138b43c544b7aada72f
# Parent 4c8bda53850f10264bb962d4c14beedfa014b1be
Merge

diff -r 4c8bda53850f -r b2fee789d23f agent/make/mkinstall
--- a/agent/make/mkinstall Thu Feb 06 13:08:44 2014 -0800
+++ b/agent/make/mkinstall Tue Feb 11 11:26:05 2014 -0800
@@ -27,7 +27,9 @@
 cp ../src/os/solaris/proc/amd64/libsaproc.so $SA_NAME/solaris/amd64
 cp ../src/os/solaris/proc/sparc/libsaproc.so $SA_NAME/solaris/sparc
+cp ../src/os/solaris/proc/sparc/libsaproc_audit.so $SA_NAME/solaris/sparc
 cp ../src/os/solaris/proc/sparcv9/libsaproc.so $SA_NAME/solaris/sparcv9
+cp ../src/os/solaris/proc/sparcv9/libsaproc_audit.so $SA_NAME/solaris/sparcv9
 cp ../src/os/solaris/proc/i386/libsaproc.so $SA_NAME/solaris/i386
 cp ../src/os/linux/i386/libsaproc.so $SA_NAME/linux/i386
 cp ../src/os/linux/ia64/libsaproc.so $SA_NAME/linux/ia64
diff -r 4c8bda53850f -r b2fee789d23f agent/make/saenv.sh
--- a/agent/make/saenv.sh Thu Feb 06 13:08:44 2014 -0800
+++ b/agent/make/saenv.sh Tue Feb 11 11:26:05 2014 -0800
@@ -48,16 +48,17 @@
     CPU=i386
   fi
 else
-  # configure audit helper library if SA_ALTROOT is set
-  if [ -n "$SA_ALTROOT" ]; then
-    LD_AUDIT_32=$STARTDIR/../src/os/solaris/proc/`uname -p`/libsaproc_audit.so
-    export LD_AUDIT_32
-    if [ ! -f $LD_AUDIT_32 ]; then
-      echo "SA_ALTROOT is set and can't find libsaproc_audit.so."
-      echo "Make sure to build it with 'make natives'."
-      exit 1
-    fi
+  # configure audit helper library for solaris
+  LD_AUDIT_32=$STARTDIR/../src/os/solaris/proc/`uname -p`/libsaproc_audit.so
+  if [ ! -f $LD_AUDIT_32 ]; then
+    LD_AUDIT_32=$STARTDIR/solaris/`uname -p`/libsaproc_audit.so
+  fi
+  if [ ! -f $LD_AUDIT_32 ]; then
+    echo "Can't find libsaproc_audit.so."
+    echo "Make sure to build it with 'make natives'."
+    exit 1
   fi
+  export LD_AUDIT_32
   SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/`uname -p`:$STARTDIR/solaris/`uname -p`
   OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
   CPU=sparc
diff -r 4c8bda53850f -r b2fee789d23f agent/make/saenv64.sh
--- a/agent/make/saenv64.sh Thu Feb 06 13:08:44 2014 -0800
+++ b/agent/make/saenv64.sh Tue Feb 11 11:26:05 2014 -0800
@@ -43,16 +43,19 @@
   fi
 fi

-# configure audit helper library if SA_ALTROOT is set
-if [ -n "$SA_ALTROOT" ]; then
-  LD_AUDIT_64=$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so
-  export LD_AUDIT_64
-  if [ ! -f $LD_AUDIT_64 ]; then
-    echo "SA_ALTROOT is set and can't find libsaproc_audit.so."
-    echo "Make sure to build it with 'make natives'."
-    exit 1
-  fi
+# configure audit helper library
+LD_AUDIT_64=$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so
+if [ ! -f $LD_AUDIT_64 ]; then
+  LD_AUDIT_64=$STARTDIR/solaris/$CPU/libsaproc_audit.so
 fi
+
+if [ ! -f $LD_AUDIT_64 ]; then
+  echo "Can't find libsaproc_audit.so."
+  echo "Make sure to build it with 'make natives'."
+  exit 1
+fi
+
+export LD_AUDIT_64
 SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/$CPU:$STARTDIR/solaris/$CPU
 OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
diff -r 4c8bda53850f -r b2fee789d23f agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Thu Feb 06 13:08:44 2014 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Tue Feb 11 11:26:05 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -152,7 +152,7 @@ private long indexOffset(long index) { if (Assert.ASSERTS_ENABLED) { - Assert.that(index > 0 && index < getLength(), "invalid cp index " + index + " " + getLength()); + Assert.that(index >= 0 && index < getLength(), "invalid cp index " + index + " " + getLength()); } return (index * getElementSize()) + headerSize; } diff -r 4c8bda53850f -r b2fee789d23f agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Thu Feb 06 13:08:44 2014 -0800 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,11 +98,14 @@ break; default: throw new IllegalArgumentException(); } + if (cpCache == null) { return (short) cpCacheIndex; } else if (fmt.indexOf("JJJJ") >= 0) { - // change byte-ordering and go via secondary cache entry - throw new InternalError("unimplemented"); + // Invokedynamic require special handling + cpCacheIndex = ~cpCacheIndex; + cpCacheIndex = bytes.swapInt(cpCacheIndex); + return (short) cpCache.getEntryAt(cpCacheIndex).getConstantPoolIndex(); } else if (fmt.indexOf("JJ") >= 0) { // change byte-ordering and go via cache return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex(); diff -r 4c8bda53850f -r b2fee789d23f agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java Thu Feb 06 13:08:44 2014 -0800 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
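// Note on the ByteCodeRewriter hunk above: rewritten invokedynamic bytecodes
// carry a 4-byte constant-pool-cache operand (the "JJJJ" format) rather than
// the 2-byte index used by the other invoke bytecodes, so they need their own
// decode branch. A minimal sketch of the decode order, using the names from
// the hunk (the operand appears to be stored bitwise-inverted, hence the ~):
//
//   cpCacheIndex = ~cpCacheIndex;                // undo the invokedynamic encoding
//   cpCacheIndex = bytes.swapInt(cpCacheIndex);  // restore byte ordering
//   return (short) cpCache.getEntryAt(cpCacheIndex).getConstantPoolIndex();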
* * This code is free software; you can redistribute it and/or modify it @@ -61,8 +61,9 @@ long h = 0; int s = 0; int len = buf.length; + // Emulate the unsigned int in java_lang_String::hash_code while (len-- > 0) { - h = 31*h + (0xFFL & buf[s]); + h = 31*h + (0xFFFFFFFFL & buf[s]); s++; } return h & 0xFFFFFFFFL; diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/adjust-mflags.sh --- a/make/bsd/makefiles/adjust-mflags.sh Thu Feb 06 13:08:44 2014 -0800 +++ b/make/bsd/makefiles/adjust-mflags.sh Tue Feb 11 11:26:05 2014 -0800 @@ -64,7 +64,7 @@ echo "$MFLAGS" \ | sed ' s/^-/ -/ - s/ -\([^ ][^ ]*\)j/ -\1 -j/ + s/ -\([^ I][^ I]*\)j/ -\1 -j/ s/ -j[0-9][0-9]*/ -j/ s/ -j\([^ ]\)/ -j -\1/ s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/ diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/debug.make --- a/make/bsd/makefiles/debug.make Thu Feb 06 13:08:44 2014 -0800 +++ b/make/bsd/makefiles/debug.make Tue Feb 11 11:26:05 2014 -0800 @@ -36,6 +36,9 @@ # Linker mapfile MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug +ifeq ($(OS_VENDOR), Darwin) +MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-darwin-debug +endif VERSION = debug SYSDEFS += -DASSERT diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/dtrace.make --- a/make/bsd/makefiles/dtrace.make Thu Feb 06 13:08:44 2014 -0800 +++ b/make/bsd/makefiles/dtrace.make Tue Feb 11 11:26:05 2014 -0800 @@ -68,11 +68,9 @@ # Use mapfile with libjvm_db.so LIBJVM_DB_MAPFILE = # no mapfile for usdt2 # $(MAKEFILES_DIR)/mapfile-vers-jvm_db -#LFLAGS_JVM_DB += $(MAPFLAG:FILENAME=$(LIBJVM_DB_MAPFILE)) # Use mapfile with libjvm_dtrace.so LIBJVM_DTRACE_MAPFILE = # no mapfile for usdt2 # $(MAKEFILES_DIR)/mapfile-vers-jvm_dtrace -#LFLAGS_JVM_DTRACE += $(MAPFLAG:FILENAME=$(LIBJVM_DTRACE_MAPFILE)) LFLAGS_JVM_DB += $(PICFLAG) # -D_REENTRANT LFLAGS_JVM_DTRACE += $(PICFLAG) # -D_REENTRANT @@ -260,9 +258,6 @@ endif endif -#$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \ -# $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d -# $(QUIETLY) cat $^ > $@ $(DtraceOutDir): mkdir $(DtraceOutDir) @@ -276,100 +271,25 @@ $(DtraceOutDir)/hs_private.h: $(DTRACE_SRCDIR)/hs_private.d | $(DtraceOutDir) $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hs_private.d -$(DtraceOutDir)/jhelper.h: $(DTRACE_SRCDIR)/jhelper.d $(JVMOFFS).o | $(DtraceOutDir) - $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/jhelper.d - -# jhelper currently disabled dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h -DTraced_Files = ciEnv.o \ - classLoadingService.o \ - compileBroker.o \ - hashtable.o \ - instanceKlass.o \ - java.o \ - jni.o \ - jvm.o \ - memoryManager.o \ - nmethod.o \ - objectMonitor.o \ - runtimeService.o \ - sharedRuntime.o \ - synchronizer.o \ - thread.o \ - unsafe.o \ - vmThread.o \ - vmCMSOperations.o \ - vmPSOperations.o \ - vmGCOperations.o \ - -# Dtrace is available, so we build $(DTRACE.o) -#$(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files) -# @echo Compiling $(DTRACE).d - -# $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. 
-G -xlazyload -o $@ -s $(DTRACE).d \ -# $(DTraced_Files) ||\ -# STATUS=$$?;\ -# if [ x"$$STATUS" = x"1" -a \ -# x`uname -r` = x"5.10" -a \ -# x`uname -p` = x"sparc" ]; then\ -# echo "*****************************************************************";\ -# echo "* If you are building server compiler, and the error message is ";\ -# echo "* \"incorrect ELF machine type...\", you have run into solaris bug ";\ -# echo "* 6213962, \"dtrace -G doesn't work on sparcv8+ object files\".";\ -# echo "* Either patch/upgrade your system (>= S10u1_15), or set the ";\ -# echo "* environment variable HOTSPOT_DISABLE_DTRACE_PROBES to disable ";\ -# echo "* dtrace probes for this build.";\ -# echo "*****************************************************************";\ -# fi;\ -# exit $$STATUS - # Since some DTraced_Files are in LIBJVM.o and they are touched by this - # command, and libgenerateJvmOffsets.so depends on LIBJVM.o, 'make' will - # think it needs to rebuild libgenerateJvmOffsets.so and thus JvmOffsets* - # files, but it doesn't, so we touch the necessary files to prevent later - # recompilation. Note: we only touch the necessary files if they already - # exist in order to close a race where an empty file can be created - # before the real build rule is executed. - # But, we can't touch the *.h files: This rule depends - # on them, and that would cause an infinite cycle of rebuilding. - # Neither the *.h or *.ccp files need to be touched, since they have - # rules which do not update them when the generator file has not - # changed their contents. -# $(QUIETLY) if [ -f lib$(GENOFFS).so ]; then touch lib$(GENOFFS).so; fi -# $(QUIETLY) if [ -f $(GENOFFS) ]; then touch $(GENOFFS); fi -# $(QUIETLY) if [ -f $(JVMOFFS.o) ]; then touch $(JVMOFFS.o); fi .PHONY: dtraceCheck -#SYSTEM_DTRACE_H = /usr/include/dtrace.h SYSTEM_DTRACE_PROG = /usr/sbin/dtrace -#PATCH_DTRACE_PROG = /opt/SUNWdtrd/sbin/dtrace systemDtraceFound := $(wildcard ${SYSTEM_DTRACE_PROG}) -#patchDtraceFound := $(wildcard ${PATCH_DTRACE_PROG}) -#systemDtraceHdrFound := $(wildcard $(SYSTEM_DTRACE_H)) -#ifneq ("$(systemDtraceHdrFound)", "") -#CFLAGS += -DHAVE_DTRACE_H -#endif - -#ifneq ("$(patchDtraceFound)", "") -#DTRACE_PROG=$(PATCH_DTRACE_PROG) -#DTRACE_INCL=-I/opt/SUNWdtrd/include -#else ifneq ("$(systemDtraceFound)", "") DTRACE_PROG=$(SYSTEM_DTRACE_PROG) else -endif # ifneq ("$(systemDtraceFound)", "") -#endif # ifneq ("$(patchDtraceFound)", "") +endif ifneq ("${DTRACE_PROG}", "") ifeq ("${HOTSPOT_DISABLE_DTRACE_PROBES}", "") DTRACE_OBJS = $(DTRACE.o) #$(JVMOFFS.o) CFLAGS += -DDTRACE_ENABLED #$(DTRACE_INCL) -#clangCFLAGS += -DDTRACE_ENABLED -fno-optimize-sibling-calls -#MAPFILE_DTRACE_OPT = $(MAPFILE_DTRACE) dtraceCheck: diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/fastdebug.make --- a/make/bsd/makefiles/fastdebug.make Thu Feb 06 13:08:44 2014 -0800 +++ b/make/bsd/makefiles/fastdebug.make Tue Feb 11 11:26:05 2014 -0800 @@ -57,6 +57,9 @@ # Linker mapfile MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug +ifeq ($(OS_VENDOR), Darwin) +MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-darwin-debug +endif VERSION = fastdebug SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/mapfile-vers-darwin-debug --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/bsd/makefiles/mapfile-vers-darwin-debug Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,256 @@ +# +# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. 
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# +# Only used for OSX/Darwin builds + +# Define public interface. + # _JNI + _JNI_CreateJavaVM + _JNI_GetCreatedJavaVMs + _JNI_GetDefaultJavaVMInitArgs + + # _JVM + _JVM_Accept + _JVM_ActiveProcessorCount + _JVM_AllocateNewArray + _JVM_AllocateNewObject + _JVM_ArrayCopy + _JVM_AssertionStatusDirectives + _JVM_Available + _JVM_Bind + _JVM_ClassDepth + _JVM_ClassLoaderDepth + _JVM_Clone + _JVM_Close + _JVM_CX8Field + _JVM_CompileClass + _JVM_CompileClasses + _JVM_CompilerCommand + _JVM_Connect + _JVM_ConstantPoolGetClassAt + _JVM_ConstantPoolGetClassAtIfLoaded + _JVM_ConstantPoolGetDoubleAt + _JVM_ConstantPoolGetFieldAt + _JVM_ConstantPoolGetFieldAtIfLoaded + _JVM_ConstantPoolGetFloatAt + _JVM_ConstantPoolGetIntAt + _JVM_ConstantPoolGetLongAt + _JVM_ConstantPoolGetMethodAt + _JVM_ConstantPoolGetMethodAtIfLoaded + _JVM_ConstantPoolGetMemberRefInfoAt + _JVM_ConstantPoolGetSize + _JVM_ConstantPoolGetStringAt + _JVM_ConstantPoolGetUTF8At + _JVM_CountStackFrames + _JVM_CurrentClassLoader + _JVM_CurrentLoadedClass + _JVM_CurrentThread + _JVM_CurrentTimeMillis + _JVM_DefineClass + _JVM_DefineClassWithSource + _JVM_DefineClassWithSourceCond + _JVM_DesiredAssertionStatus + _JVM_DisableCompiler + _JVM_DoPrivileged + _JVM_DTraceGetVersion + _JVM_DTraceActivate + _JVM_DTraceIsProbeEnabled + _JVM_DTraceIsSupported + _JVM_DTraceDispose + _JVM_DumpAllStacks + _JVM_DumpThreads + _JVM_EnableCompiler + _JVM_Exit + _JVM_FillInStackTrace + _JVM_FindClassFromClass + _JVM_FindClassFromClassLoader + _JVM_FindClassFromBootLoader + _JVM_FindLibraryEntry + _JVM_FindLoadedClass + _JVM_FindPrimitiveClass + _JVM_FindSignal + _JVM_FreeMemory + _JVM_GC + _JVM_GetAllThreads + _JVM_GetArrayElement + _JVM_GetArrayLength + _JVM_GetCPClassNameUTF + _JVM_GetCPFieldClassNameUTF + _JVM_GetCPFieldModifiers + _JVM_GetCPFieldNameUTF + _JVM_GetCPFieldSignatureUTF + _JVM_GetCPMethodClassNameUTF + _JVM_GetCPMethodModifiers + _JVM_GetCPMethodNameUTF + _JVM_GetCPMethodSignatureUTF + _JVM_GetCallerClass + _JVM_GetClassAccessFlags + _JVM_GetClassAnnotations + _JVM_GetClassCPEntriesCount + _JVM_GetClassCPTypes + _JVM_GetClassConstantPool + _JVM_GetClassContext + _JVM_GetClassDeclaredConstructors + _JVM_GetClassDeclaredFields + _JVM_GetClassDeclaredMethods + _JVM_GetClassFieldsCount + _JVM_GetClassInterfaces + _JVM_GetClassLoader + _JVM_GetClassMethodsCount + _JVM_GetClassModifiers + _JVM_GetClassName + _JVM_GetClassNameUTF + _JVM_GetClassSignature + _JVM_GetClassSigners + _JVM_GetClassTypeAnnotations + _JVM_GetComponentType + _JVM_GetDeclaredClasses + _JVM_GetDeclaringClass + 
_JVM_GetEnclosingMethodInfo + _JVM_GetFieldAnnotations + _JVM_GetFieldIxModifiers + _JVM_GetFieldTypeAnnotations + _JVM_GetHostName + _JVM_GetInheritedAccessControlContext + _JVM_GetInterfaceVersion + _JVM_GetLastErrorString + _JVM_GetManagement + _JVM_GetMethodAnnotations + _JVM_GetMethodDefaultAnnotationValue + _JVM_GetMethodIxArgsSize + _JVM_GetMethodIxByteCode + _JVM_GetMethodIxByteCodeLength + _JVM_GetMethodIxExceptionIndexes + _JVM_GetMethodIxExceptionTableEntry + _JVM_GetMethodIxExceptionTableLength + _JVM_GetMethodIxExceptionsCount + _JVM_GetMethodIxLocalsCount + _JVM_GetMethodIxMaxStack + _JVM_GetMethodIxModifiers + _JVM_GetMethodIxNameUTF + _JVM_GetMethodIxSignatureUTF + _JVM_GetMethodParameterAnnotations + _JVM_GetMethodParameters + _JVM_GetMethodTypeAnnotations + _JVM_GetPrimitiveArrayElement + _JVM_GetProtectionDomain + _JVM_GetSockName + _JVM_GetSockOpt + _JVM_GetStackAccessControlContext + _JVM_GetStackTraceDepth + _JVM_GetStackTraceElement + _JVM_GetSystemPackage + _JVM_GetSystemPackages + _JVM_GetThreadStateNames + _JVM_GetThreadStateValues + _JVM_GetVersionInfo + _JVM_Halt + _JVM_HoldsLock + _JVM_IHashCode + _JVM_InitAgentProperties + _JVM_InitProperties + _JVM_InitializeCompiler + _JVM_InitializeSocketLibrary + _JVM_InternString + _JVM_Interrupt + _JVM_InvokeMethod + _JVM_IsArrayClass + _JVM_IsConstructorIx + _JVM_IsInterface + _JVM_IsInterrupted + _JVM_IsNaN + _JVM_IsPrimitiveClass + _JVM_IsSameClassPackage + _JVM_IsSilentCompiler + _JVM_IsSupportedJNIVersion + _JVM_IsThreadAlive + _JVM_IsVMGeneratedMethodIx + _JVM_LatestUserDefinedLoader + _JVM_Listen + _JVM_LoadClass0 + _JVM_LoadLibrary + _JVM_Lseek + _JVM_MaxObjectInspectionAge + _JVM_MaxMemory + _JVM_MonitorNotify + _JVM_MonitorNotifyAll + _JVM_MonitorWait + _JVM_NanoTime + _JVM_NativePath + _JVM_NewArray + _JVM_NewInstanceFromConstructor + _JVM_NewMultiArray + _JVM_OnExit + _JVM_Open + _JVM_RaiseSignal + _JVM_RawMonitorCreate + _JVM_RawMonitorDestroy + _JVM_RawMonitorEnter + _JVM_RawMonitorExit + _JVM_Read + _JVM_Recv + _JVM_RecvFrom + _JVM_RegisterSignal + _JVM_ReleaseUTF + _JVM_ResolveClass + _JVM_ResumeThread + _JVM_Send + _JVM_SendTo + _JVM_SetArrayElement + _JVM_SetClassSigners + _JVM_SetLength + _JVM_SetNativeThreadName + _JVM_SetPrimitiveArrayElement + _JVM_SetSockOpt + _JVM_SetThreadPriority + _JVM_Sleep + _JVM_Socket + _JVM_SocketAvailable + _JVM_SocketClose + _JVM_SocketShutdown + _JVM_StartThread + _JVM_StopThread + _JVM_SuspendThread + _JVM_SupportsCX8 + _JVM_Sync + _JVM_Timeout + _JVM_TotalMemory + _JVM_TraceInstructions + _JVM_TraceMethodCalls + _JVM_UnloadLibrary + _JVM_Write + _JVM_Yield + _JVM_handle_bsd_signal + + # miscellaneous functions + _jio_fprintf + _jio_printf + _jio_snprintf + _jio_vfprintf + _jio_vsnprintf + + # This is for Forte Analyzer profiling support. + _AsyncGetCallTrace + + # INSERT VTABLE SYMBOLS HERE + diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/mapfile-vers-darwin-product --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/bsd/makefiles/mapfile-vers-darwin-product Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,256 @@ +# +# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# +# Only used for OSX/Darwin builds + +# Define public interface. + # _JNI + _JNI_CreateJavaVM + _JNI_GetCreatedJavaVMs + _JNI_GetDefaultJavaVMInitArgs + + # _JVM + _JVM_Accept + _JVM_ActiveProcessorCount + _JVM_AllocateNewArray + _JVM_AllocateNewObject + _JVM_ArrayCopy + _JVM_AssertionStatusDirectives + _JVM_Available + _JVM_Bind + _JVM_ClassDepth + _JVM_ClassLoaderDepth + _JVM_Clone + _JVM_Close + _JVM_CX8Field + _JVM_CompileClass + _JVM_CompileClasses + _JVM_CompilerCommand + _JVM_Connect + _JVM_ConstantPoolGetClassAt + _JVM_ConstantPoolGetClassAtIfLoaded + _JVM_ConstantPoolGetDoubleAt + _JVM_ConstantPoolGetFieldAt + _JVM_ConstantPoolGetFieldAtIfLoaded + _JVM_ConstantPoolGetFloatAt + _JVM_ConstantPoolGetIntAt + _JVM_ConstantPoolGetLongAt + _JVM_ConstantPoolGetMethodAt + _JVM_ConstantPoolGetMethodAtIfLoaded + _JVM_ConstantPoolGetMemberRefInfoAt + _JVM_ConstantPoolGetSize + _JVM_ConstantPoolGetStringAt + _JVM_ConstantPoolGetUTF8At + _JVM_CountStackFrames + _JVM_CurrentClassLoader + _JVM_CurrentLoadedClass + _JVM_CurrentThread + _JVM_CurrentTimeMillis + _JVM_DefineClass + _JVM_DefineClassWithSource + _JVM_DefineClassWithSourceCond + _JVM_DesiredAssertionStatus + _JVM_DisableCompiler + _JVM_DoPrivileged + _JVM_DTraceGetVersion + _JVM_DTraceActivate + _JVM_DTraceIsProbeEnabled + _JVM_DTraceIsSupported + _JVM_DTraceDispose + _JVM_DumpAllStacks + _JVM_DumpThreads + _JVM_EnableCompiler + _JVM_Exit + _JVM_FillInStackTrace + _JVM_FindClassFromClass + _JVM_FindClassFromClassLoader + _JVM_FindClassFromBootLoader + _JVM_FindLibraryEntry + _JVM_FindLoadedClass + _JVM_FindPrimitiveClass + _JVM_FindSignal + _JVM_FreeMemory + _JVM_GC + _JVM_GetAllThreads + _JVM_GetArrayElement + _JVM_GetArrayLength + _JVM_GetCPClassNameUTF + _JVM_GetCPFieldClassNameUTF + _JVM_GetCPFieldModifiers + _JVM_GetCPFieldNameUTF + _JVM_GetCPFieldSignatureUTF + _JVM_GetCPMethodClassNameUTF + _JVM_GetCPMethodModifiers + _JVM_GetCPMethodNameUTF + _JVM_GetCPMethodSignatureUTF + _JVM_GetCallerClass + _JVM_GetClassAccessFlags + _JVM_GetClassAnnotations + _JVM_GetClassCPEntriesCount + _JVM_GetClassCPTypes + _JVM_GetClassConstantPool + _JVM_GetClassContext + _JVM_GetClassDeclaredConstructors + _JVM_GetClassDeclaredFields + _JVM_GetClassDeclaredMethods + _JVM_GetClassFieldsCount + _JVM_GetClassInterfaces + _JVM_GetClassLoader + _JVM_GetClassMethodsCount + _JVM_GetClassModifiers + _JVM_GetClassName + _JVM_GetClassNameUTF + _JVM_GetClassSignature + _JVM_GetClassSigners + _JVM_GetClassTypeAnnotations + _JVM_GetComponentType + _JVM_GetDeclaredClasses + _JVM_GetDeclaringClass + _JVM_GetEnclosingMethodInfo + _JVM_GetFieldAnnotations + _JVM_GetFieldIxModifiers + _JVM_GetFieldTypeAnnotations + _JVM_GetHostName + _JVM_GetInheritedAccessControlContext + _JVM_GetInterfaceVersion + _JVM_GetLastErrorString + _JVM_GetManagement + 
_JVM_GetMethodAnnotations + _JVM_GetMethodDefaultAnnotationValue + _JVM_GetMethodIxArgsSize + _JVM_GetMethodIxByteCode + _JVM_GetMethodIxByteCodeLength + _JVM_GetMethodIxExceptionIndexes + _JVM_GetMethodIxExceptionTableEntry + _JVM_GetMethodIxExceptionTableLength + _JVM_GetMethodIxExceptionsCount + _JVM_GetMethodIxLocalsCount + _JVM_GetMethodIxMaxStack + _JVM_GetMethodIxModifiers + _JVM_GetMethodIxNameUTF + _JVM_GetMethodIxSignatureUTF + _JVM_GetMethodParameterAnnotations + _JVM_GetMethodParameters + _JVM_GetMethodTypeAnnotations + _JVM_GetPrimitiveArrayElement + _JVM_GetProtectionDomain + _JVM_GetSockName + _JVM_GetSockOpt + _JVM_GetStackAccessControlContext + _JVM_GetStackTraceDepth + _JVM_GetStackTraceElement + _JVM_GetSystemPackage + _JVM_GetSystemPackages + _JVM_GetThreadStateNames + _JVM_GetThreadStateValues + _JVM_GetVersionInfo + _JVM_Halt + _JVM_HoldsLock + _JVM_IHashCode + _JVM_InitAgentProperties + _JVM_InitProperties + _JVM_InitializeCompiler + _JVM_InitializeSocketLibrary + _JVM_InternString + _JVM_Interrupt + _JVM_InvokeMethod + _JVM_IsArrayClass + _JVM_IsConstructorIx + _JVM_IsInterface + _JVM_IsInterrupted + _JVM_IsNaN + _JVM_IsPrimitiveClass + _JVM_IsSameClassPackage + _JVM_IsSilentCompiler + _JVM_IsSupportedJNIVersion + _JVM_IsThreadAlive + _JVM_IsVMGeneratedMethodIx + _JVM_LatestUserDefinedLoader + _JVM_Listen + _JVM_LoadClass0 + _JVM_LoadLibrary + _JVM_Lseek + _JVM_MaxObjectInspectionAge + _JVM_MaxMemory + _JVM_MonitorNotify + _JVM_MonitorNotifyAll + _JVM_MonitorWait + _JVM_NanoTime + _JVM_NativePath + _JVM_NewArray + _JVM_NewInstanceFromConstructor + _JVM_NewMultiArray + _JVM_OnExit + _JVM_Open + _JVM_RaiseSignal + _JVM_RawMonitorCreate + _JVM_RawMonitorDestroy + _JVM_RawMonitorEnter + _JVM_RawMonitorExit + _JVM_Read + _JVM_Recv + _JVM_RecvFrom + _JVM_RegisterSignal + _JVM_ReleaseUTF + _JVM_ResolveClass + _JVM_ResumeThread + _JVM_Send + _JVM_SendTo + _JVM_SetArrayElement + _JVM_SetClassSigners + _JVM_SetLength + _JVM_SetNativeThreadName + _JVM_SetPrimitiveArrayElement + _JVM_SetSockOpt + _JVM_SetThreadPriority + _JVM_Sleep + _JVM_Socket + _JVM_SocketAvailable + _JVM_SocketClose + _JVM_SocketShutdown + _JVM_StartThread + _JVM_StopThread + _JVM_SuspendThread + _JVM_SupportsCX8 + _JVM_Sync + _JVM_Timeout + _JVM_TotalMemory + _JVM_TraceInstructions + _JVM_TraceMethodCalls + _JVM_UnloadLibrary + _JVM_Write + _JVM_Yield + _JVM_handle_bsd_signal + + # miscellaneous functions + _jio_fprintf + _jio_printf + _jio_snprintf + _jio_vfprintf + _jio_vsnprintf + + # This is for Forte Analyzer profiling support. + _AsyncGetCallTrace + + # INSERT VTABLE SYMBOLS HERE + diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/mapfile-vers-debug --- a/make/bsd/makefiles/mapfile-vers-debug Thu Feb 06 13:08:44 2014 -0800 +++ b/make/bsd/makefiles/mapfile-vers-debug Tue Feb 11 11:26:05 2014 -0800 @@ -19,238 +19,250 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # -# Only used for OSX/Darwin builds # Define public interface. 
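# Note on the mapfile rewrites below: the old bsd mapfile-vers-debug and
# mapfile-vers-product were flat Darwin exported-symbols lists (bare names
# with the Mach-O leading underscore); the new mapfile-vers-darwin-* files
# added above keep that form, while these two are rewritten as ELF linker
# version scripts (a named version block, ';'-terminated symbols, and a
# catch-all "local: *;"). A minimal sketch of the two shapes, using two
# symbols from the lists for illustration:
#
#   Darwin symbols list:        ELF version script:
#     _JNI_CreateJavaVM           SUNWprivate_1.1 {
#     _JVM_StartThread              global:
#                                     JNI_CreateJavaVM;
#                                     JVM_StartThread;
#                                   local:
#                                     *;
#                                 };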
- # _JNI - _JNI_CreateJavaVM - _JNI_GetCreatedJavaVMs - _JNI_GetDefaultJavaVMInitArgs + +SUNWprivate_1.1 { + global: + # JNI + JNI_CreateJavaVM; + JNI_GetCreatedJavaVMs; + JNI_GetDefaultJavaVMInitArgs; - # _JVM - _JVM_Accept - _JVM_ActiveProcessorCount - _JVM_AllocateNewArray - _JVM_AllocateNewObject - _JVM_ArrayCopy - _JVM_AssertionStatusDirectives - _JVM_Available - _JVM_Bind - _JVM_ClassDepth - _JVM_ClassLoaderDepth - _JVM_Clone - _JVM_Close - _JVM_CX8Field - _JVM_CompileClass - _JVM_CompileClasses - _JVM_CompilerCommand - _JVM_Connect - _JVM_ConstantPoolGetClassAt - _JVM_ConstantPoolGetClassAtIfLoaded - _JVM_ConstantPoolGetDoubleAt - _JVM_ConstantPoolGetFieldAt - _JVM_ConstantPoolGetFieldAtIfLoaded - _JVM_ConstantPoolGetFloatAt - _JVM_ConstantPoolGetIntAt - _JVM_ConstantPoolGetLongAt - _JVM_ConstantPoolGetMethodAt - _JVM_ConstantPoolGetMethodAtIfLoaded - _JVM_ConstantPoolGetMemberRefInfoAt - _JVM_ConstantPoolGetSize - _JVM_ConstantPoolGetStringAt - _JVM_ConstantPoolGetUTF8At - _JVM_CountStackFrames - _JVM_CurrentClassLoader - _JVM_CurrentLoadedClass - _JVM_CurrentThread - _JVM_CurrentTimeMillis - _JVM_DefineClass - _JVM_DefineClassWithSource - _JVM_DefineClassWithSourceCond - _JVM_DesiredAssertionStatus - _JVM_DisableCompiler - _JVM_DoPrivileged - _JVM_DTraceGetVersion - _JVM_DTraceActivate - _JVM_DTraceIsProbeEnabled - _JVM_DTraceIsSupported - _JVM_DTraceDispose - _JVM_DumpAllStacks - _JVM_DumpThreads - _JVM_EnableCompiler - _JVM_Exit - _JVM_FillInStackTrace - _JVM_FindClassFromClass - _JVM_FindClassFromClassLoader - _JVM_FindClassFromBootLoader - _JVM_FindLibraryEntry - _JVM_FindLoadedClass - _JVM_FindPrimitiveClass - _JVM_FindSignal - _JVM_FreeMemory - _JVM_GC - _JVM_GetAllThreads - _JVM_GetArrayElement - _JVM_GetArrayLength - _JVM_GetCPClassNameUTF - _JVM_GetCPFieldClassNameUTF - _JVM_GetCPFieldModifiers - _JVM_GetCPFieldNameUTF - _JVM_GetCPFieldSignatureUTF - _JVM_GetCPMethodClassNameUTF - _JVM_GetCPMethodModifiers - _JVM_GetCPMethodNameUTF - _JVM_GetCPMethodSignatureUTF - _JVM_GetCallerClass - _JVM_GetClassAccessFlags - _JVM_GetClassAnnotations - _JVM_GetClassCPEntriesCount - _JVM_GetClassCPTypes - _JVM_GetClassConstantPool - _JVM_GetClassContext - _JVM_GetClassDeclaredConstructors - _JVM_GetClassDeclaredFields - _JVM_GetClassDeclaredMethods - _JVM_GetClassFieldsCount - _JVM_GetClassInterfaces - _JVM_GetClassLoader - _JVM_GetClassMethodsCount - _JVM_GetClassModifiers - _JVM_GetClassName - _JVM_GetClassNameUTF - _JVM_GetClassSignature - _JVM_GetClassSigners - _JVM_GetClassTypeAnnotations - _JVM_GetComponentType - _JVM_GetDeclaredClasses - _JVM_GetDeclaringClass - _JVM_GetEnclosingMethodInfo - _JVM_GetFieldAnnotations - _JVM_GetFieldIxModifiers - _JVM_GetFieldTypeAnnotations - _JVM_GetHostName - _JVM_GetInheritedAccessControlContext - _JVM_GetInterfaceVersion - _JVM_GetLastErrorString - _JVM_GetManagement - _JVM_GetMethodAnnotations - _JVM_GetMethodDefaultAnnotationValue - _JVM_GetMethodIxArgsSize - _JVM_GetMethodIxByteCode - _JVM_GetMethodIxByteCodeLength - _JVM_GetMethodIxExceptionIndexes - _JVM_GetMethodIxExceptionTableEntry - _JVM_GetMethodIxExceptionTableLength - _JVM_GetMethodIxExceptionsCount - _JVM_GetMethodIxLocalsCount - _JVM_GetMethodIxMaxStack - _JVM_GetMethodIxModifiers - _JVM_GetMethodIxNameUTF - _JVM_GetMethodIxSignatureUTF - _JVM_GetMethodParameterAnnotations - _JVM_GetMethodParameters - _JVM_GetMethodTypeAnnotations - _JVM_GetPrimitiveArrayElement - _JVM_GetProtectionDomain - _JVM_GetSockName - _JVM_GetSockOpt - _JVM_GetStackAccessControlContext - 
_JVM_GetStackTraceDepth - _JVM_GetStackTraceElement - _JVM_GetSystemPackage - _JVM_GetSystemPackages - _JVM_GetThreadStateNames - _JVM_GetThreadStateValues - _JVM_GetVersionInfo - _JVM_Halt - _JVM_HoldsLock - _JVM_IHashCode - _JVM_InitAgentProperties - _JVM_InitProperties - _JVM_InitializeCompiler - _JVM_InitializeSocketLibrary - _JVM_InternString - _JVM_Interrupt - _JVM_InvokeMethod - _JVM_IsArrayClass - _JVM_IsConstructorIx - _JVM_IsInterface - _JVM_IsInterrupted - _JVM_IsNaN - _JVM_IsPrimitiveClass - _JVM_IsSameClassPackage - _JVM_IsSilentCompiler - _JVM_IsSupportedJNIVersion - _JVM_IsThreadAlive - _JVM_IsVMGeneratedMethodIx - _JVM_LatestUserDefinedLoader - _JVM_Listen - _JVM_LoadClass0 - _JVM_LoadLibrary - _JVM_Lseek - _JVM_MaxObjectInspectionAge - _JVM_MaxMemory - _JVM_MonitorNotify - _JVM_MonitorNotifyAll - _JVM_MonitorWait - _JVM_NanoTime - _JVM_NativePath - _JVM_NewArray - _JVM_NewInstanceFromConstructor - _JVM_NewMultiArray - _JVM_OnExit - _JVM_Open - _JVM_RaiseSignal - _JVM_RawMonitorCreate - _JVM_RawMonitorDestroy - _JVM_RawMonitorEnter - _JVM_RawMonitorExit - _JVM_Read - _JVM_Recv - _JVM_RecvFrom - _JVM_RegisterSignal - _JVM_ReleaseUTF - _JVM_ResolveClass - _JVM_ResumeThread - _JVM_Send - _JVM_SendTo - _JVM_SetArrayElement - _JVM_SetClassSigners - _JVM_SetLength - _JVM_SetNativeThreadName - _JVM_SetPrimitiveArrayElement - _JVM_SetSockOpt - _JVM_SetThreadPriority - _JVM_Sleep - _JVM_Socket - _JVM_SocketAvailable - _JVM_SocketClose - _JVM_SocketShutdown - _JVM_StartThread - _JVM_StopThread - _JVM_SuspendThread - _JVM_SupportsCX8 - _JVM_Sync - _JVM_Timeout - _JVM_TotalMemory - _JVM_TraceInstructions - _JVM_TraceMethodCalls - _JVM_UnloadLibrary - _JVM_Write - _JVM_Yield - _JVM_handle_bsd_signal + # JVM + JVM_Accept; + JVM_ActiveProcessorCount; + JVM_AllocateNewArray; + JVM_AllocateNewObject; + JVM_ArrayCopy; + JVM_AssertionStatusDirectives; + JVM_Available; + JVM_Bind; + JVM_ClassDepth; + JVM_ClassLoaderDepth; + JVM_Clone; + JVM_Close; + JVM_CX8Field; + JVM_CompileClass; + JVM_CompileClasses; + JVM_CompilerCommand; + JVM_Connect; + JVM_ConstantPoolGetClassAt; + JVM_ConstantPoolGetClassAtIfLoaded; + JVM_ConstantPoolGetDoubleAt; + JVM_ConstantPoolGetFieldAt; + JVM_ConstantPoolGetFieldAtIfLoaded; + JVM_ConstantPoolGetFloatAt; + JVM_ConstantPoolGetIntAt; + JVM_ConstantPoolGetLongAt; + JVM_ConstantPoolGetMethodAt; + JVM_ConstantPoolGetMethodAtIfLoaded; + JVM_ConstantPoolGetMemberRefInfoAt; + JVM_ConstantPoolGetSize; + JVM_ConstantPoolGetStringAt; + JVM_ConstantPoolGetUTF8At; + JVM_CountStackFrames; + JVM_CurrentClassLoader; + JVM_CurrentLoadedClass; + JVM_CurrentThread; + JVM_CurrentTimeMillis; + JVM_DefineClass; + JVM_DefineClassWithSource; + JVM_DefineClassWithSourceCond; + JVM_DesiredAssertionStatus; + JVM_DisableCompiler; + JVM_DoPrivileged; + JVM_DTraceGetVersion; + JVM_DTraceActivate; + JVM_DTraceIsProbeEnabled; + JVM_DTraceIsSupported; + JVM_DTraceDispose; + JVM_DumpAllStacks; + JVM_DumpThreads; + JVM_EnableCompiler; + JVM_Exit; + JVM_FillInStackTrace; + JVM_FindClassFromClass; + JVM_FindClassFromClassLoader; + JVM_FindClassFromBootLoader; + JVM_FindLibraryEntry; + JVM_FindLoadedClass; + JVM_FindPrimitiveClass; + JVM_FindSignal; + JVM_FreeMemory; + JVM_GC; + JVM_GetAllThreads; + JVM_GetArrayElement; + JVM_GetArrayLength; + JVM_GetCPClassNameUTF; + JVM_GetCPFieldClassNameUTF; + JVM_GetCPFieldModifiers; + JVM_GetCPFieldNameUTF; + JVM_GetCPFieldSignatureUTF; + JVM_GetCPMethodClassNameUTF; + JVM_GetCPMethodModifiers; + JVM_GetCPMethodNameUTF; + JVM_GetCPMethodSignatureUTF; + 
JVM_GetCallerClass; + JVM_GetClassAccessFlags; + JVM_GetClassAnnotations; + JVM_GetClassCPEntriesCount; + JVM_GetClassCPTypes; + JVM_GetClassConstantPool; + JVM_GetClassContext; + JVM_GetClassDeclaredConstructors; + JVM_GetClassDeclaredFields; + JVM_GetClassDeclaredMethods; + JVM_GetClassFieldsCount; + JVM_GetClassInterfaces; + JVM_GetClassLoader; + JVM_GetClassMethodsCount; + JVM_GetClassModifiers; + JVM_GetClassName; + JVM_GetClassNameUTF; + JVM_GetClassSignature; + JVM_GetClassSigners; + JVM_GetClassTypeAnnotations; + JVM_GetComponentType; + JVM_GetDeclaredClasses; + JVM_GetDeclaringClass; + JVM_GetEnclosingMethodInfo; + JVM_GetFieldAnnotations; + JVM_GetFieldIxModifiers; + JVM_GetFieldTypeAnnotations; + JVM_GetHostName; + JVM_GetInheritedAccessControlContext; + JVM_GetInterfaceVersion; + JVM_GetLastErrorString; + JVM_GetManagement; + JVM_GetMethodAnnotations; + JVM_GetMethodDefaultAnnotationValue; + JVM_GetMethodIxArgsSize; + JVM_GetMethodIxByteCode; + JVM_GetMethodIxByteCodeLength; + JVM_GetMethodIxExceptionIndexes; + JVM_GetMethodIxExceptionTableEntry; + JVM_GetMethodIxExceptionTableLength; + JVM_GetMethodIxExceptionsCount; + JVM_GetMethodIxLocalsCount; + JVM_GetMethodIxMaxStack; + JVM_GetMethodIxModifiers; + JVM_GetMethodIxNameUTF; + JVM_GetMethodIxSignatureUTF; + JVM_GetMethodParameterAnnotations; + JVM_GetMethodParameters; + JVM_GetMethodTypeAnnotations; + JVM_GetPrimitiveArrayElement; + JVM_GetProtectionDomain; + JVM_GetSockName; + JVM_GetSockOpt; + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetThreadStateNames; + JVM_GetThreadStateValues; + JVM_GetVersionInfo; + JVM_Halt; + JVM_HoldsLock; + JVM_IHashCode; + JVM_InitAgentProperties; + JVM_InitProperties; + JVM_InitializeCompiler; + JVM_InitializeSocketLibrary; + JVM_InternString; + JVM_Interrupt; + JVM_InvokeMethod; + JVM_IsArrayClass; + JVM_IsConstructorIx; + JVM_IsInterface; + JVM_IsInterrupted; + JVM_IsNaN; + JVM_IsPrimitiveClass; + JVM_IsSameClassPackage; + JVM_IsSilentCompiler; + JVM_IsSupportedJNIVersion; + JVM_IsThreadAlive; + JVM_IsVMGeneratedMethodIx; + JVM_LatestUserDefinedLoader; + JVM_Listen; + JVM_LoadClass0; + JVM_LoadLibrary; + JVM_Lseek; + JVM_MaxObjectInspectionAge; + JVM_MaxMemory; + JVM_MonitorNotify; + JVM_MonitorNotifyAll; + JVM_MonitorWait; + JVM_NanoTime; + JVM_NativePath; + JVM_NewArray; + JVM_NewInstanceFromConstructor; + JVM_NewMultiArray; + JVM_OnExit; + JVM_Open; + JVM_RaiseSignal; + JVM_RawMonitorCreate; + JVM_RawMonitorDestroy; + JVM_RawMonitorEnter; + JVM_RawMonitorExit; + JVM_Read; + JVM_Recv; + JVM_RecvFrom; + JVM_RegisterSignal; + JVM_ReleaseUTF; + JVM_ResolveClass; + JVM_ResumeThread; + JVM_Send; + JVM_SendTo; + JVM_SetArrayElement; + JVM_SetClassSigners; + JVM_SetLength; + JVM_SetNativeThreadName; + JVM_SetPrimitiveArrayElement; + JVM_SetSockOpt; + JVM_SetThreadPriority; + JVM_Sleep; + JVM_Socket; + JVM_SocketAvailable; + JVM_SocketClose; + JVM_SocketShutdown; + JVM_StartThread; + JVM_StopThread; + JVM_SuspendThread; + JVM_SupportsCX8; + JVM_Sync; + JVM_Timeout; + JVM_TotalMemory; + JVM_TraceInstructions; + JVM_TraceMethodCalls; + JVM_UnloadLibrary; + JVM_Write; + JVM_Yield; + JVM_handle_linux_signal; # miscellaneous functions - _jio_fprintf - _jio_printf - _jio_snprintf - _jio_vfprintf - _jio_vsnprintf + jio_fprintf; + jio_printf; + jio_snprintf; + jio_vfprintf; + jio_vsnprintf; + fork1; + numa_warn; + numa_error; + + # Needed because there is no JVM interface for this. 
+ sysThreadAvailableStackWithSlack; # This is for Forte Analyzer profiling support. - _AsyncGetCallTrace + AsyncGetCallTrace; + + # INSERT VTABLE SYMBOLS HERE - # INSERT VTABLE SYMBOLS HERE + local: + *; +}; diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/mapfile-vers-product --- a/make/bsd/makefiles/mapfile-vers-product Thu Feb 06 13:08:44 2014 -0800 +++ b/make/bsd/makefiles/mapfile-vers-product Tue Feb 11 11:26:05 2014 -0800 @@ -19,238 +19,250 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # -# Only used for OSX/Darwin builds # Define public interface. - # _JNI - _JNI_CreateJavaVM - _JNI_GetCreatedJavaVMs - _JNI_GetDefaultJavaVMInitArgs + +SUNWprivate_1.1 { + global: + # JNI + JNI_CreateJavaVM; + JNI_GetCreatedJavaVMs; + JNI_GetDefaultJavaVMInitArgs; - # _JVM - _JVM_Accept - _JVM_ActiveProcessorCount - _JVM_AllocateNewArray - _JVM_AllocateNewObject - _JVM_ArrayCopy - _JVM_AssertionStatusDirectives - _JVM_Available - _JVM_Bind - _JVM_ClassDepth - _JVM_ClassLoaderDepth - _JVM_Clone - _JVM_Close - _JVM_CX8Field - _JVM_CompileClass - _JVM_CompileClasses - _JVM_CompilerCommand - _JVM_Connect - _JVM_ConstantPoolGetClassAt - _JVM_ConstantPoolGetClassAtIfLoaded - _JVM_ConstantPoolGetDoubleAt - _JVM_ConstantPoolGetFieldAt - _JVM_ConstantPoolGetFieldAtIfLoaded - _JVM_ConstantPoolGetFloatAt - _JVM_ConstantPoolGetIntAt - _JVM_ConstantPoolGetLongAt - _JVM_ConstantPoolGetMethodAt - _JVM_ConstantPoolGetMethodAtIfLoaded - _JVM_ConstantPoolGetMemberRefInfoAt - _JVM_ConstantPoolGetSize - _JVM_ConstantPoolGetStringAt - _JVM_ConstantPoolGetUTF8At - _JVM_CountStackFrames - _JVM_CurrentClassLoader - _JVM_CurrentLoadedClass - _JVM_CurrentThread - _JVM_CurrentTimeMillis - _JVM_DefineClass - _JVM_DefineClassWithSource - _JVM_DefineClassWithSourceCond - _JVM_DesiredAssertionStatus - _JVM_DisableCompiler - _JVM_DoPrivileged - _JVM_DTraceGetVersion - _JVM_DTraceActivate - _JVM_DTraceIsProbeEnabled - _JVM_DTraceIsSupported - _JVM_DTraceDispose - _JVM_DumpAllStacks - _JVM_DumpThreads - _JVM_EnableCompiler - _JVM_Exit - _JVM_FillInStackTrace - _JVM_FindClassFromClass - _JVM_FindClassFromClassLoader - _JVM_FindClassFromBootLoader - _JVM_FindLibraryEntry - _JVM_FindLoadedClass - _JVM_FindPrimitiveClass - _JVM_FindSignal - _JVM_FreeMemory - _JVM_GC - _JVM_GetAllThreads - _JVM_GetArrayElement - _JVM_GetArrayLength - _JVM_GetCPClassNameUTF - _JVM_GetCPFieldClassNameUTF - _JVM_GetCPFieldModifiers - _JVM_GetCPFieldNameUTF - _JVM_GetCPFieldSignatureUTF - _JVM_GetCPMethodClassNameUTF - _JVM_GetCPMethodModifiers - _JVM_GetCPMethodNameUTF - _JVM_GetCPMethodSignatureUTF - _JVM_GetCallerClass - _JVM_GetClassAccessFlags - _JVM_GetClassAnnotations - _JVM_GetClassCPEntriesCount - _JVM_GetClassCPTypes - _JVM_GetClassConstantPool - _JVM_GetClassContext - _JVM_GetClassDeclaredConstructors - _JVM_GetClassDeclaredFields - _JVM_GetClassDeclaredMethods - _JVM_GetClassFieldsCount - _JVM_GetClassInterfaces - _JVM_GetClassLoader - _JVM_GetClassMethodsCount - _JVM_GetClassModifiers - _JVM_GetClassName - _JVM_GetClassNameUTF - _JVM_GetClassSignature - _JVM_GetClassSigners - _JVM_GetClassTypeAnnotations - _JVM_GetComponentType - _JVM_GetDeclaredClasses - _JVM_GetDeclaringClass - _JVM_GetEnclosingMethodInfo - _JVM_GetFieldAnnotations - _JVM_GetFieldIxModifiers - _JVM_GetFieldTypeAnnotations - _JVM_GetHostName - _JVM_GetInheritedAccessControlContext - _JVM_GetInterfaceVersion - _JVM_GetLastErrorString - 
_JVM_GetManagement - _JVM_GetMethodAnnotations - _JVM_GetMethodDefaultAnnotationValue - _JVM_GetMethodIxArgsSize - _JVM_GetMethodIxByteCode - _JVM_GetMethodIxByteCodeLength - _JVM_GetMethodIxExceptionIndexes - _JVM_GetMethodIxExceptionTableEntry - _JVM_GetMethodIxExceptionTableLength - _JVM_GetMethodIxExceptionsCount - _JVM_GetMethodIxLocalsCount - _JVM_GetMethodIxMaxStack - _JVM_GetMethodIxModifiers - _JVM_GetMethodIxNameUTF - _JVM_GetMethodIxSignatureUTF - _JVM_GetMethodParameterAnnotations - _JVM_GetMethodParameters - _JVM_GetMethodTypeAnnotations - _JVM_GetPrimitiveArrayElement - _JVM_GetProtectionDomain - _JVM_GetSockName - _JVM_GetSockOpt - _JVM_GetStackAccessControlContext - _JVM_GetStackTraceDepth - _JVM_GetStackTraceElement - _JVM_GetSystemPackage - _JVM_GetSystemPackages - _JVM_GetThreadStateNames - _JVM_GetThreadStateValues - _JVM_GetVersionInfo - _JVM_Halt - _JVM_HoldsLock - _JVM_IHashCode - _JVM_InitAgentProperties - _JVM_InitProperties - _JVM_InitializeCompiler - _JVM_InitializeSocketLibrary - _JVM_InternString - _JVM_Interrupt - _JVM_InvokeMethod - _JVM_IsArrayClass - _JVM_IsConstructorIx - _JVM_IsInterface - _JVM_IsInterrupted - _JVM_IsNaN - _JVM_IsPrimitiveClass - _JVM_IsSameClassPackage - _JVM_IsSilentCompiler - _JVM_IsSupportedJNIVersion - _JVM_IsThreadAlive - _JVM_IsVMGeneratedMethodIx - _JVM_LatestUserDefinedLoader - _JVM_Listen - _JVM_LoadClass0 - _JVM_LoadLibrary - _JVM_Lseek - _JVM_MaxObjectInspectionAge - _JVM_MaxMemory - _JVM_MonitorNotify - _JVM_MonitorNotifyAll - _JVM_MonitorWait - _JVM_NanoTime - _JVM_NativePath - _JVM_NewArray - _JVM_NewInstanceFromConstructor - _JVM_NewMultiArray - _JVM_OnExit - _JVM_Open - _JVM_RaiseSignal - _JVM_RawMonitorCreate - _JVM_RawMonitorDestroy - _JVM_RawMonitorEnter - _JVM_RawMonitorExit - _JVM_Read - _JVM_Recv - _JVM_RecvFrom - _JVM_RegisterSignal - _JVM_ReleaseUTF - _JVM_ResolveClass - _JVM_ResumeThread - _JVM_Send - _JVM_SendTo - _JVM_SetArrayElement - _JVM_SetClassSigners - _JVM_SetLength - _JVM_SetNativeThreadName - _JVM_SetPrimitiveArrayElement - _JVM_SetSockOpt - _JVM_SetThreadPriority - _JVM_Sleep - _JVM_Socket - _JVM_SocketAvailable - _JVM_SocketClose - _JVM_SocketShutdown - _JVM_StartThread - _JVM_StopThread - _JVM_SuspendThread - _JVM_SupportsCX8 - _JVM_Sync - _JVM_Timeout - _JVM_TotalMemory - _JVM_TraceInstructions - _JVM_TraceMethodCalls - _JVM_UnloadLibrary - _JVM_Write - _JVM_Yield - _JVM_handle_bsd_signal + # JVM + JVM_Accept; + JVM_ActiveProcessorCount; + JVM_AllocateNewArray; + JVM_AllocateNewObject; + JVM_ArrayCopy; + JVM_AssertionStatusDirectives; + JVM_Available; + JVM_Bind; + JVM_ClassDepth; + JVM_ClassLoaderDepth; + JVM_Clone; + JVM_Close; + JVM_CX8Field; + JVM_CompileClass; + JVM_CompileClasses; + JVM_CompilerCommand; + JVM_Connect; + JVM_ConstantPoolGetClassAt; + JVM_ConstantPoolGetClassAtIfLoaded; + JVM_ConstantPoolGetDoubleAt; + JVM_ConstantPoolGetFieldAt; + JVM_ConstantPoolGetFieldAtIfLoaded; + JVM_ConstantPoolGetFloatAt; + JVM_ConstantPoolGetIntAt; + JVM_ConstantPoolGetLongAt; + JVM_ConstantPoolGetMethodAt; + JVM_ConstantPoolGetMethodAtIfLoaded; + JVM_ConstantPoolGetMemberRefInfoAt; + JVM_ConstantPoolGetSize; + JVM_ConstantPoolGetStringAt; + JVM_ConstantPoolGetUTF8At; + JVM_CountStackFrames; + JVM_CurrentClassLoader; + JVM_CurrentLoadedClass; + JVM_CurrentThread; + JVM_CurrentTimeMillis; + JVM_DefineClass; + JVM_DefineClassWithSource; + JVM_DefineClassWithSourceCond; + JVM_DesiredAssertionStatus; + JVM_DisableCompiler; + JVM_DoPrivileged; + JVM_DTraceGetVersion; + JVM_DTraceActivate; + 
JVM_DTraceIsProbeEnabled; + JVM_DTraceIsSupported; + JVM_DTraceDispose; + JVM_DumpAllStacks; + JVM_DumpThreads; + JVM_EnableCompiler; + JVM_Exit; + JVM_FillInStackTrace; + JVM_FindClassFromClass; + JVM_FindClassFromClassLoader; + JVM_FindClassFromBootLoader; + JVM_FindLibraryEntry; + JVM_FindLoadedClass; + JVM_FindPrimitiveClass; + JVM_FindSignal; + JVM_FreeMemory; + JVM_GC; + JVM_GetAllThreads; + JVM_GetArrayElement; + JVM_GetArrayLength; + JVM_GetCPClassNameUTF; + JVM_GetCPFieldClassNameUTF; + JVM_GetCPFieldModifiers; + JVM_GetCPFieldNameUTF; + JVM_GetCPFieldSignatureUTF; + JVM_GetCPMethodClassNameUTF; + JVM_GetCPMethodModifiers; + JVM_GetCPMethodNameUTF; + JVM_GetCPMethodSignatureUTF; + JVM_GetCallerClass; + JVM_GetClassAccessFlags; + JVM_GetClassAnnotations; + JVM_GetClassCPEntriesCount; + JVM_GetClassCPTypes; + JVM_GetClassConstantPool; + JVM_GetClassContext; + JVM_GetClassDeclaredConstructors; + JVM_GetClassDeclaredFields; + JVM_GetClassDeclaredMethods; + JVM_GetClassFieldsCount; + JVM_GetClassInterfaces; + JVM_GetClassLoader; + JVM_GetClassMethodsCount; + JVM_GetClassModifiers; + JVM_GetClassName; + JVM_GetClassNameUTF; + JVM_GetClassSignature; + JVM_GetClassSigners; + JVM_GetClassTypeAnnotations; + JVM_GetComponentType; + JVM_GetDeclaredClasses; + JVM_GetDeclaringClass; + JVM_GetEnclosingMethodInfo; + JVM_GetFieldAnnotations; + JVM_GetFieldIxModifiers; + JVM_GetFieldTypeAnnotations; + JVM_GetHostName; + JVM_GetInheritedAccessControlContext; + JVM_GetInterfaceVersion; + JVM_GetLastErrorString; + JVM_GetManagement; + JVM_GetMethodAnnotations; + JVM_GetMethodDefaultAnnotationValue; + JVM_GetMethodIxArgsSize; + JVM_GetMethodIxByteCode; + JVM_GetMethodIxByteCodeLength; + JVM_GetMethodIxExceptionIndexes; + JVM_GetMethodIxExceptionTableEntry; + JVM_GetMethodIxExceptionTableLength; + JVM_GetMethodIxExceptionsCount; + JVM_GetMethodIxLocalsCount; + JVM_GetMethodIxMaxStack; + JVM_GetMethodIxModifiers; + JVM_GetMethodIxNameUTF; + JVM_GetMethodIxSignatureUTF; + JVM_GetMethodParameterAnnotations; + JVM_GetMethodParameters; + JVM_GetMethodTypeAnnotations; + JVM_GetPrimitiveArrayElement; + JVM_GetProtectionDomain; + JVM_GetSockName; + JVM_GetSockOpt; + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetThreadStateNames; + JVM_GetThreadStateValues; + JVM_GetVersionInfo; + JVM_Halt; + JVM_HoldsLock; + JVM_IHashCode; + JVM_InitAgentProperties; + JVM_InitProperties; + JVM_InitializeCompiler; + JVM_InitializeSocketLibrary; + JVM_InternString; + JVM_Interrupt; + JVM_InvokeMethod; + JVM_IsArrayClass; + JVM_IsConstructorIx; + JVM_IsInterface; + JVM_IsInterrupted; + JVM_IsNaN; + JVM_IsPrimitiveClass; + JVM_IsSameClassPackage; + JVM_IsSilentCompiler; + JVM_IsSupportedJNIVersion; + JVM_IsThreadAlive; + JVM_IsVMGeneratedMethodIx; + JVM_LatestUserDefinedLoader; + JVM_Listen; + JVM_LoadClass0; + JVM_LoadLibrary; + JVM_Lseek; + JVM_MaxObjectInspectionAge; + JVM_MaxMemory; + JVM_MonitorNotify; + JVM_MonitorNotifyAll; + JVM_MonitorWait; + JVM_NanoTime; + JVM_NativePath; + JVM_NewArray; + JVM_NewInstanceFromConstructor; + JVM_NewMultiArray; + JVM_OnExit; + JVM_Open; + JVM_RaiseSignal; + JVM_RawMonitorCreate; + JVM_RawMonitorDestroy; + JVM_RawMonitorEnter; + JVM_RawMonitorExit; + JVM_Read; + JVM_Recv; + JVM_RecvFrom; + JVM_RegisterSignal; + JVM_ReleaseUTF; + JVM_ResolveClass; + JVM_ResumeThread; + JVM_Send; + JVM_SendTo; + JVM_SetArrayElement; + JVM_SetClassSigners; + JVM_SetLength; + JVM_SetNativeThreadName; + 
JVM_SetPrimitiveArrayElement; + JVM_SetSockOpt; + JVM_SetThreadPriority; + JVM_Sleep; + JVM_Socket; + JVM_SocketAvailable; + JVM_SocketClose; + JVM_SocketShutdown; + JVM_StartThread; + JVM_StopThread; + JVM_SuspendThread; + JVM_SupportsCX8; + JVM_Sync; + JVM_Timeout; + JVM_TotalMemory; + JVM_TraceInstructions; + JVM_TraceMethodCalls; + JVM_UnloadLibrary; + JVM_Write; + JVM_Yield; + JVM_handle_linux_signal; # miscellaneous functions - _jio_fprintf - _jio_printf - _jio_snprintf - _jio_vfprintf - _jio_vsnprintf + jio_fprintf; + jio_printf; + jio_snprintf; + jio_vfprintf; + jio_vsnprintf; + fork1; + numa_warn; + numa_error; + + # Needed because there is no JVM interface for this. + sysThreadAvailableStackWithSlack; # This is for Forte Analyzer profiling support. - _AsyncGetCallTrace + AsyncGetCallTrace; + + # INSERT VTABLE SYMBOLS HERE - # INSERT VTABLE SYMBOLS HERE + local: + *; +}; diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/optimized.make --- a/make/bsd/makefiles/optimized.make Thu Feb 06 13:08:44 2014 -0800 +++ b/make/bsd/makefiles/optimized.make Tue Feb 11 11:26:05 2014 -0800 @@ -39,5 +39,8 @@ # Linker mapfile MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug +ifeq ($(OS_VENDOR), Darwin) +MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-darwin-debug +endif VERSION = optimized diff -r 4c8bda53850f -r b2fee789d23f make/bsd/makefiles/product.make --- a/make/bsd/makefiles/product.make Thu Feb 06 13:08:44 2014 -0800 +++ b/make/bsd/makefiles/product.make Tue Feb 11 11:26:05 2014 -0800 @@ -39,6 +39,9 @@ # Linker mapfile MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-product +ifeq ($(OS_VENDOR), Darwin) +MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-darwin-product +endif SYSDEFS += -DPRODUCT VERSION = optimized diff -r 4c8bda53850f -r b2fee789d23f make/linux/makefiles/adjust-mflags.sh --- a/make/linux/makefiles/adjust-mflags.sh Thu Feb 06 13:08:44 2014 -0800 +++ b/make/linux/makefiles/adjust-mflags.sh Tue Feb 11 11:26:05 2014 -0800 @@ -64,7 +64,7 @@ echo "$MFLAGS" \ | sed ' s/^-/ -/ - s/ -\([^ ][^ ]*\)j/ -\1 -j/ + s/ -\([^ I][^ I]*\)j/ -\1 -j/ s/ -j[0-9][0-9]*/ -j/ s/ -j\([^ ]\)/ -j -\1/ s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/ diff -r 4c8bda53850f -r b2fee789d23f make/solaris/makefiles/adjust-mflags.sh --- a/make/solaris/makefiles/adjust-mflags.sh Thu Feb 06 13:08:44 2014 -0800 +++ b/make/solaris/makefiles/adjust-mflags.sh Tue Feb 11 11:26:05 2014 -0800 @@ -64,7 +64,7 @@ echo "$MFLAGS" \ | sed ' s/^-/ -/ - s/ -\([^ ][^ ]*\)j/ -\1 -j/ + s/ -\([^ I][^ I]*\)j/ -\1 -j/ s/ -j[0-9][0-9]*/ -j/ s/ -j\([^ ]\)/ -j -\1/ s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/ diff -r 4c8bda53850f -r b2fee789d23f make/solaris/makefiles/buildtree.make --- a/make/solaris/makefiles/buildtree.make Thu Feb 06 13:08:44 2014 -0800 +++ b/make/solaris/makefiles/buildtree.make Tue Feb 11 11:26:05 2014 -0800 @@ -117,7 +117,7 @@ # For dependencies and recursive makes. BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) @@ -349,6 +349,16 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ +dtrace.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... 
+ $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + FORCE: .PHONY: all FORCE diff -r 4c8bda53850f -r b2fee789d23f make/solaris/makefiles/dtrace.make --- a/make/solaris/makefiles/dtrace.make Thu Feb 06 13:08:44 2014 -0800 +++ b/make/solaris/makefiles/dtrace.make Tue Feb 11 11:26:05 2014 -0800 @@ -36,6 +36,8 @@ else +DtraceOutDir = $(GENERATED)/dtracefiles + JVM_DB = libjvm_db LIBJVM_DB = libjvm_db.so @@ -326,6 +328,22 @@ $(QUIETLY) if [ -f $(GENOFFS) ]; then touch $(GENOFFS); fi $(QUIETLY) if [ -f $(JVMOFFS.o) ]; then touch $(JVMOFFS.o); fi + +$(DtraceOutDir): + mkdir $(DtraceOutDir) + +$(DtraceOutDir)/hotspot.h: $(DTRACE_SRCDIR)/hotspot.d | $(DtraceOutDir) + $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hotspot.d + +$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_SRCDIR)/hotspot_jni.d | $(DtraceOutDir) + $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hotspot_jni.d + +$(DtraceOutDir)/hs_private.h: $(DTRACE_SRCDIR)/hs_private.d | $(DtraceOutDir) + $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hs_private.d + +dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h + + .PHONY: dtraceCheck SYSTEM_DTRACE_H = /usr/include/dtrace.h diff -r 4c8bda53850f -r b2fee789d23f make/solaris/makefiles/top.make --- a/make/solaris/makefiles/top.make Thu Feb 06 13:08:44 2014 -0800 +++ b/make/solaris/makefiles/top.make Tue Feb 11 11:26:05 2014 -0800 @@ -73,7 +73,7 @@ @echo All done. # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff @# We need a null action here, so implicit rules don't get consulted. 
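# Note on the Solaris dtrace wiring in the buildtree.make hunk above and the
# top.make hunks here: buildtree.make now also generates a dtrace.make wrapper
# in each build directory, and vm_build_preliminaries gains a dtrace_stuff
# prerequisite so the dtrace-generated headers exist before the parallel VM
# compile starts; the dtrace_stuff rule itself follows below. A minimal sketch
# of the generated wrapper, mirroring the buildtree.make recipe above:
#
#   # (generated dtrace.make)
#   include flags.make
#   include $(GAMMADIR)/make/solaris/makefiles/dtrace.make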
$(Cached_plat): $(Plat_File) @@ -95,6 +95,9 @@ sa_stuff: @$(MAKE) -f sa.make $(MFLAGS-adjusted) +dtrace_stuff: $(Cached_plat) $(adjust-mflags) + @$(MAKE) -f dtrace.make dtrace_gen_headers $(MFLAGS-adjusted) GENERATED=$(GENERATED) + # and the VM: must use other makefile with dependencies included # We have to go to great lengths to get control over the -jN argument diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/assembler_sparc.hpp --- a/src/cpu/sparc/vm/assembler_sparc.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -88,6 +88,7 @@ orncc_op3 = 0x16, xnorcc_op3 = 0x17, addccc_op3 = 0x18, + aes4_op3 = 0x19, umulcc_op3 = 0x1a, smulcc_op3 = 0x1b, subccc_op3 = 0x1c, @@ -121,6 +122,8 @@ fpop1_op3 = 0x34, fpop2_op3 = 0x35, impdep1_op3 = 0x36, + aes3_op3 = 0x36, + flog3_op3 = 0x36, impdep2_op3 = 0x37, jmpl_op3 = 0x38, rett_op3 = 0x39, @@ -172,41 +175,56 @@ enum opfs { // selected opfs - fmovs_opf = 0x01, - fmovd_opf = 0x02, + fmovs_opf = 0x01, + fmovd_opf = 0x02, - fnegs_opf = 0x05, - fnegd_opf = 0x06, + fnegs_opf = 0x05, + fnegd_opf = 0x06, - fadds_opf = 0x41, - faddd_opf = 0x42, - fsubs_opf = 0x45, - fsubd_opf = 0x46, + fadds_opf = 0x41, + faddd_opf = 0x42, + fsubs_opf = 0x45, + fsubd_opf = 0x46, - fmuls_opf = 0x49, - fmuld_opf = 0x4a, - fdivs_opf = 0x4d, - fdivd_opf = 0x4e, + fmuls_opf = 0x49, + fmuld_opf = 0x4a, + fdivs_opf = 0x4d, + fdivd_opf = 0x4e, + + fcmps_opf = 0x51, + fcmpd_opf = 0x52, - fcmps_opf = 0x51, - fcmpd_opf = 0x52, + fstox_opf = 0x81, + fdtox_opf = 0x82, + fxtos_opf = 0x84, + fxtod_opf = 0x88, + fitos_opf = 0xc4, + fdtos_opf = 0xc6, + fitod_opf = 0xc8, + fstod_opf = 0xc9, + fstoi_opf = 0xd1, + fdtoi_opf = 0xd2, - fstox_opf = 0x81, - fdtox_opf = 0x82, - fxtos_opf = 0x84, - fxtod_opf = 0x88, - fitos_opf = 0xc4, - fdtos_opf = 0xc6, - fitod_opf = 0xc8, - fstod_opf = 0xc9, - fstoi_opf = 0xd1, - fdtoi_opf = 0xd2, + mdtox_opf = 0x110, + mstouw_opf = 0x111, + mstosw_opf = 0x113, + mxtod_opf = 0x118, + mwtos_opf = 0x119, + + aes_kexpand0_opf = 0x130, + aes_kexpand2_opf = 0x131 + }; - mdtox_opf = 0x110, - mstouw_opf = 0x111, - mstosw_opf = 0x113, - mxtod_opf = 0x118, - mwtos_opf = 0x119 + enum op5s { + aes_eround01_op5 = 0x00, + aes_eround23_op5 = 0x01, + aes_dround01_op5 = 0x02, + aes_dround23_op5 = 0x03, + aes_eround01_l_op5 = 0x04, + aes_eround23_l_op5 = 0x05, + aes_dround01_l_op5 = 0x06, + aes_dround23_l_op5 = 0x07, + aes_kexpand1_op5 = 0x08 }; enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7, rc_last = rc_gez }; @@ -427,6 +445,7 @@ static int immed( bool i) { return u_field(i ? 
1 : 0, 13, 13); } static int opf_low6( int w) { return u_field(w, 10, 5); } static int opf_low5( int w) { return u_field(w, 9, 5); } + static int op5( int x) { return u_field(x, 8, 5); } static int trapcc( CC cc) { return u_field(cc, 12, 11); } static int sx( int i) { return u_field(i, 12, 12); } // shift x=1 means 64-bit static int opf( int x) { return u_field(x, 13, 5); } @@ -451,6 +470,7 @@ static int fd( FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); }; static int fs1(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); }; static int fs2(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 4, 0); }; + static int fs3(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 13, 9); }; // some float instructions use this encoding on the op3 field static int alt_op3(int op, FloatRegisterImpl::Width w) { @@ -559,6 +579,12 @@ return x & ((1 << 10) - 1); } + // AES crypto instructions supported only on certain processors + static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instructions support"); } + + // instruction only in VIS1 + static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); } + // instruction only in VIS3 static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); } @@ -682,6 +708,24 @@ void addccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } + // 4-operand AES instructions + + void aes_eround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_op5) | fs2(s2, FloatRegisterImpl::D) ); } + void aes_eround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_op5) | fs2(s2, FloatRegisterImpl::D) ); } + void aes_dround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_op5) | fs2(s2, FloatRegisterImpl::D) ); } + void aes_dround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_op5) | fs2(s2, FloatRegisterImpl::D) ); } + void aes_eround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); } + void aes_eround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); } + void aes_dround01_l( 
FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); } + void aes_dround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); } + void aes_kexpand1( FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | u_field(imm5a, 13, 9) | op5(aes_kexpand1_op5) | fs2(s2, FloatRegisterImpl::D) ); } + + + // 3-operand AES instructions + + void aes_kexpand0( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand0_opf) | fs2(s2, FloatRegisterImpl::D) ); } + void aes_kexpand2( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand2_opf) | fs2(s2, FloatRegisterImpl::D) ); } + // pp 136 inline void bpr(RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none); @@ -784,6 +828,10 @@ void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); } void fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x4c + w) | fs2(s2, w)); } + // FXORs/FXORd instructions + + void fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(flog3_op3) | fs1(s1, w) | opf(0x6E - w) | fs2(s2, w)); } + // pp 164 void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); } diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1315,7 +1315,7 @@ } Address LIR_Assembler::as_Address(LIR_Address* addr) { - Register reg = addr->base()->as_register(); + Register reg = addr->base()->as_pointer_register(); LIR_Opr index = addr->index(); if (index->is_illegal()) { return Address(reg, addr->disp()); @@ -3101,7 +3101,145 @@ } void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { - fatal("Type profiling not implemented on this platform"); + Register obj = op->obj()->as_register(); + Register tmp1 = op->tmp()->as_pointer_register(); + Register tmp2 = G1; + Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); + ciKlass* exact_klass = op->exact_klass(); + intptr_t current_klass = op->current_klass(); + bool not_null = op->not_null(); + bool no_conflict = op->no_conflict(); + + Label update, next, none; + + bool do_null = !not_null; + bool exact_klass_set = exact_klass != NULL && 
ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
+  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
+
+  assert(do_null || do_update, "why are we here?");
+  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
+
+  __ verify_oop(obj);
+
+  if (tmp1 != obj) {
+    __ mov(obj, tmp1);
+  }
+  if (do_null) {
+    __ br_notnull_short(tmp1, Assembler::pt, update);
+    if (!TypeEntries::was_null_seen(current_klass)) {
+      __ ld_ptr(mdo_addr, tmp1);
+      __ or3(tmp1, TypeEntries::null_seen, tmp1);
+      __ st_ptr(tmp1, mdo_addr);
+    }
+    if (do_update) {
+      __ ba(next);
+      __ delayed()->nop();
+    }
+#ifdef ASSERT
+  } else {
+    __ br_notnull_short(tmp1, Assembler::pt, update);
+    __ stop("unexpected null obj");
+#endif
+  }
+
+  __ bind(update);
+
+  if (do_update) {
+#ifdef ASSERT
+    if (exact_klass != NULL) {
+      Label ok;
+      __ load_klass(tmp1, tmp1);
+      metadata2reg(exact_klass->constant_encoding(), tmp2);
+      __ cmp_and_br_short(tmp1, tmp2, Assembler::equal, Assembler::pt, ok);
+      __ stop("exact klass and actual klass differ");
+      __ bind(ok);
+    }
+#endif
+
+    Label do_update;
+    __ ld_ptr(mdo_addr, tmp2);
+
+    if (!no_conflict) {
+      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
+        if (exact_klass != NULL) {
+          metadata2reg(exact_klass->constant_encoding(), tmp1);
+        } else {
+          __ load_klass(tmp1, tmp1);
+        }
+
+        __ xor3(tmp1, tmp2, tmp1);
+        __ btst(TypeEntries::type_klass_mask, tmp1);
+        // klass seen before, nothing to do. The unknown bit may have been
+        // set already but no need to check.
+        __ brx(Assembler::zero, false, Assembler::pt, next);
+        __ delayed()->
+
+        btst(TypeEntries::type_unknown, tmp1);
+        // already unknown. Nothing to do anymore.
+        __ brx(Assembler::notZero, false, Assembler::pt, next);
+
+        if (TypeEntries::is_type_none(current_klass)) {
+          __ delayed()->btst(TypeEntries::type_mask, tmp2);
+          __ brx(Assembler::zero, true, Assembler::pt, do_update);
+          // first time here. Set profile type.
+          __ delayed()->or3(tmp2, tmp1, tmp2);
+        } else {
+          __ delayed()->nop();
+        }
+      } else {
+        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
+
+        __ btst(TypeEntries::type_unknown, tmp2);
+        // already unknown. Nothing to do anymore.
+        __ brx(Assembler::notZero, false, Assembler::pt, next);
+        __ delayed()->nop();
+      }
+
+      // different than before. Cannot keep accurate profile.
+      __ or3(tmp2, TypeEntries::type_unknown, tmp2);
+    } else {
+      // There's a single possible klass at this profile point
+      assert(exact_klass != NULL, "should be");
+      if (TypeEntries::is_type_none(current_klass)) {
+        metadata2reg(exact_klass->constant_encoding(), tmp1);
+        __ xor3(tmp1, tmp2, tmp1);
+        __ btst(TypeEntries::type_klass_mask, tmp1);
+        __ brx(Assembler::zero, false, Assembler::pt, next);
+#ifdef ASSERT
+
+        {
+          Label ok;
+          __ delayed()->btst(TypeEntries::type_mask, tmp2);
+          __ brx(Assembler::zero, true, Assembler::pt, ok);
+          __ delayed()->nop();
+
+          __ stop("unexpected profiling mismatch");
+          __ bind(ok);
+        }
+        // first time here. Set profile type.
+        __ or3(tmp2, tmp1, tmp2);
+#else
+        // first time here. Set profile type.
+        __ delayed()->or3(tmp2, tmp1, tmp2);
+#endif
+
+      } else {
+        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
+
+        // already unknown. Nothing to do anymore.
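
Taken together, these xor3/btst/or3 sequences implement a small state machine over a single profile word: the low two bits are status flags and the remaining bits hold a Klass pointer. A plain-C++ sketch of the update rule they implement (the constant values mirror TypeEntries in methodData.hpp; update_cell is a made-up name for illustration only):

    #include <stdint.h>

    // One profile cell: | Klass* bits ... | type_unknown | null_seen |
    static const intptr_t null_seen       = 1;
    static const intptr_t type_unknown    = 2;
    static const intptr_t type_klass_mask = ~(null_seen | type_unknown);

    static intptr_t update_cell(intptr_t cell, intptr_t new_klass) {
      if (((cell ^ new_klass) & type_klass_mask) == 0) return cell;    // same klass seen before
      if ((cell & type_unknown) != 0)                  return cell;    // already polluted
      if ((cell & ~null_seen) == 0)            return cell | new_klass; // first klass at this site
      return cell | type_unknown;               // conflicting klass: drop to "unknown"
    }

The null_seen bit itself is set by the separate null path earlier in the function; the delayed-slot scheduling in the SPARC code folds several of these tests into branch shadows, which is why the control flow looks more tangled than the rule it encodes.
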
+ __ btst(TypeEntries::type_unknown, tmp2); + __ brx(Assembler::notZero, false, Assembler::pt, next); + __ delayed()->or3(tmp2, TypeEntries::type_unknown, tmp2); + } + } + + __ bind(do_update); + __ st_ptr(tmp2, mdo_addr); + + __ bind(next); + } } void LIR_Assembler::align_backward_branch_target() { @@ -3321,9 +3459,14 @@ void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { LIR_Address* addr = addr_opr->as_address_ptr(); - assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet"); - - __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register()); + assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet"); + + if (Assembler::is_simm13(addr->disp())) { + __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register()); + } else { + __ set(addr->disp(), G3_scratch); + __ add(addr->base()->as_pointer_register(), G3_scratch, dest->as_pointer_register()); + } } diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/interp_masm_sparc.cpp --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1892,6 +1892,220 @@ } } +void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) { + Label not_null, do_nothing, do_update; + + assert_different_registers(obj, mdo_addr.base(), tmp); + + verify_oop(obj); + + ld_ptr(mdo_addr, tmp); + + br_notnull_short(obj, pt, not_null); + or3(tmp, TypeEntries::null_seen, tmp); + ba_short(do_update); + + bind(not_null); + load_klass(obj, obj); + + xor3(obj, tmp, obj); + btst(TypeEntries::type_klass_mask, obj); + // klass seen before, nothing to do. The unknown bit may have been + // set already but no need to check. + brx(zero, false, pt, do_nothing); + delayed()-> + + btst(TypeEntries::type_unknown, obj); + // already unknown. Nothing to do anymore. + brx(notZero, false, pt, do_nothing); + delayed()-> + + btst(TypeEntries::type_mask, tmp); + brx(zero, true, pt, do_update); + // first time here. Set profile type. + delayed()->or3(tmp, obj, tmp); + + // different than before. Cannot keep accurate profile. + or3(tmp, TypeEntries::type_unknown, tmp); + + bind(do_update); + // update profile + st_ptr(tmp, mdo_addr); + + bind(do_nothing); +} + +void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) { + if (!ProfileInterpreter) { + return; + } + + assert_different_registers(callee, tmp1, tmp2, ImethodDataPtr); + + if (MethodData::profile_arguments() || MethodData::profile_return()) { + Label profile_continue; + + test_method_data_pointer(profile_continue); + + int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size()); + + ldub(ImethodDataPtr, in_bytes(DataLayout::tag_offset()) - off_to_start, tmp1); + cmp_and_br_short(tmp1, is_virtual ? 
DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag, notEqual, pn, profile_continue);
+
+    if (MethodData::profile_arguments()) {
+      Label done;
+      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+      add(ImethodDataPtr, off_to_args, ImethodDataPtr);
+
+      for (int i = 0; i < TypeProfileArgsLimit; i++) {
+        if (i > 0 || MethodData::profile_return()) {
+          // If return value type is profiled we may have no argument to profile
+          ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
+          sub(tmp1, i*TypeStackSlotEntries::per_arg_count(), tmp1);
+          cmp_and_br_short(tmp1, TypeStackSlotEntries::per_arg_count(), less, pn, done);
+        }
+        ld_ptr(Address(callee, Method::const_offset()), tmp1);
+        lduh(Address(tmp1, ConstMethod::size_of_parameters_offset()), tmp1);
+        // stack offset o (zero based) from the start of the argument
+        // list, for n arguments translates into offset n - o - 1 from
+        // the end of the argument list. But there's an extra slot at
+        // the top of the stack. So the offset is n - o from Lesp.
+        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, tmp2);
+        sub(tmp1, tmp2, tmp1);
+
+        // Can't use MacroAssembler::argument_address() which needs Gargs to be set up
+        sll(tmp1, Interpreter::logStackElementSize, tmp1);
+        ld_ptr(Lesp, tmp1, tmp1);
+
+        Address mdo_arg_addr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
+        profile_obj_type(tmp1, mdo_arg_addr, tmp2);
+
+        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+        add(ImethodDataPtr, to_add, ImethodDataPtr);
+        off_to_args += to_add;
+      }
+
+      if (MethodData::profile_return()) {
+        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
+        sub(tmp1, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count(), tmp1);
+      }
+
+      bind(done);
+
+      if (MethodData::profile_return()) {
+        // We're right after the type profile for the last
+        // argument. tmp1 is the number of cells left in the
+        // CallTypeData/VirtualCallTypeData to reach its end. Non null
+        // if there's a return to profile.
+        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+        sll(tmp1, exact_log2(DataLayout::cell_size), tmp1);
+        add(ImethodDataPtr, tmp1, ImethodDataPtr);
+      }
+    } else {
+      assert(MethodData::profile_return(), "either profile call args or call ret");
+      update_mdp_by_constant(in_bytes(ReturnTypeEntry::size()));
+    }
+
+    // mdp points right after the end of the
+    // CallTypeData/VirtualCallTypeData, right after the cells for the
+    // return value type if there's one.
+
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
+  assert_different_registers(ret, tmp1, tmp2);
+  if (ProfileInterpreter && MethodData::profile_return()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(profile_continue);
+
+    if (MethodData::profile_return_jsr292_only()) {
+      // If we don't profile all invoke bytecodes we must make sure
+      // it's a bytecode we indeed profile. We can't go back to the
+      // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+      // length.
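
Instead, the code below re-reads the bytecode at Lbcp and only proceeds for JSR-292 call sites. Roughly, the predicate it evaluates is the following (a sketch; should_profile_return is an invented helper name, while the enum values are the real ones the assembly compares against):

    static bool should_profile_return(Bytecodes::Code bc, vmIntrinsics::ID id) {
      return bc == Bytecodes::_invokedynamic
          || bc == Bytecodes::_invokehandle
          || id == vmIntrinsics::_compiledLambdaForm;  // synthetic LambdaForm method
    }
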
+ Label do_profile; + ldub(Lbcp, 0, tmp1); + cmp_and_br_short(tmp1, Bytecodes::_invokedynamic, equal, pn, do_profile); + cmp(tmp1, Bytecodes::_invokehandle); + br(equal, false, pn, do_profile); + delayed()->ldub(Lmethod, Method::intrinsic_id_offset_in_bytes(), tmp1); + cmp_and_br_short(tmp1, vmIntrinsics::_compiledLambdaForm, notEqual, pt, profile_continue); + + bind(do_profile); + } + + Address mdo_ret_addr(ImethodDataPtr, -in_bytes(ReturnTypeEntry::size())); + mov(ret, tmp1); + profile_obj_type(tmp1, mdo_ret_addr, tmp2); + + bind(profile_continue); + } +} + +void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) { + if (ProfileInterpreter && MethodData::profile_parameters()) { + Label profile_continue, done; + + test_method_data_pointer(profile_continue); + + // Load the offset of the area within the MDO used for + // parameters. If it's negative we're not profiling any parameters. + lduw(ImethodDataPtr, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), tmp1); + cmp_and_br_short(tmp1, 0, less, pn, profile_continue); + + // Compute a pointer to the area for parameters from the offset + // and move the pointer to the slot for the last + // parameters. Collect profiling from last parameter down. + // mdo start + parameters offset + array length - 1 + + // Pointer to the parameter area in the MDO + Register mdp = tmp1; + add(ImethodDataPtr, tmp1, mdp); + + // offset of the current profile entry to update + Register entry_offset = tmp2; + // entry_offset = array len in number of cells + ld_ptr(mdp, ArrayData::array_len_offset(), entry_offset); + + int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0)); + assert(off_base % DataLayout::cell_size == 0, "should be a number of cells"); + + // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field + sub(entry_offset, TypeStackSlotEntries::per_arg_count() - (off_base / DataLayout::cell_size), entry_offset); + // entry_offset in bytes + sll(entry_offset, exact_log2(DataLayout::cell_size), entry_offset); + + Label loop; + bind(loop); + + // load offset on the stack from the slot for this parameter + ld_ptr(mdp, entry_offset, tmp3); + sll(tmp3,Interpreter::logStackElementSize, tmp3); + neg(tmp3); + // read the parameter from the local area + ld_ptr(Llocals, tmp3, tmp3); + + // make entry_offset now point to the type field for this parameter + int type_base = in_bytes(ParametersTypeData::type_offset(0)); + assert(type_base > off_base, "unexpected"); + add(entry_offset, type_base - off_base, entry_offset); + + // profile the parameter + Address arg_type(mdp, entry_offset); + profile_obj_type(tmp3, arg_type, tmp4); + + // go to next parameter + sub(entry_offset, TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base), entry_offset); + cmp_and_br_short(entry_offset, off_base, greaterEqual, pt, loop); + + bind(profile_continue); + } +} + // add a InterpMonitorElem to stack (see frame_sparc.hpp) void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty, diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/interp_masm_sparc.hpp --- a/src/cpu/sparc/vm/interp_masm_sparc.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -323,6 +323,11 @@ Register scratch2, Register scratch3); + void profile_obj_type(Register obj, const Address& mdo_addr, Register tmp); + void profile_arguments_type(Register callee, 
Register tmp1, Register tmp2, bool is_virtual); + void profile_return_type(Register ret, Register tmp1, Register tmp2); + void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4); + // Debugging void interp_verify_oop(Register reg, TosState state, const char * file, int line); // only if +VerifyOops && state == atos void verify_oop_or_return_address(Register reg, Register rtmp); // for astore diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/sparc.ad --- a/src/cpu/sparc/vm/sparc.ad Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/sparc.ad Tue Feb 11 11:26:05 2014 -0800 @@ -1848,6 +1848,12 @@ return false; } +// Current (2013) SPARC platforms need to read original key +// to construct decryption expanded key +const bool Matcher::pass_original_key_for_aes() { + return true; +} + // USII supports fxtof through the whole range of number, USIII doesn't const bool Matcher::convL2FSupported(void) { return VM_Version::has_fast_fxtof(); @@ -3355,8 +3361,8 @@ interface(CONST_INTER); %} -// Unsigned (positive) Integer Immediate: 13-bit -operand immU13() %{ +// Unsigned Integer Immediate: 12-bit (non-negative that fits in simm13) +operand immU12() %{ predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int())); match(ConI); op_cost(0); @@ -3392,6 +3398,17 @@ interface(CONST_INTER); %} +// Int Immediate non-negative +operand immU31() +%{ + predicate(n->get_int() >= 0); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + // Integer Immediate: 0-bit operand immI0() %{ predicate(n->get_int() == 0); @@ -5720,7 +5737,6 @@ effect(TEMP dst, TEMP tmp); ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); - size((3+1)*4); // set may use two instructions. format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t" "SET $mask,$tmp\n\t" "AND $dst,$tmp,$dst" %} @@ -5842,13 +5858,13 @@ ins_pipe(iload_mem); %} -// Load Integer with a 13-bit mask into a Long Register -instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{ +// Load Integer with a 12-bit mask into a Long Register +instruct loadI2L_immU12(iRegL dst, memory mem, immU12 mask) %{ match(Set dst (ConvI2L (AndI (LoadI mem) mask))); ins_cost(MEMORY_REF_COST + DEFAULT_COST); size(2*4); - format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t" + format %{ "LDUW $mem,$dst\t! int & 12-bit mask -> long\n\t" "AND $dst,$mask,$dst" %} ins_encode %{ Register Rdst = $dst$$Register; @@ -5858,14 +5874,13 @@ ins_pipe(iload_mem); %} -// Load Integer with a 32-bit mask into a Long Register -instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{ +// Load Integer with a 31-bit mask into a Long Register +instruct loadI2L_immU31(iRegL dst, memory mem, immU31 mask, iRegL tmp) %{ match(Set dst (ConvI2L (AndI (LoadI mem) mask))); effect(TEMP dst, TEMP tmp); ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); - size((3+1)*4); // set may use two instructions. - format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t" + format %{ "LDUW $mem,$dst\t! 
int & 31-bit mask -> long\n\t" "SET $mask,$tmp\n\t" "AND $dst,$tmp,$dst" %} ins_encode %{ @@ -8960,7 +8975,7 @@ ins_pipe(ialu_cconly_reg_reg); %} -instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU13 op2 ) %{ +instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU12 op2 ) %{ match(Set icc (CmpU op1 op2)); size(4); diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/stubGenerator_sparc.cpp --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -3304,6 +3304,775 @@ } } + address generate_aescrypt_encryptBlock() { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", "aesencryptBlock"); + Label L_doLast128bit, L_storeOutput; + address start = __ pc(); + Register from = O0; // source byte array + Register to = O1; // destination byte array + Register key = O2; // expanded key array + const Register keylen = O4; //reg for storing expanded key array length + + // read expanded key length + __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); + + // load input into F54-F56; F30-F31 used as temp + __ ldf(FloatRegisterImpl::S, from, 0, F30); + __ ldf(FloatRegisterImpl::S, from, 4, F31); + __ fmov(FloatRegisterImpl::D, F30, F54); + __ ldf(FloatRegisterImpl::S, from, 8, F30); + __ ldf(FloatRegisterImpl::S, from, 12, F31); + __ fmov(FloatRegisterImpl::D, F30, F56); + + // load expanded key + for ( int i = 0; i <= 38; i += 2 ) { + __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i)); + } + + // perform cipher transformation + __ fxor(FloatRegisterImpl::D, F0, F54, F54); + __ fxor(FloatRegisterImpl::D, F2, F56, F56); + // rounds 1 through 8 + for ( int i = 4; i <= 28; i += 8 ) { + __ aes_eround01(as_FloatRegister(i), F54, F56, F58); + __ aes_eround23(as_FloatRegister(i+2), F54, F56, F60); + __ aes_eround01(as_FloatRegister(i+4), F58, F60, F54); + __ aes_eround23(as_FloatRegister(i+6), F58, F60, F56); + } + __ aes_eround01(F36, F54, F56, F58); //round 9 + __ aes_eround23(F38, F54, F56, F60); + + // 128-bit original key size + __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_doLast128bit); + + for ( int i = 40; i <= 50; i += 2 ) { + __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i) ); + } + __ aes_eround01(F40, F58, F60, F54); //round 10 + __ aes_eround23(F42, F58, F60, F56); + __ aes_eround01(F44, F54, F56, F58); //round 11 + __ aes_eround23(F46, F54, F56, F60); + + // 192-bit original key size + __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_storeOutput); + + __ ldf(FloatRegisterImpl::D, key, 208, F52); + __ aes_eround01(F48, F58, F60, F54); //round 12 + __ aes_eround23(F50, F58, F60, F56); + __ ldf(FloatRegisterImpl::D, key, 216, F46); + __ ldf(FloatRegisterImpl::D, key, 224, F48); + __ ldf(FloatRegisterImpl::D, key, 232, F50); + __ aes_eround01(F52, F54, F56, F58); //round 13 + __ aes_eround23(F46, F54, F56, F60); + __ br(Assembler::always, false, Assembler::pt, L_storeOutput); + __ delayed()->nop(); + + __ BIND(L_doLast128bit); + __ ldf(FloatRegisterImpl::D, key, 160, F48); + __ ldf(FloatRegisterImpl::D, key, 168, F50); + + __ BIND(L_storeOutput); + // perform last round of encryption common for all key sizes + __ aes_eround01_l(F48, F58, F60, F54); //last round + __ aes_eround23_l(F50, F58, F60, F56); + + // store output into the destination array, F0-F1 used as temp + __ fmov(FloatRegisterImpl::D, F54, F0); + __ stf(FloatRegisterImpl::S, F0, to, 
0); + __ stf(FloatRegisterImpl::S, F1, to, 4); + __ fmov(FloatRegisterImpl::D, F56, F0); + __ stf(FloatRegisterImpl::S, F0, to, 8); + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12); + + return start; + } + + address generate_aescrypt_decryptBlock() { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", "aesdecryptBlock"); + address start = __ pc(); + Label L_expand192bit, L_expand256bit, L_common_transform; + Register from = O0; // source byte array + Register to = O1; // destination byte array + Register key = O2; // expanded key array + Register original_key = O3; // original key array only required during decryption + const Register keylen = O4; // reg for storing expanded key array length + + // read expanded key array length + __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); + + // load input into F52-F54; F30,F31 used as temp + __ ldf(FloatRegisterImpl::S, from, 0, F30); + __ ldf(FloatRegisterImpl::S, from, 4, F31); + __ fmov(FloatRegisterImpl::D, F30, F52); + __ ldf(FloatRegisterImpl::S, from, 8, F30); + __ ldf(FloatRegisterImpl::S, from, 12, F31); + __ fmov(FloatRegisterImpl::D, F30, F54); + + // load original key from SunJCE expanded decryption key + for ( int i = 0; i <= 3; i++ ) { + __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); + } + + // 256-bit original key size + __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit); + + // 192-bit original key size + __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit); + + // 128-bit original key size + // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions + for ( int i = 0; i <= 36; i += 4 ) { + __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4)); + __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6)); + } + + // perform 128-bit key specific inverse cipher transformation + __ fxor(FloatRegisterImpl::D, F42, F54, F54); + __ fxor(FloatRegisterImpl::D, F40, F52, F52); + __ br(Assembler::always, false, Assembler::pt, L_common_transform); + __ delayed()->nop(); + + __ BIND(L_expand192bit); + + // start loading rest of the 192-bit key + __ ldf(FloatRegisterImpl::S, original_key, 16, F4); + __ ldf(FloatRegisterImpl::S, original_key, 20, F5); + + // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions + for ( int i = 0; i <= 36; i += 6 ) { + __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6)); + __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8)); + __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10)); + } + __ aes_kexpand1(F42, F46, 7, F48); + __ aes_kexpand2(F44, F48, F50); + + // perform 192-bit key specific inverse cipher transformation + __ fxor(FloatRegisterImpl::D, F50, F54, F54); + __ fxor(FloatRegisterImpl::D, F48, F52, F52); + __ aes_dround23(F46, F52, F54, F58); + __ aes_dround01(F44, F52, F54, F56); + __ aes_dround23(F42, F56, F58, F54); + __ aes_dround01(F40, F56, F58, F52); + __ br(Assembler::always, false, Assembler::pt, L_common_transform); + __ delayed()->nop(); + + __ BIND(L_expand256bit); + + // load rest of the 256-bit key + for ( int i = 4; i <= 7; i++ ) { + __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); + } + + // perform key expansion 
since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions + for ( int i = 0; i <= 40; i += 8 ) { + __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8)); + __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10)); + __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12)); + __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14)); + } + __ aes_kexpand1(F48, F54, 6, F56); + __ aes_kexpand2(F50, F56, F58); + + for ( int i = 0; i <= 6; i += 2 ) { + __ fmov(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i)); + } + + // load input into F52-F54 + __ ldf(FloatRegisterImpl::D, from, 0, F52); + __ ldf(FloatRegisterImpl::D, from, 8, F54); + + // perform 256-bit key specific inverse cipher transformation + __ fxor(FloatRegisterImpl::D, F0, F54, F54); + __ fxor(FloatRegisterImpl::D, F2, F52, F52); + __ aes_dround23(F4, F52, F54, F58); + __ aes_dround01(F6, F52, F54, F56); + __ aes_dround23(F50, F56, F58, F54); + __ aes_dround01(F48, F56, F58, F52); + __ aes_dround23(F46, F52, F54, F58); + __ aes_dround01(F44, F52, F54, F56); + __ aes_dround23(F42, F56, F58, F54); + __ aes_dround01(F40, F56, F58, F52); + + for ( int i = 0; i <= 7; i++ ) { + __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); + } + + // perform inverse cipher transformations common for all key sizes + __ BIND(L_common_transform); + for ( int i = 38; i >= 6; i -= 8 ) { + __ aes_dround23(as_FloatRegister(i), F52, F54, F58); + __ aes_dround01(as_FloatRegister(i-2), F52, F54, F56); + if ( i != 6) { + __ aes_dround23(as_FloatRegister(i-4), F56, F58, F54); + __ aes_dround01(as_FloatRegister(i-6), F56, F58, F52); + } else { + __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F54); + __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F52); + } + } + + // store output to destination array, F0-F1 used as temp + __ fmov(FloatRegisterImpl::D, F52, F0); + __ stf(FloatRegisterImpl::S, F0, to, 0); + __ stf(FloatRegisterImpl::S, F1, to, 4); + __ fmov(FloatRegisterImpl::D, F54, F0); + __ stf(FloatRegisterImpl::S, F0, to, 8); + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12); + + return start; + } + + address generate_cipherBlockChaining_encryptAESCrypt() { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); + Label L_cbcenc128, L_cbcenc192, L_cbcenc256; + address start = __ pc(); + Register from = O0; // source byte array + Register to = O1; // destination byte array + Register key = O2; // expanded key array + Register rvec = O3; // init vector + const Register len_reg = O4; // cipher length + const Register keylen = O5; // reg for storing expanded key array length + + // save cipher len to return in the end + __ mov(len_reg, L1); + + // read expanded key length + __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); + + // load init vector + __ ldf(FloatRegisterImpl::D, rvec, 0, F60); + __ ldf(FloatRegisterImpl::D, rvec, 8, F62); + __ ldx(key,0,G1); + __ ldx(key,8,G2); + + // start loading expanded key + for ( int i = 0, j = 16; i <= 38; i += 2, j += 8 ) { + __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i)); + } + + // 128-bit original key size + __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_cbcenc128); + + for ( int i = 40, j = 176; i <= 46; i += 2, j += 8 ) { + __ ldf(FloatRegisterImpl::D, key, 
j, as_FloatRegister(i));
+    }
+
+    // 192-bit original key size
+    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_cbcenc192);
+
+    for ( int i = 48, j = 208; i <= 54; i += 2, j += 8 ) {
+      __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
+    }
+
+    // 256-bit original key size
+    __ br(Assembler::always, false, Assembler::pt, L_cbcenc256);
+    __ delayed()->nop();
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_cbcenc128);
+    __ ldx(from,0,G3);
+    __ ldx(from,8,G4);
+    __ xor3(G1,G3,G3);
+    __ xor3(G2,G4,G4);
+    __ movxtod(G3,F56);
+    __ movxtod(G4,F58);
+    __ fxor(FloatRegisterImpl::D, F60, F56, F60);
+    __ fxor(FloatRegisterImpl::D, F62, F58, F62);
+
+    // TEN_EROUNDS
+    for ( int i = 0; i <= 32; i += 8 ) {
+      __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
+      __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
+      if (i != 32 ) {
+        __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
+      } else {
+        __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
+      }
+    }
+
+    __ stf(FloatRegisterImpl::D, F60, to, 0);
+    __ stf(FloatRegisterImpl::D, F62, to, 8);
+    __ add(from, 16, from);
+    __ add(to, 16, to);
+    __ subcc(len_reg, 16, len_reg);
+    __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128);
+    __ delayed()->nop();
+    __ stf(FloatRegisterImpl::D, F60, rvec, 0);
+    __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+    __ retl();
+    __ delayed()->mov(L1, O0);
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_cbcenc192);
+    __ ldx(from,0,G3);
+    __ ldx(from,8,G4);
+    __ xor3(G1,G3,G3);
+    __ xor3(G2,G4,G4);
+    __ movxtod(G3,F56);
+    __ movxtod(G4,F58);
+    __ fxor(FloatRegisterImpl::D, F60, F56, F60);
+    __ fxor(FloatRegisterImpl::D, F62, F58, F62);
+
+    // TWELVE_EROUNDS
+    for ( int i = 0; i <= 40; i += 8 ) {
+      __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
+      __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
+      if (i != 40 ) {
+        __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
+      } else {
+        __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
+      }
+    }
+
+    __ stf(FloatRegisterImpl::D, F60, to, 0);
+    __ stf(FloatRegisterImpl::D, F62, to, 8);
+    __ add(from, 16, from);
+    __ subcc(len_reg, 16, len_reg);
+    __ add(to, 16, to);
+    __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192);
+    __ delayed()->nop();
+    __ stf(FloatRegisterImpl::D, F60, rvec, 0);
+    __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+    __ retl();
+    __ delayed()->mov(L1, O0);
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_cbcenc256);
+    __ ldx(from,0,G3);
+    __ ldx(from,8,G4);
+    __ xor3(G1,G3,G3);
+    __ xor3(G2,G4,G4);
+    __ movxtod(G3,F56);
+    __ movxtod(G4,F58);
+    __ fxor(FloatRegisterImpl::D, F60, F56, F60);
+    __ fxor(FloatRegisterImpl::D, F62, F58, F62);
+
+    // FOURTEEN_EROUNDS
+    for ( int i = 0; i <= 48; i += 8 ) {
+      __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
+      __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
+      if (i != 48 ) {
+        __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
+      } else {
+        __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
+      }
+    }
+
+    __ stf(FloatRegisterImpl::D, F60, to, 0);
+    __ stf(FloatRegisterImpl::D, F62, to, 8);
+    __ add(from, 16, from);
+    __ subcc(len_reg, 16, len_reg);
+    __ add(to, 16, to);
+    __ br(Assembler::notEqual, false,
Assembler::pt, L_cbcenc256); + __ delayed()->nop(); + __ stf(FloatRegisterImpl::D, F60, rvec, 0); + __ stf(FloatRegisterImpl::D, F62, rvec, 8); + __ retl(); + __ delayed()->mov(L1, O0); + + return start; + } + + address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); + Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start; + Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, L_dec_next2_blocks192, L_dec_next2_blocks256; + address start = __ pc(); + Register from = I0; // source byte array + Register to = I1; // destination byte array + Register key = I2; // expanded key array + Register rvec = I3; // init vector + const Register len_reg = I4; // cipher length + const Register original_key = I5; // original key array only required during decryption + const Register keylen = L6; // reg for storing expanded key array length + + // save cipher len before save_frame, to return in the end + __ mov(O4, L0); + __ save_frame(0); //args are read from I* registers since we save the frame in the beginning + + // load original key from SunJCE expanded decryption key + for ( int i = 0; i <= 3; i++ ) { + __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); + } + + // load initial vector + __ ldx(rvec,0,L0); + __ ldx(rvec,8,L1); + + // read expanded key array length + __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); + + // 256-bit original key size + __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit); + + // 192-bit original key size + __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit); + + // 128-bit original key size + // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions + for ( int i = 0; i <= 36; i += 4 ) { + __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4)); + __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6)); + } + + // load expanded key[last-1] and key[last] elements + __ movdtox(F40,L2); + __ movdtox(F42,L3); + + __ and3(len_reg, 16, L4); + __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks128); + __ delayed()->nop(); + + __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start); + __ delayed()->nop(); + + __ BIND(L_expand192bit); + // load rest of the 192-bit key + __ ldf(FloatRegisterImpl::S, original_key, 16, F4); + __ ldf(FloatRegisterImpl::S, original_key, 20, F5); + + // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions + for ( int i = 0; i <= 36; i += 6 ) { + __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6)); + __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8)); + __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10)); + } + __ aes_kexpand1(F42, F46, 7, F48); + __ aes_kexpand2(F44, F48, F50); + + // load expanded key[last-1] and key[last] elements + __ movdtox(F48,L2); + __ movdtox(F50,L3); + + __ and3(len_reg, 16, L4); + __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks192); + __ delayed()->nop(); + + __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start); + __ delayed()->nop(); + + __ BIND(L_expand256bit); + // load rest of the 256-bit key + for ( 
int i = 4; i <= 7; i++ ) { + __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); + } + + // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions + for ( int i = 0; i <= 40; i += 8 ) { + __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8)); + __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10)); + __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12)); + __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14)); + } + __ aes_kexpand1(F48, F54, 6, F56); + __ aes_kexpand2(F50, F56, F58); + + // load expanded key[last-1] and key[last] elements + __ movdtox(F56,L2); + __ movdtox(F58,L3); + + __ and3(len_reg, 16, L4); + __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks256); + __ delayed()->nop(); + + __ BIND(L_dec_first_block_start); + __ ldx(from,0,L4); + __ ldx(from,8,L5); + __ xor3(L2,L4,G1); + __ movxtod(G1,F60); + __ xor3(L3,L5,G1); + __ movxtod(G1,F62); + + // 128-bit original key size + __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pn, L_dec_first_block128); + + // 192-bit original key size + __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_first_block192); + + __ aes_dround23(F54, F60, F62, F58); + __ aes_dround01(F52, F60, F62, F56); + __ aes_dround23(F50, F56, F58, F62); + __ aes_dround01(F48, F56, F58, F60); + + __ BIND(L_dec_first_block192); + __ aes_dround23(F46, F60, F62, F58); + __ aes_dround01(F44, F60, F62, F56); + __ aes_dround23(F42, F56, F58, F62); + __ aes_dround01(F40, F56, F58, F60); + + __ BIND(L_dec_first_block128); + for ( int i = 38; i >= 6; i -= 8 ) { + __ aes_dround23(as_FloatRegister(i), F60, F62, F58); + __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); + if ( i != 6) { + __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); + __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); + } else { + __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62); + __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60); + } + } + + __ movxtod(L0,F56); + __ movxtod(L1,F58); + __ mov(L4,L0); + __ mov(L5,L1); + __ fxor(FloatRegisterImpl::D, F56, F60, F60); + __ fxor(FloatRegisterImpl::D, F58, F62, F62); + + __ stf(FloatRegisterImpl::D, F60, to, 0); + __ stf(FloatRegisterImpl::D, F62, to, 8); + + __ add(from, 16, from); + __ add(to, 16, to); + __ subcc(len_reg, 16, len_reg); + __ br(Assembler::equal, false, Assembler::pt, L_cbcdec_end); + __ delayed()->nop(); + + // 256-bit original key size + __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_dec_next2_blocks256); + + // 192-bit original key size + __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_next2_blocks192); + + __ align(OptoLoopAlignment); + __ BIND(L_dec_next2_blocks128); + __ nop(); + + // F40:F42 used for first 16-bytes + __ ldx(from,0,G4); + __ ldx(from,8,G5); + __ xor3(L2,G4,G1); + __ movxtod(G1,F40); + __ xor3(L3,G5,G1); + __ movxtod(G1,F42); + + // F60:F62 used for next 16-bytes + __ ldx(from,16,L4); + __ ldx(from,24,L5); + __ xor3(L2,L4,G1); + __ movxtod(G1,F60); + __ xor3(L3,L5,G1); + __ movxtod(G1,F62); + + for ( int i = 38; i >= 6; i -= 8 ) { + __ aes_dround23(as_FloatRegister(i), F40, F42, F44); + __ aes_dround01(as_FloatRegister(i-2), F40, F42, F46); + __ aes_dround23(as_FloatRegister(i), F60, F62, F58); + __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); + if (i != 6 ) { + __ aes_dround23(as_FloatRegister(i-4), F46, 
F44, F42); + __ aes_dround01(as_FloatRegister(i-6), F46, F44, F40); + __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); + __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); + } else { + __ aes_dround23_l(as_FloatRegister(i-4), F46, F44, F42); + __ aes_dround01_l(as_FloatRegister(i-6), F46, F44, F40); + __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62); + __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60); + } + } + + __ movxtod(L0,F46); + __ movxtod(L1,F44); + __ fxor(FloatRegisterImpl::D, F46, F40, F40); + __ fxor(FloatRegisterImpl::D, F44, F42, F42); + + __ stf(FloatRegisterImpl::D, F40, to, 0); + __ stf(FloatRegisterImpl::D, F42, to, 8); + + __ movxtod(G4,F56); + __ movxtod(G5,F58); + __ mov(L4,L0); + __ mov(L5,L1); + __ fxor(FloatRegisterImpl::D, F56, F60, F60); + __ fxor(FloatRegisterImpl::D, F58, F62, F62); + + __ stf(FloatRegisterImpl::D, F60, to, 16); + __ stf(FloatRegisterImpl::D, F62, to, 24); + + __ add(from, 32, from); + __ add(to, 32, to); + __ subcc(len_reg, 32, len_reg); + __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128); + __ delayed()->nop(); + __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end); + __ delayed()->nop(); + + __ align(OptoLoopAlignment); + __ BIND(L_dec_next2_blocks192); + __ nop(); + + // F48:F50 used for first 16-bytes + __ ldx(from,0,G4); + __ ldx(from,8,G5); + __ xor3(L2,G4,G1); + __ movxtod(G1,F48); + __ xor3(L3,G5,G1); + __ movxtod(G1,F50); + + // F60:F62 used for next 16-bytes + __ ldx(from,16,L4); + __ ldx(from,24,L5); + __ xor3(L2,L4,G1); + __ movxtod(G1,F60); + __ xor3(L3,L5,G1); + __ movxtod(G1,F62); + + for ( int i = 46; i >= 6; i -= 8 ) { + __ aes_dround23(as_FloatRegister(i), F48, F50, F52); + __ aes_dround01(as_FloatRegister(i-2), F48, F50, F54); + __ aes_dround23(as_FloatRegister(i), F60, F62, F58); + __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); + if (i != 6 ) { + __ aes_dround23(as_FloatRegister(i-4), F54, F52, F50); + __ aes_dround01(as_FloatRegister(i-6), F54, F52, F48); + __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); + __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); + } else { + __ aes_dround23_l(as_FloatRegister(i-4), F54, F52, F50); + __ aes_dround01_l(as_FloatRegister(i-6), F54, F52, F48); + __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62); + __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60); + } + } + + __ movxtod(L0,F54); + __ movxtod(L1,F52); + __ fxor(FloatRegisterImpl::D, F54, F48, F48); + __ fxor(FloatRegisterImpl::D, F52, F50, F50); + + __ stf(FloatRegisterImpl::D, F48, to, 0); + __ stf(FloatRegisterImpl::D, F50, to, 8); + + __ movxtod(G4,F56); + __ movxtod(G5,F58); + __ mov(L4,L0); + __ mov(L5,L1); + __ fxor(FloatRegisterImpl::D, F56, F60, F60); + __ fxor(FloatRegisterImpl::D, F58, F62, F62); + + __ stf(FloatRegisterImpl::D, F60, to, 16); + __ stf(FloatRegisterImpl::D, F62, to, 24); + + __ add(from, 32, from); + __ add(to, 32, to); + __ subcc(len_reg, 32, len_reg); + __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192); + __ delayed()->nop(); + __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end); + __ delayed()->nop(); + + __ align(OptoLoopAlignment); + __ BIND(L_dec_next2_blocks256); + __ nop(); + + // F0:F2 used for first 16-bytes + __ ldx(from,0,G4); + __ ldx(from,8,G5); + __ xor3(L2,G4,G1); + __ movxtod(G1,F0); + __ xor3(L3,G5,G1); + __ movxtod(G1,F2); + + // F60:F62 used for next 16-bytes + __ ldx(from,16,L4); + __ ldx(from,24,L5); + __ xor3(L2,L4,G1); + __ movxtod(G1,F60); + __ xor3(L3,L5,G1); + __ 
movxtod(G1,F62); + + __ aes_dround23(F54, F0, F2, F4); + __ aes_dround01(F52, F0, F2, F6); + __ aes_dround23(F54, F60, F62, F58); + __ aes_dround01(F52, F60, F62, F56); + __ aes_dround23(F50, F6, F4, F2); + __ aes_dround01(F48, F6, F4, F0); + __ aes_dround23(F50, F56, F58, F62); + __ aes_dround01(F48, F56, F58, F60); + // save F48:F54 in temp registers + __ movdtox(F54,G2); + __ movdtox(F52,G3); + __ movdtox(F50,G6); + __ movdtox(F48,G1); + for ( int i = 46; i >= 14; i -= 8 ) { + __ aes_dround23(as_FloatRegister(i), F0, F2, F4); + __ aes_dround01(as_FloatRegister(i-2), F0, F2, F6); + __ aes_dround23(as_FloatRegister(i), F60, F62, F58); + __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); + __ aes_dround23(as_FloatRegister(i-4), F6, F4, F2); + __ aes_dround01(as_FloatRegister(i-6), F6, F4, F0); + __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); + __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); + } + // init F48:F54 with F0:F6 values (original key) + __ ldf(FloatRegisterImpl::D, original_key, 0, F48); + __ ldf(FloatRegisterImpl::D, original_key, 8, F50); + __ ldf(FloatRegisterImpl::D, original_key, 16, F52); + __ ldf(FloatRegisterImpl::D, original_key, 24, F54); + __ aes_dround23(F54, F0, F2, F4); + __ aes_dround01(F52, F0, F2, F6); + __ aes_dround23(F54, F60, F62, F58); + __ aes_dround01(F52, F60, F62, F56); + __ aes_dround23_l(F50, F6, F4, F2); + __ aes_dround01_l(F48, F6, F4, F0); + __ aes_dround23_l(F50, F56, F58, F62); + __ aes_dround01_l(F48, F56, F58, F60); + // re-init F48:F54 with their original values + __ movxtod(G2,F54); + __ movxtod(G3,F52); + __ movxtod(G6,F50); + __ movxtod(G1,F48); + + __ movxtod(L0,F6); + __ movxtod(L1,F4); + __ fxor(FloatRegisterImpl::D, F6, F0, F0); + __ fxor(FloatRegisterImpl::D, F4, F2, F2); + + __ stf(FloatRegisterImpl::D, F0, to, 0); + __ stf(FloatRegisterImpl::D, F2, to, 8); + + __ movxtod(G4,F56); + __ movxtod(G5,F58); + __ mov(L4,L0); + __ mov(L5,L1); + __ fxor(FloatRegisterImpl::D, F56, F60, F60); + __ fxor(FloatRegisterImpl::D, F58, F62, F62); + + __ stf(FloatRegisterImpl::D, F60, to, 16); + __ stf(FloatRegisterImpl::D, F62, to, 24); + + __ add(from, 32, from); + __ add(to, 32, to); + __ subcc(len_reg, 32, len_reg); + __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks256); + __ delayed()->nop(); + + __ BIND(L_cbcdec_end); + __ stx(L0, rvec, 0); + __ stx(L1, rvec, 8); + __ restore(); + __ mov(L0, O0); + __ retl(); + __ delayed()->nop(); + + return start; + } + void generate_initial() { // Generates all stubs and initializes the entry points @@ -3368,6 +4137,14 @@ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, &StubRoutines::_safefetchN_fault_pc, &StubRoutines::_safefetchN_continuation_pc); + + // generate AES intrinsics code + if (UseAESIntrinsics) { + StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); + StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); + StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); + StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); + } } diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/templateInterpreter_sparc.cpp --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -156,6 +156,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t 
index_size) { address entry = __ pc(); + if (state == atos) { + __ profile_return_type(O0, G3_scratch, G1_scratch); + } + #if !defined(_LP64) && defined(COMPILER2) // All return values are where we want them, except for Longs. C2 returns // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1. @@ -1333,6 +1337,7 @@ __ movbool(true, G3_scratch); __ stbool(G3_scratch, do_not_unlock_if_synchronized); + __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch); // increment invocation counter and check for overflow // // Note: checking for negative value instead of overflow diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/templateTable_sparc.cpp --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -2942,12 +2942,12 @@ void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) { - Register Rtemp = G4_scratch; Register Rcall = Rindex; assert_different_registers(Rcall, G5_method, Gargs, Rret); // get target Method* & entry point __ lookup_virtual_method(Rrecv, Rindex, G5_method); + __ profile_arguments_type(G5_method, Rcall, Gargs, true); __ call_from_interpreter(Rcall, Gargs, Rret); } @@ -3022,6 +3022,7 @@ __ null_check(O0); __ profile_final_call(O4); + __ profile_arguments_type(G5_method, Rscratch, Gargs, true); // get return address AddressLiteral table(Interpreter::invoke_return_entry_table()); @@ -3051,6 +3052,7 @@ // do the call __ profile_call(O4); + __ profile_arguments_type(G5_method, Rscratch, Gargs, false); __ call_from_interpreter(Rscratch, Gargs, Rret); } @@ -3066,6 +3068,7 @@ // do the call __ profile_call(O4); + __ profile_arguments_type(G5_method, Rscratch, Gargs, false); __ call_from_interpreter(Rscratch, Gargs, Rret); } @@ -3091,6 +3094,7 @@ // do the call - the index (f2) contains the Method* assert_different_registers(G5_method, Gargs, Rcall); __ mov(Rindex, G5_method); + __ profile_arguments_type(G5_method, Rcall, Gargs, true); __ call_from_interpreter(Rcall, Gargs, Rret); __ bind(notFinal); @@ -3197,6 +3201,7 @@ Register Rcall = Rinterface; assert_different_registers(Rcall, G5_method, Gargs, Rret); + __ profile_arguments_type(G5_method, Rcall, Gargs, true); __ call_from_interpreter(Rcall, Gargs, Rret); } @@ -3226,6 +3231,7 @@ // do the call __ verify_oop(G4_mtype); __ profile_final_call(O4); // FIXME: profile the LambdaForm also + __ profile_arguments_type(G5_method, Rscratch, Gargs, true); __ call_from_interpreter(Rscratch, Gargs, Rret); } @@ -3262,6 +3268,7 @@ // do the call __ verify_oop(G4_callsite); + __ profile_arguments_type(G5_method, Rscratch, Gargs, false); __ call_from_interpreter(Rscratch, Gargs, Rret); } diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/vm_version_sparc.cpp --- a/src/cpu/sparc/vm/vm_version_sparc.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/vm_version_sparc.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -234,7 +234,7 @@ assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size"); char buf[512]; - jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")), (has_hardware_popc() ? ", popc" : ""), (has_vis1() ? ", vis1" : ""), @@ -242,6 +242,7 @@ (has_vis3() ? ", vis3" : ""), (has_blk_init() ? ", blk_init" : ""), (has_cbcond() ? ", cbcond" : ""), + (has_aes() ? ", aes" : ""), (is_ultra3() ? 
", ultra3" : ""), (is_sun4v() ? ", sun4v" : ""), (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")), @@ -265,6 +266,41 @@ if (!has_vis1()) // Drop to 0 if no VIS1 support UseVIS = 0; + // T2 and above should have support for AES instructions + if (has_aes()) { + if (UseVIS > 0) { // AES intrinsics use FXOR instruction which is VIS1 + if (FLAG_IS_DEFAULT(UseAES)) { + FLAG_SET_DEFAULT(UseAES, true); + } + if (FLAG_IS_DEFAULT(UseAESIntrinsics)) { + FLAG_SET_DEFAULT(UseAESIntrinsics, true); + } + // we disable both the AES flags if either of them is disabled on the command line + if (!UseAES || !UseAESIntrinsics) { + FLAG_SET_DEFAULT(UseAES, false); + FLAG_SET_DEFAULT(UseAESIntrinsics, false); + } + } else { + if (UseAES || UseAESIntrinsics) { + warning("SPARC AES intrinsics require VIS1 instruction support. Intrinsics will be disabled."); + if (UseAES) { + FLAG_SET_DEFAULT(UseAES, false); + } + if (UseAESIntrinsics) { + FLAG_SET_DEFAULT(UseAESIntrinsics, false); + } + } + } + } else if (UseAES || UseAESIntrinsics) { + warning("AES instructions are not available on this CPU"); + if (UseAES) { + FLAG_SET_DEFAULT(UseAES, false); + } + if (UseAESIntrinsics) { + FLAG_SET_DEFAULT(UseAESIntrinsics, false); + } + } + if (FLAG_IS_DEFAULT(ContendedPaddingWidth) && (cache_line_size > ContendedPaddingWidth)) ContendedPaddingWidth = cache_line_size; diff -r 4c8bda53850f -r b2fee789d23f src/cpu/sparc/vm/vm_version_sparc.hpp --- a/src/cpu/sparc/vm/vm_version_sparc.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/sparc/vm/vm_version_sparc.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -48,7 +48,9 @@ sparc64_family = 14, M_family = 15, T_family = 16, - T1_model = 17 + T1_model = 17, + sparc5_instructions = 18, + aes_instructions = 19 }; enum Feature_Flag_Set { @@ -73,6 +75,8 @@ M_family_m = 1 << M_family, T_family_m = 1 << T_family, T1_model_m = 1 << T1_model, + sparc5_instructions_m = 1 << sparc5_instructions, + aes_instructions_m = 1 << aes_instructions, generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m, generic_v9_m = generic_v8_m | v9_instructions_m, @@ -123,6 +127,8 @@ static bool has_vis3() { return (_features & vis3_instructions_m) != 0; } static bool has_blk_init() { return (_features & blk_init_instructions_m) != 0; } static bool has_cbcond() { return (_features & cbcond_instructions_m) != 0; } + static bool has_sparc5_instr() { return (_features & sparc5_instructions_m) != 0; } + static bool has_aes() { return (_features & aes_instructions_m) != 0; } static bool supports_compare_and_exchange() { return has_v9(); } @@ -133,6 +139,7 @@ static bool is_M_series() { return is_M_family(_features); } static bool is_T4() { return is_T_family(_features) && has_cbcond(); } + static bool is_T7() { return is_T_family(_features) && has_sparc5_instr(); } // Fujitsu SPARC64 static bool is_sparc64() { return (_features & sparc64_family_m) != 0; } @@ -152,7 +159,7 @@ static const char* cpu_features() { return _features_str; } static intx prefetch_data_size() { - return is_T4() ? 32 : 64; // default prefetch block size on sparc + return is_T4() && !is_T7() ? 
32 : 64; // default prefetch block size on sparc } // Prefetch diff -r 4c8bda53850f -r b2fee789d23f src/cpu/x86/vm/c1_LIRAssembler_x86.cpp --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -38,6 +38,7 @@ #include "nativeInst_x86.hpp" #include "oops/objArrayKlass.hpp" #include "runtime/sharedRuntime.hpp" +#include "vmreg_x86.inline.hpp" // These masks are used to provide 128-bit aligned bitmasks to the XMM @@ -1006,6 +1007,9 @@ if (UseCompressedOops && !wide) { __ movptr(compressed_src, src->as_register()); __ encode_heap_oop(compressed_src); + if (patch_code != lir_patch_none) { + info->oop_map()->set_narrowoop(compressed_src->as_VMReg()); + } } #endif } diff -r 4c8bda53850f -r b2fee789d23f src/cpu/x86/vm/c1_LIRGenerator_x86.cpp --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -941,6 +941,8 @@ case vmIntrinsics::_updateCRC32: { LIRItem crc(x->argument_at(0), this); LIRItem val(x->argument_at(1), this); + // val is destroyed by update_crc32 + val.set_destroys_register(); crc.load_item(); val.load_item(); __ update_crc32(crc.result(), val.result(), result); diff -r 4c8bda53850f -r b2fee789d23f src/cpu/x86/vm/interp_masm_x86.cpp --- a/src/cpu/x86/vm/interp_masm_x86.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/x86/vm/interp_masm_x86.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -127,7 +127,7 @@ if (MethodData::profile_return()) { // We're right after the type profile for the last - // argument. tmp is the number of cell left in the + // argument. tmp is the number of cells left in the // CallTypeData/VirtualCallTypeData to reach its end. Non null // if there's a return to profile. assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type"); @@ -198,7 +198,7 @@ // parameters. Collect profiling from last parameter down. // mdo start + parameters offset + array length - 1 addptr(mdp, tmp1); - movptr(tmp1, Address(mdp, in_bytes(ArrayData::array_len_offset()))); + movptr(tmp1, Address(mdp, ArrayData::array_len_offset())); decrement(tmp1, TypeStackSlotEntries::per_arg_count()); Label loop; diff -r 4c8bda53850f -r b2fee789d23f src/cpu/x86/vm/stubGenerator_x86_32.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -2403,6 +2403,9 @@ // c_rarg3 - r vector byte array address // c_rarg4 - input length // + // Output: + // rax - input length + // address generate_cipherBlockChaining_encryptAESCrypt() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); @@ -2483,7 +2486,7 @@ __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object handleSOERegisters(false /*restoring*/); - __ movl(rax, 0); // return 0 (why?) 
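
The replacement line that follows answers the old "(why?)": the Java CipherBlockChaining methods these stubs intrinsify return the number of bytes they processed, and returning 0 was only tolerable while callers ignored the result. A C-level model of the contract, assuming len is a multiple of the 16-byte AES block size as the Java caller guarantees (both helper functions are hypothetical stand-ins for the AES-NI sequences in the stub):

    #include <string.h>

    void xor_block(unsigned char* dst, const unsigned char* a, const unsigned char* b);
    void aes_encrypt_block(const unsigned char* in, const unsigned char* key,
                           unsigned char* out);

    int cbc_encrypt(const unsigned char* in, unsigned char* out,
                    const unsigned char* key, unsigned char* rvec, int len) {
      for (int i = 0; i < len; i += 16) {
        unsigned char tmp[16];
        xor_block(tmp, in + i, rvec);          // P_i ^ C_{i-1}
        aes_encrypt_block(tmp, key, out + i);  // C_i = E_K(P_i ^ C_{i-1})
        memcpy(rvec, out + i, 16);             // C_i becomes the next chaining value
      }
      return len;  // bytes processed -- the value the stub must now leave in rax
    }
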
+ __ movptr(rax, len_param); // return length __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -2557,6 +2560,9 @@ // c_rarg3 - r vector byte array address // c_rarg4 - input length // + // Output: + // rax - input length + // address generate_cipherBlockChaining_decryptAESCrypt() { assert(UseAES, "need AES instructions and misaligned SSE support"); @@ -2650,7 +2656,7 @@ __ movptr(rvec , rvec_param); // restore this since used in loop __ movdqu(Address(rvec, 0), xmm_temp); // final value of r stored in rvec of CipherBlockChaining object handleSOERegisters(false /*restoring*/); - __ movl(rax, 0); // return 0 (why?) + __ movptr(rax, len_param); // return length __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); diff -r 4c8bda53850f -r b2fee789d23f src/cpu/x86/vm/stubGenerator_x86_64.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -3217,6 +3217,9 @@ // c_rarg3 - r vector byte array address // c_rarg4 - input length // + // Output: + // rax - input length + // address generate_cipherBlockChaining_encryptAESCrypt() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); @@ -3232,7 +3235,7 @@ #ifndef _WIN64 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) #else - const Address len_mem(rsp, 6 * wordSize); // length is on stack on Win64 + const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 const Register len_reg = r10; // pick the first volatile windows register #endif const Register pos = rax; @@ -3259,6 +3262,8 @@ for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { __ movdqu(xmm_save(i), as_XMMRegister(i)); } +#else + __ push(len_reg); // Save #endif const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front @@ -3301,8 +3306,10 @@ for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { __ movdqu(as_XMMRegister(i), xmm_save(i)); } + __ movl(rax, len_mem); +#else + __ pop(rax); // return length #endif - __ movl(rax, 0); // return 0 (why?) __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -3409,6 +3416,9 @@ // c_rarg3 - r vector byte array address // c_rarg4 - input length // + // Output: + // rax - input length + // address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { assert(UseAES, "need AES instructions and misaligned SSE support"); @@ -3427,7 +3437,7 @@ #ifndef _WIN64 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) #else - const Address len_mem(rsp, 6 * wordSize); // length is on stack on Win64 + const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 const Register len_reg = r10; // pick the first volatile windows register #endif const Register pos = rax; @@ -3448,7 +3458,10 @@ for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { __ movdqu(xmm_save(i), as_XMMRegister(i)); } +#else + __ push(len_reg); // Save #endif + // the java expanded key ordering is rotated one position from what we want // so we start from 0x10 here and hit 0x00 last const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front @@ -3554,8 +3567,10 @@ for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { __ movdqu(as_XMMRegister(i), xmm_save(i)); } + __ movl(rax, len_mem); +#else + __ pop(rax); // return length #endif - __ movl(rax, 0); // return 0 (why?) 
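Taken together, the stub changes above make all four CBC stubs hand the processed input length back in rax instead of a hard-coded 0. On Win64 the fifth argument is now fetched from a frame-pointer-relative slot: after the stub's enter(), rbp stays fixed for the life of the frame while rsp moves with later pushes, so len_mem(rbp, 6 * wordSize) is still valid when it is read at the end of the stub; the non-Windows path simply preserves len_reg with a push/pop around the loop. A rough C++ analogue of the new contract, with hypothetical names and the AES rounds elided:

#include <stdint.h>

// Sketch only: cbc_encrypt_rounds() stands in for the generated AES-CBC loop.
static void cbc_encrypt_rounds(const uint8_t*, uint8_t*, const uint32_t*,
                               uint8_t*, int) { /* rounds elided */ }

int cipherBlockChaining_encrypt(const uint8_t* from, uint8_t* to,
                                const uint32_t* key, uint8_t* rvec, int len) {
  cbc_encrypt_rounds(from, to, key, rvec, len);
  return len;  // the stub used to answer 0; callers can now consume the length
}

int main() {
  uint8_t in[16] = {0}, out[16], iv[16] = {0};
  uint32_t key[44] = {0};  // AES-128 expanded key is 44 words
  return cipherBlockChaining_encrypt(in, out, key, iv, 16) == 16 ? 0 : 1;
}

The returned length is what allows the intrinsic's Java-level descriptor to change from ([BII[BI)V to ([BII[BI)I in the vmSymbols.hpp hunk further down.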
__ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); diff -r 4c8bda53850f -r b2fee789d23f src/cpu/x86/vm/x86.ad --- a/src/cpu/x86/vm/x86.ad Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/x86/vm/x86.ad Tue Feb 11 11:26:05 2014 -0800 @@ -581,6 +581,12 @@ return !AlignVector; // can be changed by flag } +// x86 AES instructions are compatible with SunJCE expanded +// keys, hence we do not need to pass the original key to stubs +const bool Matcher::pass_original_key_for_aes() { + return false; +} + // Helper methods for MachSpillCopyNode::implementation(). static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo, int src_hi, int dst_hi, uint ireg, outputStream* st) { diff -r 4c8bda53850f -r b2fee789d23f src/cpu/x86/vm/x86_32.ad --- a/src/cpu/x86/vm/x86_32.ad Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/x86/vm/x86_32.ad Tue Feb 11 11:26:05 2014 -0800 @@ -3889,6 +3889,17 @@ interface(CONST_INTER); %} +// Int Immediate non-negative +operand immU31() +%{ + predicate(n->get_int() >= 0); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + // Constant for long shifts operand immI_32() %{ predicate( n->get_int() == 32 ); @@ -6119,12 +6130,12 @@ ins_pipe(ialu_reg_mem); %} -// Load Integer with 32-bit mask into Long Register -instruct loadI2L_immI(eRegL dst, memory mem, immI mask, eFlagsReg cr) %{ +// Load Integer with 31-bit mask into Long Register +instruct loadI2L_immU31(eRegL dst, memory mem, immU31 mask, eFlagsReg cr) %{ match(Set dst (ConvI2L (AndI (LoadI mem) mask))); effect(KILL cr); - format %{ "MOV $dst.lo,$mem\t# int & 32-bit mask -> long\n\t" + format %{ "MOV $dst.lo,$mem\t# int & 31-bit mask -> long\n\t" "XOR $dst.hi,$dst.hi\n\t" "AND $dst.lo,$mask" %} ins_encode %{ diff -r 4c8bda53850f -r b2fee789d23f src/cpu/x86/vm/x86_64.ad --- a/src/cpu/x86/vm/x86_64.ad Thu Feb 06 13:08:44 2014 -0800 +++ b/src/cpu/x86/vm/x86_64.ad Tue Feb 11 11:26:05 2014 -0800 @@ -3086,6 +3086,17 @@ interface(CONST_INTER); %} +// Int Immediate non-negative +operand immU31() +%{ + predicate(n->get_int() >= 0); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + // Constant for long shifts operand immI_32() %{ @@ -5042,12 +5053,12 @@ ins_pipe(ialu_reg_mem); %} -// Load Integer with a 32-bit mask into Long Register -instruct loadI2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{ +// Load Integer with a 31-bit mask into Long Register +instruct loadI2L_immU31(rRegL dst, memory mem, immU31 mask, rFlagsReg cr) %{ match(Set dst (ConvI2L (AndI (LoadI mem) mask))); effect(KILL cr); - format %{ "movl $dst, $mem\t# int & 32-bit mask -> long\n\t" + format %{ "movl $dst, $mem\t# int & 31-bit mask -> long\n\t" "andl $dst, $mask" %} ins_encode %{ Register Rdst = $dst$$Register; diff -r 4c8bda53850f -r b2fee789d23f src/os/bsd/dtrace/hotspot.d --- a/src/os/bsd/dtrace/hotspot.d Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os/bsd/dtrace/hotspot.d Tue Feb 11 11:26:05 2014 -0800 @@ -56,7 +56,7 @@ probe thread__park__end(uintptr_t); probe thread__unpark(uintptr_t); probe method__compile__begin( - const char*, uintptr_t, const char*, uintptr_t, const char*, uintptr_t, const char*, uintptr_t); + char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); probe method__compile__end( char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, uintptr_t); diff -r 4c8bda53850f -r b2fee789d23f src/os/bsd/vm/os_bsd.cpp --- a/src/os/bsd/vm/os_bsd.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os/bsd/vm/os_bsd.cpp Tue Feb 11 
11:26:05 2014 -0800 @@ -1557,6 +1557,17 @@ } #endif /* !__APPLE__ */ +void* os::get_default_process_handle() { +#ifdef __APPLE__ + // MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY + // to avoid finding unexpected symbols on second (or later) + // loads of a library. + return (void*)::dlopen(NULL, RTLD_FIRST); +#else + return (void*)::dlopen(NULL, RTLD_LAZY); +#endif +} + // XXX: Do we need a lock around this as per Linux? void* os::dll_lookup(void* handle, const char* name) { return dlsym(handle, name); @@ -2625,9 +2636,21 @@ } } -int os::naked_sleep() { - // %% make the sleep time an integer flag. for now use 1 millisec. - return os::sleep(Thread::current(), 1, false); +void os::naked_short_sleep(jlong ms) { + struct timespec req; + + assert(ms < 1000, "Un-interruptable sleep, short time use only"); + req.tv_sec = 0; + if (ms > 0) { + req.tv_nsec = (ms % 1000) * 1000000; + } + else { + req.tv_nsec = 1; + } + + nanosleep(&req, NULL); + + return; } // Sleep forever; naked call to OS-specific sleep; use with CAUTION diff -r 4c8bda53850f -r b2fee789d23f src/os/linux/vm/os_linux.cpp --- a/src/os/linux/vm/os_linux.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os/linux/vm/os_linux.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -2104,6 +2104,9 @@ return res; } +void* os::get_default_process_handle() { + return (void*)::dlopen(NULL, RTLD_LAZY); +} static bool _print_ascii_file(const char* filename, outputStream* st) { int fd = ::open(filename, O_RDONLY); @@ -3868,9 +3871,33 @@ } } -int os::naked_sleep() { - // %% make the sleep time an integer flag. for now use 1 millisec. - return os::sleep(Thread::current(), 1, false); +// +// Short sleep, direct OS call. +// +// Note: certain versions of Linux CFS scheduler (since 2.6.23) do not guarantee +// sched_yield(2) will actually give up the CPU: +// +// * Alone on this particular CPU, keeps running. +// * Before the introduction of "skip_buddy" with "compat_yield" disabled +// (pre 2.6.39). +// +// So calling this with 0 is an alternative. +// +void os::naked_short_sleep(jlong ms) { + struct timespec req; + + assert(ms < 1000, "Un-interruptable sleep, short time use only"); + req.tv_sec = 0; + if (ms > 0) { + req.tv_nsec = (ms % 1000) * 1000000; + } + else { + req.tv_nsec = 1; + } + + nanosleep(&req, NULL); + + return; } // Sleep forever; naked call to OS-specific sleep; use with CAUTION diff -r 4c8bda53850f -r b2fee789d23f src/os/posix/vm/os_posix.cpp --- a/src/os/posix/vm/os_posix.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os/posix/vm/os_posix.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -262,10 +262,6 @@ return ::fdopen(fd, mode); } -void* os::get_default_process_handle() { - return (void*)::dlopen(NULL, RTLD_LAZY); -} - // Builds a platform dependent Agent_OnLoad_ function name // which is used to find statically linked in agents.
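With os::get_default_process_handle() moved out of os_posix.cpp and into the per-OS files above, BSD can pass the Apple-only RTLD_FIRST flag while Linux and Solaris keep RTLD_LAZY. A minimal POSIX sketch of what the handle is for, namely looking up a symbol in the main executable itself; Agent_OnLoad_demo is a made-up symbol, and on Linux the program must be linked with -rdynamic for dlsym to see it:

#include <dlfcn.h>
#include <cstdio>

// Made-up stand-in for a statically linked agent entry point.
extern "C" void Agent_OnLoad_demo() { std::puts("agent entry found"); }

int main() {
#ifdef __APPLE__
  // RTLD_FIRST restricts lookups to this image, so a later load of a
  // library defining the same symbol name cannot shadow it.
  void* self = ::dlopen(NULL, RTLD_FIRST);
#else
  void* self = ::dlopen(NULL, RTLD_LAZY);
#endif
  if (void* sym = ::dlsym(self, "Agent_OnLoad_demo")) {
    reinterpret_cast<void (*)()>(sym)();
  }
  return 0;
}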
// Parameters: diff -r 4c8bda53850f -r b2fee789d23f src/os/solaris/dtrace/hotspot.d --- a/src/os/solaris/dtrace/hotspot.d Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os/solaris/dtrace/hotspot.d Tue Feb 11 11:26:05 2014 -0800 @@ -25,7 +25,7 @@ provider hotspot { probe class__loaded(char*, uintptr_t, void*, uintptr_t); probe class__unloaded(char*, uintptr_t, void*, uintptr_t); - probe class__initialization__required(char*, uintptr_t, void*, intptr_t,int); + probe class__initialization__required(char*, uintptr_t, void*, intptr_t); probe class__initialization__recursive(char*, uintptr_t, void*, intptr_t,int); probe class__initialization__concurrent(char*, uintptr_t, void*, intptr_t,int); probe class__initialization__erroneous(char*, uintptr_t, void*, intptr_t, int); diff -r 4c8bda53850f -r b2fee789d23f src/os/solaris/dtrace/hotspot_jni.d --- a/src/os/solaris/dtrace/hotspot_jni.d Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os/solaris/dtrace/hotspot_jni.d Tue Feb 11 11:26:05 2014 -0800 @@ -211,7 +211,7 @@ probe CallVoidMethodV__return(); probe CreateJavaVM__entry(void**, void**, void*); probe CreateJavaVM__return(uint32_t); - probe DefineClass__entry(void*, const char*, void*, char, uintptr_t); + probe DefineClass__entry(void*, const char*, void*, char*, uintptr_t); probe DefineClass__return(void*); probe DeleteGlobalRef__entry(void*, void*); probe DeleteGlobalRef__return(); diff -r 4c8bda53850f -r b2fee789d23f src/os/solaris/vm/os_solaris.cpp --- a/src/os/solaris/vm/os_solaris.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os/solaris/vm/os_solaris.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -2146,6 +2146,10 @@ return dlsym(handle, name); } +void* os::get_default_process_handle() { + return (void*)::dlopen(NULL, RTLD_LAZY); +} + int os::stat(const char *path, struct stat *sbuf) { char pathbuf[MAX_PATH]; if (strlen(path) > MAX_PATH - 1) { @@ -3536,9 +3540,14 @@ return os_sleep(millis, interruptible); } -int os::naked_sleep() { - // %% make the sleep time an integer flag. for now use 1 millisec. - return os_sleep(1, false); +void os::naked_short_sleep(jlong ms) { + assert(ms < 1000, "Un-interruptable sleep, short time use only"); + + // usleep is deprecated and removed from POSIX, in favour of nanosleep, but + // Solaris requires -lrt for this. + usleep((ms * 1000)); + + return; } // Sleep forever; naked call to OS-specific sleep; use with CAUTION diff -r 4c8bda53850f -r b2fee789d23f src/os/windows/vm/os_windows.cpp --- a/src/os/windows/vm/os_windows.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os/windows/vm/os_windows.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -166,12 +166,10 @@ return; } -#ifndef _WIN64 // previous UnhandledExceptionFilter, if there is one static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); -#endif void os::init_system_properties_values() { /* sysclasspath, java_home, dll_dir */ { @@ -2240,11 +2238,11 @@ return EXCEPTION_CONTINUE_EXECUTION; } -#ifndef _WIN64 //----------------------------------------------------------------------------- LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { + PCONTEXT ctx = exceptionInfo->ContextRecord; +#ifndef _WIN64 // handle exception caused by native method modifying control word - PCONTEXT ctx = exceptionInfo->ContextRecord; DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; switch (exception_code) { @@ -2270,17 +2268,11 @@ // UnhandledExceptionFilter. 
return (prev_uef_handler)(exceptionInfo); } - - return EXCEPTION_CONTINUE_SEARCH; -} -#else //_WIN64 +#else // !_WIN64 /* On Windows, the mxcsr control bits are non-volatile across calls See also CR 6192333 - If EXCEPTION_FLT_* happened after some native method modified - mxcsr - it is not a jvm fault. - However should we decide to restore of mxcsr after a faulty - native method we can uncomment following code + */ jint MxCsr = INITIAL_MXCSR; // we can't use StubRoutines::addr_mxcsr_std() // because in Win64 mxcsr is not saved there @@ -2288,10 +2280,10 @@ ctx->MxCsr = MxCsr; return EXCEPTION_CONTINUE_EXECUTION; } - -*/ -#endif //_WIN64 - +#endif // !_WIN64 + + return EXCEPTION_CONTINUE_SEARCH; +} // Fatal error reporting is single threaded so we can make this a // static and preallocated. If it's more than MAX_PATH silently ignore @@ -2640,7 +2632,6 @@ } // switch } -#ifndef _WIN64 if (((thread->thread_state() == _thread_in_Java) || (thread->thread_state() == _thread_in_native)) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) @@ -2648,7 +2639,6 @@ LONG result=Handle_FLT_Exception(exceptionInfo); if (result==EXCEPTION_CONTINUE_EXECUTION) return result; } -#endif //_WIN64 } if (exception_code != EXCEPTION_BREAKPOINT) { @@ -3496,6 +3486,16 @@ return result; } +// +// Short sleep, direct OS call. +// +// ms = 0, means allow others (if any) to run. +// +void os::naked_short_sleep(jlong ms) { + assert(ms < 1000, "Un-interruptable sleep, short time use only"); + Sleep(ms); +} + // Sleep forever; naked call to OS-specific sleep; use with CAUTION void os::infinite_sleep() { while (true) { // sleep forever ... diff -r 4c8bda53850f -r b2fee789d23f src/os_cpu/linux_x86/vm/os_linux_x86.cpp --- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -49,6 +49,7 @@ #include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "runtime/timer.hpp" +#include "services/memTracker.hpp" #include "utilities/events.hpp" #include "utilities/vmError.hpp" @@ -906,6 +907,9 @@ if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) { return; // No matter, we tried, best effort. } + + MemTracker::record_virtual_memory_type((address)codebuf, mtInternal); + if (PrintMiscellaneous && (Verbose || WizardMode)) { tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf); } diff -r 4c8bda53850f -r b2fee789d23f src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp --- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -75,13 +75,19 @@ do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m); // Extract valid instruction set extensions. 
- uint_t av; - uint_t avn = os::Solaris::getisax(&av, 1); - assert(avn == 1, "should only return one av"); + uint_t avs[2]; + uint_t avn = os::Solaris::getisax(avs, 2); + assert(avn <= 2, "should return two or less av's"); + uint_t av = avs[0]; #ifndef PRODUCT - if (PrintMiscellaneous && Verbose) - tty->print_cr("getisax(2) returned: " PTR32_FORMAT, av); + if (PrintMiscellaneous && Verbose) { + tty->print("getisax(2) returned: " PTR32_FORMAT, av); + if (avn > 1) { + tty->print(", " PTR32_FORMAT, avs[1]); + } + tty->cr(); + } #endif if (av & AV_SPARC_MUL32) features |= hardware_mul32_m; @@ -91,6 +97,13 @@ if (av & AV_SPARC_POPC) features |= hardware_popc_m; if (av & AV_SPARC_VIS) features |= vis1_instructions_m; if (av & AV_SPARC_VIS2) features |= vis2_instructions_m; + if (avn > 1) { + uint_t av2 = avs[1]; +#ifndef AV2_SPARC_SPARC5 +#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */ +#endif + if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m; + } // Next values are not defined before Solaris 10 // but Solaris 8 is used for jdk6 update builds. @@ -119,6 +132,11 @@ #endif if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m; +#ifndef AV_SPARC_AES +#define AV_SPARC_AES 0x00020000 /* aes instrs supported */ +#endif + if (av & AV_SPARC_AES) features |= aes_instructions_m; + } else { // getisax(2) failed, use the old legacy code. #ifndef PRODUCT diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/c1/c1_LIRGenerator.cpp --- a/src/share/vm/c1/c1_LIRGenerator.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -3288,7 +3288,10 @@ ciSignature* signature_at_call = NULL; x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); - ciKlass* exact = profile_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()), + // The offset within the MDO of the entry to update may be too large + // to be used in load/store instructions on some platforms. So have + // profile_type() compute the address of the profile in a register. + ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0, ret->type(), x->ret(), mdp, !x->needs_null_check(), signature_at_call->return_type()->as_klass(), diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/ci/ciField.cpp --- a/src/share/vm/ci/ciField.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/ci/ciField.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -201,16 +201,10 @@ return; } - // This field just may be constant. The only cases where it will - // not be constant are: - // - // 1. The field holds a non-perm-space oop. The field is, strictly - // speaking, constant but we cannot embed non-perm-space oops into - // generated code. For the time being we need to consider the - // field to be not constant. - // 2. The field is a *special* static&final field whose value - // may change. The three examples are java.lang.System.in, - // java.lang.System.out, and java.lang.System.err. + // This field just may be constant. The only case where it will + // not be constant is when the field is a *special* static&final field + // whose value may change. The three examples are java.lang.System.in, + // java.lang.System.out, and java.lang.System.err. 
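Returning to the vm_version_solaris_sparc.cpp hunk above: getisax(2) is now queried with a two-element buffer so the second AV word can be inspected, and the AV_SPARC_AES / AV2_SPARC_SPARC5 bits are defined locally for builds against older Solaris headers. A Solaris-only sketch of the same probing pattern (bit values copied from the hunk, printout illustrative):

#include <sys/auxv.h>
#include <cstdio>

#ifndef AV_SPARC_AES
#define AV_SPARC_AES 0x00020000      /* aes instrs supported */
#endif
#ifndef AV2_SPARC_SPARC5
#define AV2_SPARC_SPARC5 0x00000008  /* the new fp and sub instructions */
#endif

int main() {
  uint_t avs[2] = { 0, 0 };
  uint_t avn = getisax(avs, 2);  // fills up to two words of AV_* bits
  if (avn >= 1 && (avs[0] & AV_SPARC_AES)) {
    std::printf("CPU advertises AES instructions\n");
  }
  if (avn > 1 && (avs[1] & AV2_SPARC_SPARC5)) {
    std::printf("CPU advertises SPARC5 instructions\n");
  }
  return 0;
}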
KlassHandle k = _holder->get_Klass(); assert( SystemDictionary::System_klass() != NULL, "Check once per vm"); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/ci/ciField.hpp --- a/src/share/vm/ci/ciField.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/ci/ciField.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -130,9 +130,7 @@ // 1. The field is both static and final // 2. The canonical holder of the field has undergone // static initialization. - // 3. If the field is an object or array, then the oop - // in question is allocated in perm space. - // 4. The field is not one of the special static/final + // 3. The field is not one of the special static/final // non-constant fields. These are java.lang.System.in // and java.lang.System.out. Abomination. // diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/classfile/classFileParser.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -4098,8 +4098,12 @@ tty->print("[Loaded %s from %s]\n", this_klass->external_name(), cfs->source()); } else if (class_loader.is_null()) { - if (THREAD->is_Java_thread()) { - Klass* caller = ((JavaThread*)THREAD)->security_get_caller_class(1); + Klass* caller = + THREAD->is_Java_thread() + ? ((JavaThread*)THREAD)->security_get_caller_class(1) + : NULL; + // caller can be NULL, for example, during a JVMTI VM_Init hook + if (caller != NULL) { tty->print("[Loaded %s by instance of %s]\n", this_klass->external_name(), InstanceKlass::cast(caller)->external_name()); @@ -4500,8 +4504,8 @@ break; // didn't find any match; get out } - if (super_m->is_final() && - // matching method in super is final + if (super_m->is_final() && !super_m->is_static() && + // matching method in super is final, and not static (Reflection::verify_field_access(this_klass(), super_m->method_holder(), super_m->method_holder(), diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/classfile/defaultMethods.cpp --- a/src/share/vm/classfile/defaultMethods.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/classfile/defaultMethods.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -390,6 +390,20 @@ Symbol* get_exception_message() { return _exception_message; } Symbol* get_exception_name() { return _exception_name; } + // Return true if the specified klass has a static method that matches + // the name and signature of the target method. 
+ bool has_matching_static(InstanceKlass* root) { + if (_members.length() > 0) { + Pair entry = _members.at(0); + Method* impl = root->find_method(entry.first->name(), + entry.first->signature()); + if ((impl != NULL) && impl->is_static()) { + return true; + } + } + return false; + } + // Either sets the target or the exception error message void determine_target(InstanceKlass* root, TRAPS) { if (has_target() || throws_exception()) { @@ -416,19 +430,26 @@ } if (num_defaults == 0) { - if (qualified_methods.length() == 0) { - _exception_message = generate_no_defaults_message(CHECK); - } else { - assert(root != NULL, "Null root class"); - _exception_message = generate_method_message(root->name(), qualified_methods.at(0), CHECK); + // If the root klass has a static method with matching name and signature + // then do not generate an overpass method because it will hide the + // static method during resolution. + if (!has_matching_static(root)) { + if (qualified_methods.length() == 0) { + _exception_message = generate_no_defaults_message(CHECK); + } else { + assert(root != NULL, "Null root class"); + _exception_message = generate_method_message(root->name(), qualified_methods.at(0), CHECK); + } + _exception_name = vmSymbols::java_lang_AbstractMethodError(); } - _exception_name = vmSymbols::java_lang_AbstractMethodError(); + // If only one qualified method is default, select that } else if (num_defaults == 1) { _selected_target = qualified_methods.at(default_index); - } else if (num_defaults > 1) { - _exception_message = generate_conflicts_message(&qualified_methods,CHECK); - _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError(); + + } else if (num_defaults > 1 && !has_matching_static(root)) { + _exception_message = generate_conflicts_message(&qualified_methods,CHECK); + _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError(); if (TraceDefaultMethods) { _exception_message->print_value_on(tty); tty->print_cr(""); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/classfile/symbolTable.cpp --- a/src/share/vm/classfile/symbolTable.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/classfile/symbolTable.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -38,6 +38,9 @@ // -------------------------------------------------------------------------- +// the number of buckets a thread claims +const int ClaimChunkSize = 32; + SymbolTable* SymbolTable::_the_table = NULL; // Static arena for symbols that are not deallocated Arena* SymbolTable::_arena = NULL; @@ -83,16 +86,12 @@ } } -int SymbolTable::symbols_removed = 0; -int SymbolTable::symbols_counted = 0; +int SymbolTable::_symbols_removed = 0; +int SymbolTable::_symbols_counted = 0; +volatile int SymbolTable::_parallel_claimed_idx = 0; -// Remove unreferenced symbols from the symbol table -// This is done late during GC. -void SymbolTable::unlink() { - int removed = 0; - int total = 0; - size_t memory_total = 0; - for (int i = 0; i < the_table()->table_size(); ++i) { +void SymbolTable::buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total) { + for (int i = start_idx; i < end_idx; ++i) { HashtableEntry** p = the_table()->bucket_addr(i); HashtableEntry* entry = the_table()->bucket(i); while (entry != NULL) { @@ -104,14 +103,14 @@ break; } Symbol* s = entry->literal(); - memory_total += s->size(); - total++; + (*memory_total) += s->size(); + (*processed)++; assert(s != NULL, "just checking"); // If reference count is zero, remove. 
if (s->refcount() == 0) { assert(!entry->is_shared(), "shared entries should be kept live"); delete s; - removed++; + (*removed)++; *p = entry->next(); the_table()->free_entry(entry); } else { @@ -121,12 +120,45 @@ entry = (HashtableEntry*)HashtableEntry::make_ptr(*p); } } - symbols_removed += removed; - symbols_counted += total; +} + +// Remove unreferenced symbols from the symbol table +// This is done late during GC. +void SymbolTable::unlink(int* processed, int* removed) { + size_t memory_total = 0; + buckets_unlink(0, the_table()->table_size(), processed, removed, &memory_total); + _symbols_removed += *removed; + _symbols_counted += *processed; // Exclude printing for normal PrintGCDetails because people parse // this output. if (PrintGCDetails && Verbose && WizardMode) { - gclog_or_tty->print(" [Symbols=%d size=" SIZE_FORMAT "K] ", total, + gclog_or_tty->print(" [Symbols=%d size=" SIZE_FORMAT "K] ", *processed, + (memory_total*HeapWordSize)/1024); + } +} + +void SymbolTable::possibly_parallel_unlink(int* processed, int* removed) { + const int limit = the_table()->table_size(); + + size_t memory_total = 0; + + for (;;) { + // Grab next set of buckets to scan + int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize; + if (start_idx >= limit) { + // End of table + break; + } + + int end_idx = MIN2(limit, start_idx + ClaimChunkSize); + buckets_unlink(start_idx, end_idx, processed, removed, &memory_total); + } + Atomic::add(*processed, &_symbols_counted); + Atomic::add(*removed, &_symbols_removed); + // Exclude printing for normal PrintGCDetails because people parse + // this output. + if (PrintGCDetails && Verbose && WizardMode) { + gclog_or_tty->print(" [Symbols: scanned=%d removed=%d size=" SIZE_FORMAT "K] ", *processed, *removed, (memory_total*HeapWordSize)/1024); } } @@ -494,11 +526,11 @@ tty->print_cr("Total number of symbols %5d", count); tty->print_cr("Total size in memory %5dK", (memory_total*HeapWordSize)/1024); - tty->print_cr("Total counted %5d", symbols_counted); - tty->print_cr("Total removed %5d", symbols_removed); - if (symbols_counted > 0) { + tty->print_cr("Total counted %5d", _symbols_counted); + tty->print_cr("Total removed %5d", _symbols_removed); + if (_symbols_counted > 0) { tty->print_cr("Percent removed %3.2f", - ((float)symbols_removed/(float)symbols_counted)* 100); + ((float)_symbols_removed/(float)_symbols_counted)* 100); } tty->print_cr("Reference counts %5d", Symbol::_total_count); tty->print_cr("Symbol arena size %5d used %5d", @@ -739,39 +771,38 @@ return result; } -void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { +void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) { + buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), processed, removed); +} + +void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) { // Readers of the table are unlocked, so we should only be removing // entries at a safepoint. 
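The possibly_parallel_unlink() loop above is the chunk-claiming idiom this changeset threads through both tables: each GC worker atomically bumps _parallel_claimed_idx by ClaimChunkSize, scans the bucket range it thereby owns, and quits once the claimed index passes the table size, so no bucket is visited twice. A self-contained sketch of the idiom with std::atomic standing in for HotSpot's Atomic; note HotSpot's Atomic::add returns the new value, hence the "- ClaimChunkSize" above, while fetch_add returns the old value directly:

#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

const int ClaimChunkSize = 32;            // buckets claimed per grab
std::atomic<int> parallel_claimed_idx{0};

static void scan_buckets(int /*start_idx*/, int /*end_idx*/) {
  // stand-in for buckets_unlink(start_idx, end_idx, ...)
}

static void worker(int table_size) {
  for (;;) {
    // Claim the next chunk; fetch_add hands back the pre-increment index.
    int start_idx = parallel_claimed_idx.fetch_add(ClaimChunkSize);
    if (start_idx >= table_size) break;   // end of table
    int end_idx = std::min(table_size, start_idx + ClaimChunkSize);
    scan_buckets(start_idx, end_idx);
  }
}

int main() {
  const int table_size = 20011;           // illustrative bucket count
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) workers.emplace_back(worker, table_size);
  for (std::thread& t : workers) t.join();
  std::printf("claimed index ended at %d\n", parallel_claimed_idx.load());
  return 0;
}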
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry** p = the_table()->bucket_addr(i); - HashtableEntry* entry = the_table()->bucket(i); - while (entry != NULL) { - assert(!entry->is_shared(), "CDS not used for the StringTable"); + const int limit = the_table()->table_size(); - if (is_alive->do_object_b(entry->literal())) { - if (f != NULL) { - f->do_oop((oop*)entry->literal_addr()); - } - p = entry->next_addr(); - } else { - *p = entry->next(); - the_table()->free_entry(entry); - } - entry = *p; + for (;;) { + // Grab next set of buckets to scan + int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize; + if (start_idx >= limit) { + // End of table + break; } + + int end_idx = MIN2(limit, start_idx + ClaimChunkSize); + buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, processed, removed); } } -void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) { +void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) { const int limit = the_table()->table_size(); assert(0 <= start_idx && start_idx <= limit, - err_msg("start_idx (" INT32_FORMAT ") oob?", start_idx)); + err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx)); assert(0 <= end_idx && end_idx <= limit, - err_msg("end_idx (" INT32_FORMAT ") oob?", end_idx)); + err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx)); assert(start_idx <= end_idx, - err_msg("Ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT, + err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT, start_idx, end_idx)); for (int i = start_idx; i < end_idx; i += 1) { @@ -786,12 +817,44 @@ } } +void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed) { + const int limit = the_table()->table_size(); + + assert(0 <= start_idx && start_idx <= limit, + err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx)); + assert(0 <= end_idx && end_idx <= limit, + err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx)); + assert(start_idx <= end_idx, + err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT, + start_idx, end_idx)); + + for (int i = start_idx; i < end_idx; ++i) { + HashtableEntry** p = the_table()->bucket_addr(i); + HashtableEntry* entry = the_table()->bucket(i); + while (entry != NULL) { + assert(!entry->is_shared(), "CDS not used for the StringTable"); + + if (is_alive->do_object_b(entry->literal())) { + if (f != NULL) { + f->do_oop((oop*)entry->literal_addr()); + } + p = entry->next_addr(); + } else { + *p = entry->next(); + the_table()->free_entry(entry); + (*removed)++; + } + (*processed)++; + entry = *p; + } + } +} + void StringTable::oops_do(OopClosure* f) { - buckets_do(f, 0, the_table()->table_size()); + buckets_oops_do(f, 0, the_table()->table_size()); } void StringTable::possibly_parallel_oops_do(OopClosure* f) { - const int ClaimChunkSize = 32; const int limit = the_table()->table_size(); for (;;) { @@ -803,7 +866,7 @@ } int end_idx = MIN2(limit, start_idx + ClaimChunkSize); - buckets_do(f, start_idx, end_idx); + buckets_oops_do(f, start_idx, end_idx); } } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/classfile/symbolTable.hpp --- a/src/share/vm/classfile/symbolTable.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/classfile/symbolTable.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -86,8 +86,8 @@ static bool 
_needs_rehashing; // For statistics - static int symbols_removed; - static int symbols_counted; + static int _symbols_removed; + static int _symbols_counted; Symbol* allocate_symbol(const u1* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F @@ -121,6 +121,11 @@ static Arena* arena() { return _arena; } // called for statistics static void initialize_symbols(int arena_alloc_size = 0); + + static volatile int _parallel_claimed_idx; + + // Release any dead symbols + static void buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total); public: enum { symbol_alloc_batch_size = 8, @@ -177,7 +182,14 @@ unsigned int* hashValues, TRAPS); // Release any dead symbols - static void unlink(); + static void unlink() { + int processed = 0; + int removed = 0; + unlink(&processed, &removed); + } + static void unlink(int* processed, int* removed); + // Release any dead symbols, possibly parallel version + static void possibly_parallel_unlink(int* processed, int* removed); // iterate over symbols static void symbols_do(SymbolClosure *cl); @@ -235,6 +247,9 @@ // Rehash the symbol table if it gets out of balance static void rehash_table(); static bool needs_rehashing() { return _needs_rehashing; } + // Parallel chunked scanning + static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; } + static int parallel_claimed_index() { return _parallel_claimed_idx; } }; class StringTable : public Hashtable { @@ -258,7 +273,10 @@ // Apply the give oop closure to the entries to the buckets // in the range [start_idx, end_idx). - static void buckets_do(OopClosure* f, int start_idx, int end_idx); + static void buckets_oops_do(OopClosure* f, int start_idx, int end_idx); + // Unlink or apply the given oop closure to the entries to the buckets + // in the range [start_idx, end_idx). + static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed); StringTable() : Hashtable((int)StringTableSize, sizeof (HashtableEntry)) {} @@ -280,15 +298,28 @@ // GC support // Delete pointers to otherwise-unreachable objects. - static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f); + static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f) { + int processed = 0; + int removed = 0; + unlink_or_oops_do(cl, f, &processed, &removed); + } static void unlink(BoolObjectClosure* cl) { - unlink_or_oops_do(cl, NULL); + int processed = 0; + int removed = 0; + unlink_or_oops_do(cl, NULL, &processed, &removed); } - + static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed); + static void unlink(BoolObjectClosure* cl, int* processed, int* removed) { + unlink_or_oops_do(cl, NULL, processed, removed); + } // Serially invoke "f->do_oop" on the locations of all oops in the table.
static void oops_do(OopClosure* f); - // Possibly parallel version of the above + // Possibly parallel versions of the above + static void possibly_parallel_unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed); + static void possibly_parallel_unlink(BoolObjectClosure* cl, int* processed, int* removed) { + possibly_parallel_unlink_or_oops_do(cl, NULL, processed, removed); + } static void possibly_parallel_oops_do(OopClosure* f); // Hashing algorithm, used as the hash value used by the @@ -349,5 +380,6 @@ // Parallel chunked scanning static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; } + static int parallel_claimed_index() { return _parallel_claimed_idx; } }; #endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/classfile/vmSymbols.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -787,7 +787,7 @@ do_intrinsic(_cipherBlockChaining_decryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, decrypt_name, byteArray_int_int_byteArray_int_signature, F_R) \ do_name( encrypt_name, "encrypt") \ do_name( decrypt_name, "decrypt") \ - do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)V") \ + do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)I") \ \ /* support for java.util.zip */ \ do_class(java_util_zip_CRC32, "java/util/zip/CRC32") \ diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/code/codeCache.cpp --- a/src/share/vm/code/codeCache.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/code/codeCache.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -596,20 +596,13 @@ } #ifndef PRODUCT -// used to keep track of how much time is spent in mark_for_deoptimization +// Keeps track of time spent for checking dependencies static elapsedTimer dependentCheckTime; -static int dependentCheckCount = 0; -#endif // PRODUCT +#endif int CodeCache::mark_for_deoptimization(DepChange& changes) { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - -#ifndef PRODUCT - dependentCheckTime.start(); - dependentCheckCount++; -#endif // PRODUCT - int number_of_marked_CodeBlobs = 0; // search the hierarchy looking for nmethods which are affected by the loading of this class @@ -617,32 +610,23 @@ // then search the interfaces this class implements looking for nmethods // which might be dependent of the fact that an interface only had one // implementor. - - { No_Safepoint_Verifier nsv; - for (DepChange::ContextStream str(changes, nsv); str.next(); ) { - Klass* d = str.klass(); - number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes); - } - } - - if (VerifyDependencies) { - // Turn off dependency tracing while actually testing deps. 
- NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) ); - FOR_ALL_ALIVE_NMETHODS(nm) { - if (!nm->is_marked_for_deoptimization() && - nm->check_all_dependencies()) { - ResourceMark rm; - tty->print_cr("Should have been marked for deoptimization:"); - changes.print(); - nm->print(); - nm->print_dependencies(); - } - } + // nmethod::check_all_dependencies only works correctly if no safepoint + // can happen + No_Safepoint_Verifier nsv; + for (DepChange::ContextStream str(changes, nsv); str.next(); ) { + Klass* d = str.klass(); + number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes); } #ifndef PRODUCT + if (VerifyDependencies) { + // Object pointers are used as unique identifiers for dependency arguments. This + // is only possible if no safepoint, i.e., no GC, occurs during the verification code. + dependentCheckTime.start(); + nmethod::check_all_dependencies(changes); + dependentCheckTime.stop(); + } +#endif return number_of_marked_CodeBlobs; } @@ -899,9 +883,7 @@ } tty->print_cr("CodeCache:"); - - tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(), - dependentCheckTime.seconds() / dependentCheckCount); + tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds()); if (!live.is_empty()) { live.print("live"); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/code/dependencies.cpp --- a/src/share/vm/code/dependencies.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/code/dependencies.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -678,6 +678,17 @@ return result; } +/** + * Returns a unique identifier for each dependency argument. + */ +uintptr_t Dependencies::DepStream::get_identifier(int i) { + if (has_oop_argument()) { + return (uintptr_t)(oopDesc*)argument_oop(i); + } else { + return (uintptr_t)argument(i); + } +} + oop Dependencies::DepStream::argument_oop(int i) { oop result = recorded_oop_at(argument_index(i)); assert(result == NULL || result->is_oop(), "must be"); @@ -713,6 +724,57 @@ return NULL; } +// ----------------- DependencySignature -------------------------------------- +bool DependencySignature::equals(const DependencySignature& sig) const { + if (type() != sig.type()) { + return false; + } + + if (args_count() != sig.args_count()) { + return false; + } + + for (int i = 0; i < sig.args_count(); i++) { + if (arg(i) != sig.arg(i)) { + return false; + } + } + return true; +} + + +// ----------------- DependencySignatureBuffer -------------------------------------- +DependencySignatureBuffer::DependencySignatureBuffer() { + _signatures = NEW_RESOURCE_ARRAY(GrowableArray*, Dependencies::TYPE_LIMIT); + memset(_signatures, 0, sizeof(DependencySignature*) * Dependencies::TYPE_LIMIT); +} + +/* Check if arguments are identical. Two dependency signatures are considered + * identical if the type as well as all argument identifiers are identical. + * If the dependency has not already been checked, the dependency signature is + * added to the checked dependencies of the same type. The function returns + * false, which causes the dependency to be checked in the caller.
+ */ +bool DependencySignatureBuffer::add_if_missing(const DependencySignature& sig) { + const int index = sig.type(); + GrowableArray* buffer = _signatures[index]; + if (buffer == NULL) { + buffer = new GrowableArray(); + _signatures[index] = buffer; + } + + // Check if we have already checked the dependency + for (int i = 0; i < buffer->length(); i++) { + DependencySignature* checked_signature = buffer->at(i); + if (checked_signature->equals(sig)) { + return true; + } + } + buffer->append((DependencySignature*)&sig); + return false; +} + + /// Checking dependencies: // This hierarchy walker inspects subtypes of a given type, @@ -1159,11 +1221,9 @@ // We could also return false if m does not yet appear to be // executed, if the VM version supports this distinction also. + // Default methods are considered "concrete" as well. return !m->is_abstract() && - !InstanceKlass::cast(m->method_holder())->is_interface(); - // TODO: investigate whether default methods should be - // considered as "concrete" in this situation. For now they - // are not. + !m->is_overpass(); // error functions aren't concrete } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/code/dependencies.hpp --- a/src/share/vm/code/dependencies.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/code/dependencies.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -480,6 +480,9 @@ bool next(); DepType type() { return _type; } + bool has_oop_argument() { return type() == call_site_target_value; } + uintptr_t get_identifier(int i); + int argument_count() { return dep_args(type()); } int argument_index(int i) { assert(0 <= i && i < argument_count(), "oob"); return _xi[i]; } @@ -523,6 +526,38 @@ }; +class DependencySignature : public ResourceObj { + private: + int _args_count; + uintptr_t _argument_hash[Dependencies::max_arg_count]; + Dependencies::DepType _type; + + + public: + DependencySignature(Dependencies::DepStream& dep) { + _args_count = dep.argument_count(); + _type = dep.type(); + for (int i = 0; i < _args_count; i++) { + _argument_hash[i] = dep.get_identifier(i); + } + } + + bool equals(const DependencySignature& sig) const; + + int args_count() const { return _args_count; } + uintptr_t arg(int idx) const { return _argument_hash[idx]; } + Dependencies::DepType type() const { return _type; } +}; + +class DependencySignatureBuffer : public StackObj { + private: + GrowableArray** _signatures; + + public: + DependencySignatureBuffer(); + bool add_if_missing(const DependencySignature& sig); +}; + // Every particular DepChange is a sub-class of this class. class DepChange : public StackObj { public: diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/code/nmethod.cpp --- a/src/share/vm/code/nmethod.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/code/nmethod.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -2161,16 +2161,41 @@ } -bool nmethod::check_all_dependencies() { - bool found_check = false; - // wholesale check of all dependencies - for (Dependencies::DepStream deps(this); deps.next(); ) { - if (deps.check_dependency() != NULL) { - found_check = true; - NOT_DEBUG(break); +void nmethod::check_all_dependencies(DepChange& changes) { + // Checked dependencies are allocated into this ResourceMark + ResourceMark rm; + + // Turn off dependency tracing while actually testing dependencies. + NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) ); + + // 'dep_signature_buffers' caches already checked dependencies. 
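add_if_missing() above is what lets the new check_all_dependencies() (next hunk) verify each distinct dependency only once: signatures are bucketed by dependency type, compared by type plus argument identifiers, and recorded on first sight. A small standalone sketch of that dedup-by-signature scheme, with std::vector in place of GrowableArray and a plain struct in place of DependencySignature:

#include <cstdint>
#include <vector>

const int TYPE_LIMIT = 8;        // number of dependency types, illustrative

struct Signature {
  int type;                      // dependency type tag
  std::vector<uintptr_t> args;   // unique identifiers of the arguments

  bool equals(const Signature& other) const {
    return type == other.type && args == other.args;
  }
};

class SignatureBuffer {
  std::vector<Signature> _buckets[TYPE_LIMIT];  // one list per type
 public:
  // Returns true if an identical signature was already recorded; otherwise
  // records it and returns false, telling the caller to do the real check.
  bool add_if_missing(const Signature& sig) {
    std::vector<Signature>& buf = _buckets[sig.type];
    for (const Signature& seen : buf) {
      if (seen.equals(sig)) return true;
    }
    buf.push_back(sig);
    return false;
  }
};

int main() {
  SignatureBuffer buf;
  Signature s{1, {0xdeadbeef}};
  bool dup1 = buf.add_if_missing(s);  // false: first sighting, check it
  bool dup2 = buf.add_if_missing(s);  // true: duplicate, skip the check
  return (!dup1 && dup2) ? 0 : 1;
}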
+ DependencySignatureBuffer dep_signature_buffers; + + // Iterate over live nmethods and check dependencies of all nmethods that are not + // marked for deoptimization. A particular dependency is only checked once. + for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) { + if (!nm->is_marked_for_deoptimization()) { + for (Dependencies::DepStream deps(nm); deps.next(); ) { + // Construct abstraction of a dependency. + const DependencySignature* current_sig = new DependencySignature(deps); + // Determine if 'deps' is already checked. If it is not checked, + // 'add_if_missing()' adds the dependency signature and returns + // false. + if (!dep_signature_buffers.add_if_missing(*current_sig)) { + if (deps.check_dependency() != NULL) { + // Dependency checking failed. Print out information about the failed + // dependency and finally fail with an assert. We can fail here, since + // dependency checking is never done in a product build. + ResourceMark rm; + changes.print(); + nm->print(); + nm->print_dependencies(); + assert(false, "Should have been marked for deoptimization"); + } + } + } } } - return found_check; // tell caller if we found anything } bool nmethod::check_dependency_on(DepChange& changes) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/code/nmethod.hpp --- a/src/share/vm/code/nmethod.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/code/nmethod.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -679,7 +679,7 @@ // tells if any of this method's dependencies have been invalidated // (this is expensive!) - bool check_all_dependencies(); + static void check_all_dependencies(DepChange& changes); // tells if this compiled method is dependent on the given changes, // and the changes have invalidated it diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/compiler/compileBroker.cpp --- a/src/share/vm/compiler/compileBroker.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/compiler/compileBroker.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -98,7 +98,7 @@ Symbol* name = (method)->name(); \ Symbol* signature = (method)->signature(); \ HOTSPOT_METHOD_COMPILE_BEGIN( \ - comp_name, strlen(comp_name), \ + (char *) comp_name, strlen(comp_name), \ (char *) klass_name->bytes(), klass_name->utf8_length(), \ (char *) name->bytes(), name->utf8_length(), \ (char *) signature->bytes(), signature->utf8_length()); \ @@ -110,7 +110,7 @@ Symbol* name = (method)->name(); \ Symbol* signature = (method)->signature(); \ HOTSPOT_METHOD_COMPILE_END( \ - comp_name, strlen(comp_name), \ + (char *) comp_name, strlen(comp_name), \ (char *) klass_name->bytes(), klass_name->utf8_length(), \ (char *) name->bytes(), name->utf8_length(), \ (char *) signature->bytes(), signature->utf8_length(), (success)); \ diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -466,7 +466,7 @@ void CMSAdaptiveSizePolicy::checkpoint_roots_final_begin() { _STW_timer.stop(); _latest_cms_initial_mark_end_to_remark_start_secs = _STW_timer.seconds(); - // Start accumumlating time for the remark in the STW timer. + // Start accumulating time for the remark in the STW timer. 
_STW_timer.reset(); _STW_timer.start(); } @@ -537,8 +537,8 @@ avg_msc_pause()->sample(msc_pause_in_seconds); double mutator_time_in_seconds = 0.0; if (_latest_cms_collection_end_to_collection_start_secs == 0.0) { - // This assertion may fail because of time stamp gradularity. - // Comment it out and investiage it at a later time. The large + // This assertion may fail because of time stamp granularity. + // Comment it out and investigate it at a later time. The large // time stamp granularity occurs on some older linux systems. #ifndef CLOCK_GRANULARITY_TOO_LARGE assert((_latest_cms_concurrent_marking_time_secs == 0.0) && @@ -836,7 +836,7 @@ void CMSAdaptiveSizePolicy::ms_collection_marking_begin() { _STW_timer.stop(); - // Start accumumlating time for the marking in the STW timer. + // Start accumulating time for the marking in the STW timer. _STW_timer.reset(); _STW_timer.start(); } @@ -1227,7 +1227,7 @@ // We use the tenuring threshold to equalize the cost of major // and minor collections. // ThresholdTolerance is used to indicate how sensitive the - // tenuring threshold is to differences in cost betweent the + // tenuring threshold is to differences in cost between the // collection types. // Get the times of interest. This involves a little work, so diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -356,7 +356,7 @@ void concurrent_sweeping_begin(); void concurrent_sweeping_end(); // Similar to the above (e.g., concurrent_marking_end()) and - // is used for both the precleaning an abortable precleaing + // is used for both the precleaning an abortable precleaning // phases. void concurrent_precleaning_begin(); void concurrent_precleaning_end(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -88,8 +88,7 @@ // of the tenured generation. PerfVariable* _avg_msc_pause_counter; // Average for the time between the most recent end of a - // MSC collection and the beginning of the next - // MSC collection. + // MSC collection and the beginning of the next MSC collection. PerfVariable* _avg_msc_interval_counter; // Average for the GC cost of a MSC collection based on // _avg_msc_pause_counter and _avg_msc_interval_counter. @@ -99,8 +98,7 @@ // of the tenured generation. PerfVariable* _avg_ms_pause_counter; // Average for the time between the most recent end of a - // MS collection and the beginning of the next - // MS collection. + // MS collection and the beginning of the next MS collection. PerfVariable* _avg_ms_interval_counter; // Average for the GC cost of a MS collection based on // _avg_ms_pause_counter and _avg_ms_interval_counter. @@ -108,9 +106,9 @@ // Average of the bytes promoted per minor collection. PerfVariable* _promoted_avg_counter; - // Average of the deviation of the promoted average + // Average of the deviation of the promoted average. 
PerfVariable* _promoted_avg_dev_counter; - // Padded average of the bytes promoted per minor colleciton + // Padded average of the bytes promoted per minor collection. PerfVariable* _promoted_padded_avg_counter; // See description of the _change_young_gen_for_maj_pauses diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -258,10 +258,10 @@ bool take_from_overflow_list(); }; -// Tn this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit +// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit // stack and the bitMap are shared, so access needs to be suitably -// sycnhronized. An OopTaskQueue structure, supporting efficient -// workstealing, replaces a CMSMarkStack for storing grey objects. +// synchronized. An OopTaskQueue structure, supporting efficient +// work stealing, replaces a CMSMarkStack for storing grey objects. class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure { private: MemRegion _span; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -407,8 +407,8 @@ res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size, (size_t) SmallForLinearAlloc - 1)); // XXX the following could potentially be pretty slow; - // should one, pesimally for the rare cases when res - // caclulated above is less than IndexSetSize, + // should one, pessimistically for the rare cases when res + // calculated above is less than IndexSetSize, // just return res calculated above? My reasoning was that // those cases will be so rare that the extra time spent doesn't // really matter.... @@ -759,7 +759,7 @@ // Note on locking for the space iteration functions: // since the collector's iteration activities are concurrent with // allocation activities by mutators, absent a suitable mutual exclusion -// mechanism the iterators may go awry. For instace a block being iterated +// mechanism the iterators may go awry. For instance a block being iterated // may suddenly be allocated or divided up and part of it allocated and // so on. @@ -2090,7 +2090,7 @@ // Support for concurrent collection policy decisions. bool CompactibleFreeListSpace::should_concurrent_collect() const { - // In the future we might want to add in frgamentation stats -- + // In the future we might want to add in fragmentation stats -- // including erosion of the "mountain" into this decision as well. return !adaptive_freelists() && linearAllocationWouldFail(); } @@ -2099,7 +2099,7 @@ void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); - // prepare_for_compaction() uses the space between live objects + // Prepare_for_compaction() uses the space between live objects // so that later phase can skip dead space quickly. So verification // of the free lists doesn't work after. 
} @@ -2122,7 +2122,7 @@ SCAN_AND_COMPACT(obj_size); } -// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] +// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] // where fbs is free block sizes double CompactibleFreeListSpace::flsFrag() const { size_t itabFree = totalSizeInIndexedFreeLists(); @@ -2651,7 +2651,7 @@ // changes on-the-fly during a scavenge and avoid such a phase-change // pothole. The following code is a heuristic attempt to do that. // It is protected by a product flag until we have gained - // enough experience with this heuristic and fine-tuned its behaviour. + // enough experience with this heuristic and fine-tuned its behavior. // WARNING: This might increase fragmentation if we overreact to // small spikes, so some kind of historical smoothing based on // previous experience with the greater reactivity might be useful. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -58,7 +58,7 @@ HeapWord* _ptr; size_t _word_size; size_t _refillSize; - size_t _allocation_size_limit; // largest size that will be allocated + size_t _allocation_size_limit; // Largest size that will be allocated void print_on(outputStream* st) const; }; @@ -116,14 +116,14 @@ PromotionInfo _promoInfo; - // helps to impose a global total order on freelistLock ranks; + // Helps to impose a global total order on freelistLock ranks; // assumes that CFLSpace's are allocated in global total order static int _lockRank; - // a lock protecting the free lists and free blocks; + // A lock protecting the free lists and free blocks; // mutable because of ubiquity of locking even for otherwise const methods mutable Mutex _freelistLock; - // locking verifier convenience function + // Locking verifier convenience function void assert_locked() const PRODUCT_RETURN; void assert_locked(const Mutex* lock) const PRODUCT_RETURN; @@ -131,12 +131,13 @@ LinearAllocBlock _smallLinearAllocBlock; FreeBlockDictionary::DictionaryChoice _dictionaryChoice; - AFLBinaryTreeDictionary* _dictionary; // ptr to dictionary for large size blocks + AFLBinaryTreeDictionary* _dictionary; // Pointer to dictionary for large size blocks + // Indexed array for small size blocks AdaptiveFreeList _indexedFreeList[IndexSetSize]; - // indexed array for small size blocks - // allocation stategy - bool _fitStrategy; // Use best fit strategy. + + // Allocation strategy + bool _fitStrategy; // Use best fit strategy bool _adaptive_freelists; // Use adaptive freelists // This is an address close to the largest free chunk in the heap. @@ -157,7 +158,7 @@ // Extra stuff to manage promotion parallelism. - // a lock protecting the dictionary during par promotion allocation. + // A lock protecting the dictionary during par promotion allocation. mutable Mutex _parDictionaryAllocLock; Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; } @@ -275,26 +276,26 @@ } protected: - // reset the indexed free list to its initial empty condition. + // Reset the indexed free list to its initial empty condition. void resetIndexedFreeListArray(); - // reset to an initial state with a single free block described + // Reset to an initial state with a single free block described // by the MemRegion parameter. 
void reset(MemRegion mr); // Return the total number of words in the indexed free lists. size_t totalSizeInIndexedFreeLists() const; public: - // Constructor... + // Constructor CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr, bool use_adaptive_freelists, FreeBlockDictionary::DictionaryChoice); - // accessors + // Accessors bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; } FreeBlockDictionary* dictionary() const { return _dictionary; } HeapWord* nearLargestChunk() const { return _nearLargestChunk; } void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; } - // Set CMS global values + // Set CMS global values. static void set_cms_values(); // Return the free chunk at the end of the space. If no such @@ -305,7 +306,7 @@ void set_collector(CMSCollector* collector) { _collector = collector; } - // Support for parallelization of rescan and marking + // Support for parallelization of rescan and marking. const size_t rescan_task_size() const { return _rescan_task_size; } const size_t marking_task_size() const { return _marking_task_size; } SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; } @@ -346,7 +347,7 @@ // Resizing support void set_end(HeapWord* value); // override - // mutual exclusion support + // Mutual exclusion support Mutex* freelistLock() const { return &_freelistLock; } // Iteration support @@ -370,7 +371,7 @@ // If the iteration encounters an unparseable portion of the region, // terminate the iteration and return the address of the start of the // subregion that isn't done. Return of "NULL" indicates that the - // interation completed. + // iteration completed. virtual HeapWord* object_iterate_careful_m(MemRegion mr, ObjectClosureCareful* cl); @@ -393,11 +394,11 @@ size_t block_size_nopar(const HeapWord* p) const; bool block_is_obj_nopar(const HeapWord* p) const; - // iteration support for promotion + // Iteration support for promotion void save_marks(); bool no_allocs_since_save_marks(); - // iteration support for sweeping + // Iteration support for sweeping void save_sweep_limit() { _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ? unallocated_block() : end(); @@ -457,7 +458,7 @@ FreeChunk* allocateScratch(size_t size); - // returns true if either the small or large linear allocation buffer is empty. + // Returns true if either the small or large linear allocation buffer is empty. bool linearAllocationWouldFail() const; // Adjust the chunk for the minimum size. This version is called in @@ -477,18 +478,18 @@ void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, bool coalesced); - // Support for decisions regarding concurrent collection policy + // Support for decisions regarding concurrent collection policy. bool should_concurrent_collect() const; - // Support for compaction + // Support for compaction. void prepare_for_compaction(CompactPoint* cp); void adjust_pointers(); void compact(); - // reset the space to reflect the fact that a compaction of the + // Reset the space to reflect the fact that a compaction of the // space has been done. virtual void reset_after_compaction(); - // Debugging support + // Debugging support. void print() const; void print_on(outputStream* st) const; void prepare_for_verify(); @@ -500,7 +501,7 @@ // i.e. either the binary tree dictionary, the indexed free lists // or the linear allocation block. 
bool verify_chunk_in_free_list(FreeChunk* fc) const; - // Verify that the given chunk is the linear allocation block + // Verify that the given chunk is the linear allocation block. bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const; // Do some basic checks on the the free lists. void check_free_list_consistency() const PRODUCT_RETURN; @@ -516,7 +517,7 @@ size_t sumIndexedFreeListArrayReturnedBytes(); // Return the total number of chunks in the indexed free lists. size_t totalCountInIndexedFreeLists() const; - // Return the total numberof chunks in the space. + // Return the total number of chunks in the space. size_t totalCount(); ) diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -117,10 +117,10 @@ // hide the naked CGC_lock manipulation in the baton-passing code // further below. That's something we should try to do. Also, the proof // of correctness of this 2-level locking scheme is far from obvious, -// and potentially quite slippery. We have an uneasy supsicion, for instance, +// and potentially quite slippery. We have an uneasy suspicion, for instance, // that there may be a theoretical possibility of delay/starvation in the // low-level lock/wait/notify scheme used for the baton-passing because of -// potential intereference with the priority scheme embodied in the +// potential interference with the priority scheme embodied in the // CMS-token-passing protocol. See related comments at a CGC_lock->wait() // invocation further below and marked with "XXX 20011219YSR". // Indeed, as we note elsewhere, this may become yet more slippery @@ -259,7 +259,7 @@ // Ideally, in the calculation below, we'd compute the dilatation // factor as: MinChunkSize/(promoting_gen's min object size) // Since we do not have such a general query interface for the - // promoting generation, we'll instead just use the mimimum + // promoting generation, we'll instead just use the minimum // object size (which today is a header's worth of space); // note that all arithmetic is in units of HeapWords. assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking"); @@ -274,7 +274,7 @@ // // Let "f" be MinHeapFreeRatio in // -// _intiating_occupancy = 100-f + +// _initiating_occupancy = 100-f + // f * (CMSTriggerRatio/100) // where CMSTriggerRatio is the argument "tr" below. // @@ -2671,7 +2671,7 @@ // that it's responsible for collecting, while itself doing any // work common to all generations it's responsible for. A similar // comment applies to the gc_epilogue()'s. -// The role of the varaible _between_prologue_and_epilogue is to +// The role of the variable _between_prologue_and_epilogue is to // enforce the invocation protocol. void CMSCollector::gc_prologue(bool full) { // Call gc_prologue_work() for the CMSGen @@ -2878,10 +2878,10 @@ // Check reachability of the given heap address in CMS generation, // treating all other generations as roots. 
bool CMSCollector::is_cms_reachable(HeapWord* addr) { - // We could "guarantee" below, rather than assert, but i'll + // We could "guarantee" below, rather than assert, but I'll // leave these as "asserts" so that an adventurous debugger // could try this in the product build provided some subset of - // the conditions were met, provided they were intersted in the + // the conditions were met, provided they were interested in the // results and knew that the computation below wouldn't interfere // with other concurrent computations mutating the structures // being read or written. @@ -2982,7 +2982,7 @@ // This is as intended, because by this time // GC must already have cleared any refs that need to be cleared, // and traced those that need to be marked; moreover, - // the marking done here is not going to intefere in any + // the marking done here is not going to interfere in any // way with the marking information used by GC. NoRefDiscovery no_discovery(ref_processor()); @@ -3000,7 +3000,7 @@ if (CMSRemarkVerifyVariant == 1) { // In this first variant of verification, we complete - // all marking, then check if the new marks-verctor is + // all marking, then check if the new marks-vector is // a subset of the CMS marks-vector. verify_after_remark_work_1(); } else if (CMSRemarkVerifyVariant == 2) { @@ -3033,7 +3033,6 @@ gch->gen_process_strong_roots(_cmsGen->level(), true, // younger gens are roots true, // activate StrongRootsScope - false, // not scavenging SharedHeap::ScanningOption(roots_scanning_options()), ¬Older, true, // walk code active on stacks @@ -3101,7 +3100,6 @@ gch->gen_process_strong_roots(_cmsGen->level(), true, // younger gens are roots true, // activate StrongRootsScope - false, // not scavenging SharedHeap::ScanningOption(roots_scanning_options()), ¬Older, true, // walk code active on stacks @@ -3303,7 +3301,7 @@ void CMSCollector::setup_cms_unloading_and_verification_state() { const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC || VerifyBeforeExit; - const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + const int rso = SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache; // We set the proper root for this CMS cycle here. if (should_unload_classes()) { // Should unload classes this cycle @@ -3401,7 +3399,7 @@ CMSExpansionCause::_allocate_par_lab); // Now go around the loop and try alloc again; // A competing par_promote might beat us to the expansion space, - // so we may go around the loop again if promotion fails agaion. + // so we may go around the loop again if promotion fails again. if (GCExpandToAllocateDelayMillis > 0) { os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); } @@ -3738,10 +3736,9 @@ gch->gen_process_strong_roots(_cmsGen->level(), true, // younger gens are roots true, // activate StrongRootsScope - false, // not scavenging SharedHeap::ScanningOption(roots_scanning_options()), ¬Older, - true, // walk all of code cache if (so & SO_CodeCache) + true, // walk all of code cache if (so & SO_AllCodeCache) NULL, &klass_closure); } @@ -4373,7 +4370,7 @@ // should really use wait/notify, which is the recommended // way of doing this type of interaction. Additionally, we should // consolidate the eight methods that do the yield operation and they - // are almost identical into one for better maintenability and + // are almost identical into one for better maintainability and // readability. See 6445193. 
// // Tony 2006.06.29 @@ -4541,7 +4538,7 @@ // If Eden's current occupancy is below this threshold, // immediately schedule the remark; else preclean // past the next scavenge in an effort to - // schedule the pause as described avove. By choosing + // schedule the pause as described above. By choosing // CMSScheduleRemarkEdenSizeThreshold >= max eden size // we will never do an actual abortable preclean cycle. if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { @@ -5238,14 +5235,13 @@ gch->gen_process_strong_roots(_collector->_cmsGen->level(), false, // yg was scanned above false, // this is parallel code - false, // not scavenging SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), &par_mri_cl, - true, // walk all of code cache if (so & SO_CodeCache) + true, // walk all of code cache if (so & SO_AllCodeCache) NULL, &klass_closure); assert(_collector->should_unload_classes() - || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache), + || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); if (PrintCMSStatistics != 0) { @@ -5375,14 +5371,13 @@ gch->gen_process_strong_roots(_collector->_cmsGen->level(), false, // yg was scanned above false, // this is parallel code - false, // not scavenging SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), &par_mrias_cl, - true, // walk all of code cache if (so & SO_CodeCache) + true, // walk all of code cache if (so & SO_AllCodeCache) NULL, NULL); // The dirty klasses will be handled below assert(_collector->should_unload_classes() - || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache), + || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); if (PrintCMSStatistics != 0) { @@ -5537,8 +5532,8 @@ // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! // CAUTION: This closure has state that persists across calls to // the work method dirty_range_iterate_clear() in that it has - // imbedded in it a (subtype of) UpwardsObjectClosure. The - // use of that state in the imbedded UpwardsObjectClosure instance + // embedded in it a (subtype of) UpwardsObjectClosure. The + // use of that state in the embedded UpwardsObjectClosure instance // assumes that the cards are always iterated (even if in parallel // by several threads) in monotonically increasing order per each // thread. This is true of the implementation below which picks @@ -5553,7 +5548,7 @@ // sure that the changes there do not run counter to the // assumptions made here and necessary for correctness and // efficiency. Note also that this code might yield inefficient - // behaviour in the case of very large objects that span one or + // behavior in the case of very large objects that span one or // more work chunks. Such objects would potentially be scanned // several times redundantly. Work on 4756801 should try and // address that performance anomaly if at all possible. XXX @@ -5579,7 +5574,7 @@ while (!pst->is_task_claimed(/* reference */ nth_task)) { // Having claimed the nth_task, compute corresponding mem-region, - // which is a-fortiori aligned correctly (i.e. at a MUT bopundary). + // which is a-fortiori aligned correctly (i.e. at a MUT boundary). 
// The alignment restriction ensures that we do not need any // synchronization with other gang-workers while setting or // clearing bits in thus chunk of the MUT. @@ -5966,7 +5961,6 @@ gch->gen_process_strong_roots(_cmsGen->level(), true, // younger gens as roots false, // use the local StrongRootsScope - false, // not scavenging SharedHeap::ScanningOption(roots_scanning_options()), &mrias_cl, true, // walk code active on stacks @@ -5974,7 +5968,7 @@ NULL); // The dirty klasses will be handled below assert(should_unload_classes() - || (roots_scanning_options() & SharedHeap::SO_CodeCache), + || (roots_scanning_options() & SharedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); } @@ -6371,7 +6365,7 @@ _inter_sweep_timer.reset(); _inter_sweep_timer.start(); - // We need to use a monotonically non-deccreasing time in ms + // We need to use a monotonically non-decreasing time in ms // or we will see time-warp warnings and os::javaTimeMillis() // does not guarantee monotonicity. jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; @@ -6732,7 +6726,7 @@ warning("CMS bit map allocation failure"); return false; } - // For now we'll just commit all of the bit map up fromt. + // For now we'll just commit all of the bit map up front. // Later on we'll try to be more parsimonious with swap. if (!_virtual_space.initialize(brs, brs.size())) { warning("CMS bit map backing store failure"); @@ -6839,8 +6833,8 @@ // XXX FIX ME !!! In the MT case we come in here holding a // leaf lock. For printing we need to take a further lock -// which has lower rank. We need to recallibrate the two -// lock-ranks involved in order to be able to rpint the +// which has lower rank. We need to recalibrate the two +// lock-ranks involved in order to be able to print the // messages below. (Or defer the printing to the caller. // For now we take the expedient path of just disabling the // messages for the problematic case.) @@ -7180,7 +7174,7 @@ } #endif // ASSERT } else { - // an unitialized object + // An uninitialized object. assert(_bitMap->isMarked(addr+1), "missing Printezis mark?"); HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2); size = pointer_delta(nextOneAddr + 1, addr); @@ -7188,7 +7182,7 @@ "alignment problem"); // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass() // will dirty the card when the klass pointer is installed in the - // object (signalling the completion of initialization). + // object (signaling the completion of initialization). } } else { // Either a not yet marked object or an uninitialized object @@ -7999,7 +7993,7 @@ // we need to dirty all of the cards that the object spans, // since the rescan of object arrays will be limited to the // dirty cards. - // Note that no one can be intefering with us in this action + // Note that no one can be interfering with us in this action // of dirtying the mod union table, so no locking or atomics // are required. if (obj->is_objArray()) { @@ -9025,7 +9019,7 @@ // It's OK to call this multi-threaded; the worst thing // that can happen is that we'll get a bunch of closely -// spaced simulated oveflows, but that's OK, in fact +// spaced simulated overflows, but that's OK, in fact // probably good as it would exercise the overflow code // under contention. bool CMSCollector::simulate_overflow() { @@ -9145,7 +9139,7 @@ (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); } } else { - // Chop off the suffix and rerturn it to the global list. 
+ // Chop off the suffix and return it to the global list. assert(cur->mark() != BUSY, "Error"); oop suffix_head = cur->mark(); // suffix will be put back on global list cur->set_mark(NULL); // break off suffix diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -171,19 +171,19 @@ // Ideally this should be GrowableArray<> just like MSC's marking stack(s). class CMSMarkStack: public CHeapObj { // - friend class CMSCollector; // to get at expasion stats further below + friend class CMSCollector; // To get at expansion stats further below. // - VirtualSpace _virtual_space; // space for the stack - oop* _base; // bottom of stack - size_t _index; // one more than last occupied index - size_t _capacity; // max #elements - Mutex _par_lock; // an advisory lock used in case of parallel access - NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run + VirtualSpace _virtual_space; // Space for the stack + oop* _base; // Bottom of stack + size_t _index; // One more than last occupied index + size_t _capacity; // Max #elements + Mutex _par_lock; // An advisory lock used in case of parallel access + NOT_PRODUCT(size_t _max_depth;) // Max depth plumbed during run protected: - size_t _hit_limit; // we hit max stack size limit - size_t _failed_double; // we failed expansion before hitting limit + size_t _hit_limit; // We hit max stack size limit + size_t _failed_double; // We failed expansion before hitting limit public: CMSMarkStack(): @@ -238,7 +238,7 @@ _index = 0; } - // Expand the stack, typically in response to an overflow condition + // Expand the stack, typically in response to an overflow condition. void expand(); // Compute the least valued stack element. @@ -250,7 +250,7 @@ return least; } - // Exposed here to allow stack expansion in || case + // Exposed here to allow stack expansion in || case. Mutex* par_lock() { return &_par_lock; } }; @@ -557,7 +557,7 @@ // Manipulated with CAS in the parallel/multi-threaded case. oop _overflow_list; // The following array-pair keeps track of mark words - // displaced for accomodating overflow list above. + // displaced for accommodating overflow list above. // This code will likely be revisited under RFE#4922830. Stack _preserved_oop_stack; Stack _preserved_mark_stack; @@ -599,7 +599,7 @@ void verify_after_remark_work_1(); void verify_after_remark_work_2(); - // true if any verification flag is on. + // True if any verification flag is on. bool _verifying; bool verifying() const { return _verifying; } void set_verifying(bool v) { _verifying = v; } @@ -611,9 +611,9 @@ void set_did_compact(bool v); // XXX Move these to CMSStats ??? FIX ME !!! 
- elapsedTimer _inter_sweep_timer; // time between sweeps - elapsedTimer _intra_sweep_timer; // time _in_ sweeps - // padded decaying average estimates of the above + elapsedTimer _inter_sweep_timer; // Time between sweeps + elapsedTimer _intra_sweep_timer; // Time _in_ sweeps + // Padded decaying average estimates of the above AdaptivePaddedAverage _inter_sweep_estimate; AdaptivePaddedAverage _intra_sweep_estimate; @@ -632,16 +632,16 @@ void report_heap_summary(GCWhen::Type when); protected: - ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) - MemRegion _span; // span covering above two - CardTableRS* _ct; // card table + ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS) + MemRegion _span; // Span covering above two + CardTableRS* _ct; // Card table // CMS marking support structures CMSBitMap _markBitMap; CMSBitMap _modUnionTable; CMSMarkStack _markStack; - HeapWord* _restart_addr; // in support of marking stack overflow + HeapWord* _restart_addr; // In support of marking stack overflow void lower_restart_addr(HeapWord* low); // Counters in support of marking stack / work queue overflow handling: @@ -656,12 +656,12 @@ size_t _par_kac_ovflw; NOT_PRODUCT(ssize_t _num_par_pushes;) - // ("Weak") Reference processing support + // ("Weak") Reference processing support. ReferenceProcessor* _ref_processor; CMSIsAliveClosure _is_alive_closure; - // keep this textually after _markBitMap and _span; c'tor dependency + // Keep this textually after _markBitMap and _span; c'tor dependency. - ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work + ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work ModUnionClosure _modUnionClosure; ModUnionClosurePar _modUnionClosurePar; @@ -697,7 +697,7 @@ // State related to prologue/epilogue invocation for my generations bool _between_prologue_and_epilogue; - // Signalling/State related to coordination between fore- and backgroud GC + // Signaling/State related to coordination between fore- and background GC // Note: When the baton has been passed from background GC to foreground GC, // _foregroundGCIsActive is true and _foregroundGCShouldWait is false. static bool _foregroundGCIsActive; // true iff foreground collector is active or @@ -712,13 +712,13 @@ int _numYields; size_t _numDirtyCards; size_t _sweep_count; - // number of full gc's since the last concurrent gc. + // Number of full gc's since the last concurrent gc. uint _full_gcs_since_conc_gc; - // occupancy used for bootstrapping stats + // Occupancy used for bootstrapping stats double _bootstrap_occupancy; - // timer + // Timer elapsedTimer _timer; // Timing, allocation and promotion statistics, used for scheduling. 
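The AdaptivePaddedAverage estimates above drive CMS scheduling: each is a decaying average padded with a multiple of a decaying deviation, so the collector errs toward over-estimating noisy quantities such as sweep times and promoted bytes. A minimal self-contained sketch of the idea follows (this is not the actual HotSpot class; the weight and padding constants are invented for illustration):

#include <cmath>
#include <cstddef>
#include <cstdio>

// Sketch of a "padded" exponentially decaying average in the spirit of
// AdaptivePaddedAverage: track a decaying mean plus 'padding' times a
// decaying mean absolute deviation.
class PaddedAverage {
  double _weight;   // fraction of the old estimate retained per sample
  double _padding;  // how many deviations to pad the average with
  double _avg;      // decaying average of the samples
  double _dev;      // decaying average of |sample - _avg|
 public:
  PaddedAverage(double weight, double padding)
    : _weight(weight), _padding(padding), _avg(0.0), _dev(0.0) {}

  void sample(double v) {
    _avg = _weight * _avg + (1.0 - _weight) * v;
    _dev = _weight * _dev + (1.0 - _weight) * std::fabs(v - _avg);
  }

  double average() const        { return _avg; }
  double padded_average() const { return _avg + _padding * _dev; }
};

int main() {
  PaddedAverage promoted(0.7 /* weight */, 3.0 /* padding */);
  const double samples[] = { 100.0, 120.0, 80.0, 200.0, 90.0 };
  for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
    promoted.sample(samples[i]);
  }
  std::printf("avg=%.1f padded=%.1f\n",
              promoted.average(), promoted.padded_average());
  return 0;
}

Padding with the deviation makes the estimate conservative exactly when the samples are noisy, which is when a too-low estimate would hurt scheduling the most.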
@@ -770,7 +770,7 @@ int no_of_gc_threads); void push_on_overflow_list(oop p); void par_push_on_overflow_list(oop p); - // the following is, obviously, not, in general, "MT-stable" + // The following is, obviously, not, in general, "MT-stable" bool overflow_list_is_empty() const; void preserve_mark_if_necessary(oop p); @@ -778,24 +778,24 @@ void preserve_mark_work(oop p, markOop m); void restore_preserved_marks_if_any(); NOT_PRODUCT(bool no_preserved_marks() const;) - // in support of testing overflow code + // In support of testing overflow code NOT_PRODUCT(int _overflow_counter;) - NOT_PRODUCT(bool simulate_overflow();) // sequential + NOT_PRODUCT(bool simulate_overflow();) // Sequential NOT_PRODUCT(bool par_simulate_overflow();) // MT version // CMS work methods - void checkpointRootsInitialWork(bool asynch); // initial checkpoint work + void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work - // a return value of false indicates failure due to stack overflow - bool markFromRootsWork(bool asynch); // concurrent marking work + // A return value of false indicates failure due to stack overflow + bool markFromRootsWork(bool asynch); // Concurrent marking work public: // FIX ME!!! only for testing - bool do_marking_st(bool asynch); // single-threaded marking - bool do_marking_mt(bool asynch); // multi-threaded marking + bool do_marking_st(bool asynch); // Single-threaded marking + bool do_marking_mt(bool asynch); // Multi-threaded marking private: - // concurrent precleaning work + // Concurrent precleaning work size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen, ScanMarkedObjectsAgainCarefullyClosure* cl); size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen, @@ -811,26 +811,26 @@ // Resets (i.e. clears) the per-thread plab sample vectors void reset_survivor_plab_arrays(); - // final (second) checkpoint work + // Final (second) checkpoint work void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs, bool init_mark_was_synchronous); - // work routine for parallel version of remark + // Work routine for parallel version of remark void do_remark_parallel(); - // work routine for non-parallel version of remark + // Work routine for non-parallel version of remark void do_remark_non_parallel(); - // reference processing work routine (during second checkpoint) + // Reference processing work routine (during second checkpoint) void refProcessingWork(bool asynch, bool clear_all_soft_refs); - // concurrent sweeping work + // Concurrent sweeping work void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch); - // (concurrent) resetting of support data structures + // (Concurrent) resetting of support data structures void reset(bool asynch); // Clear _expansion_cause fields of constituent generations void clear_expansion_cause(); - // An auxilliary method used to record the ends of + // An auxiliary method used to record the ends of // used regions of each generation to limit the extent of sweep void save_sweep_limits(); @@ -854,7 +854,7 @@ bool is_external_interruption(); void report_concurrent_mode_interruption(); - // If the backgrould GC is active, acquire control from the background + // If the background GC is active, acquire control from the background // GC and do the collection. 
void acquire_control_and_collect(bool full, bool clear_all_soft_refs); @@ -893,7 +893,7 @@ ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; } - // locking checks + // Locking checks NOT_PRODUCT(static bool have_cms_token();) // XXXPERM bool should_collect(bool full, size_t size, bool tlab); @@ -958,7 +958,7 @@ CMSBitMap* markBitMap() { return &_markBitMap; } void directAllocated(HeapWord* start, size_t size); - // main CMS steps and related support + // Main CMS steps and related support void checkpointRootsInitial(bool asynch); bool markFromRoots(bool asynch); // a return value of false indicates failure // due to stack overflow @@ -977,7 +977,7 @@ // Performance Counter Support CollectorCounters* counters() { return _gc_counters; } - // timer stuff + // Timer stuff void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); } void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); } void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); } @@ -1014,18 +1014,18 @@ static void print_on_error(outputStream* st); - // debugging + // Debugging void verify(); bool verify_after_remark(bool silent = VerifySilently); void verify_ok_to_terminate() const PRODUCT_RETURN; void verify_work_stacks_empty() const PRODUCT_RETURN; void verify_overflow_empty() const PRODUCT_RETURN; - // convenience methods in support of debugging + // Convenience methods in support of debugging static const size_t skip_header_HeapWords() PRODUCT_RETURN0; HeapWord* block_start(const void* p) const PRODUCT_RETURN0; - // accessors + // Accessors CMSMarkStack* verification_mark_stack() { return &_markStack; } CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; } @@ -1109,7 +1109,7 @@ CollectionTypes _debug_collection_type; - // True if a compactiing collection was done. + // True if a compacting collection was done. bool _did_compact; bool did_compact() { return _did_compact; } @@ -1203,7 +1203,7 @@ // Support for compaction CompactibleSpace* first_compaction_space() const; - // Adjust quantites in the generation affected by + // Adjust quantities in the generation affected by // the compaction. void reset_after_compaction(); @@ -1301,7 +1301,7 @@ void setNearLargestChunk(); bool isNearLargestChunk(HeapWord* addr); - // Get the chunk at the end of the space. Delagates to + // Get the chunk at the end of the space. Delegates to // the space. FreeChunk* find_chunk_at_end(); @@ -1422,7 +1422,6 @@ // marking from the roots following the first checkpoint. // XXX This should really be a subclass of The serial version // above, but i have not had the time to refactor things cleanly. -// That willbe done for Dolphin. class Par_MarkFromRootsClosure: public BitMapClosure { CMSCollector* _collector; MemRegion _whole_span; @@ -1780,7 +1779,7 @@ void do_already_free_chunk(FreeChunk *fc); // Work method called when processing an already free or a // freshly garbage chunk to do a lookahead and possibly a - // premptive flush if crossing over _limit. + // preemptive flush if crossing over _limit. void lookahead_and_flush(FreeChunk* fc, size_t chunkSize); // Process a garbage chunk during sweeping. size_t do_garbage_chunk(FreeChunk *fc); @@ -1879,7 +1878,7 @@ }; // Allow yielding or short-circuiting of reference list -// prelceaning work. +// precleaning work. 
+// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure { CMSCollector* _collector; void do_yield_work(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -197,13 +197,13 @@ } -// Return the HeapWord address corrsponding to the next "0" bit +// Return the HeapWord address corresponding to the next "0" bit // (inclusive). inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const { return getNextUnmarkedWordAddress(addr, endWord()); } -// Return the HeapWord address corrsponding to the next "0" bit +// Return the HeapWord address corresponding to the next "0" bit // (inclusive). inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress( HeapWord* start_addr, HeapWord* end_addr) const { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -164,7 +164,7 @@ // _pending_yields that holds the sum (of both sync and async requests), and // a second counter _pending_decrements that only holds the async requests, // for greater efficiency, since in a typical CMS run, there are many more - // pontential (i.e. static) yield points than there are actual + // potential (i.e. static) yield points than there are actual // (i.e. dynamic) yields because of requests, which are few and far between. // // Note that, while "_pending_yields >= _pending_decrements" is an invariant, diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -279,7 +279,7 @@ // When _spoolTail is NULL, then the set of slots with displaced headers // is all those starting at the slot <_spoolHead, _firstIndex> and // going up to the last slot of last block in the linked list. -// In this lartter case, _splice_point points to the tail block of +// In this latter case, _splice_point points to the tail block of // this linked list of blocks holding displaced headers. 
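The <_spoolHead, _firstIndex> / <_spoolTail, _nextIndex> bookkeeping described above can be made concrete with a toy walk over a spool-block list. This is only a sketch: the block size and the types are invented, and unlike the real PromotionInfo the blocks here are plain stack objects rather than chunks carved out of the CMS space.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Toy spool block: a fixed number of slots for displaced mark words,
// chained into a singly linked list.
struct SpoolBlock {
  enum { bufferSize = 4 };
  SpoolBlock* nextSpoolBlock;
  unsigned displacedHdr[bufferSize];
  SpoolBlock() : nextSpoolBlock(NULL) {}
};

// Count the slots holding displaced headers, walking from
// <spoolHead, firstIndex> up to but excluding <spoolTail, nextIndex>,
// or to the end of the list when spoolTail is NULL.
static int count_displaced(SpoolBlock* spoolHead, int firstIndex,
                           SpoolBlock* spoolTail, int nextIndex) {
  int count = 0;
  for (SpoolBlock* b = spoolHead; b != NULL; b = b->nextSpoolBlock) {
    int from = (b == spoolHead) ? firstIndex : 0;
    int to   = (b == spoolTail) ? nextIndex : SpoolBlock::bufferSize;
    count += to - from;
    if (b == spoolTail) break;
  }
  return count;
}

int main() {
  SpoolBlock a, b;
  a.nextSpoolBlock = &b;
  // Slots 1..3 of 'a' and 0..1 of 'b' hold displaced headers.
  assert(count_displaced(&a, 1, &b, 2) == 5);
  // spoolTail == NULL: every slot from <a,1> to the end of the list counts.
  assert(count_displaced(&a, 1, NULL, 0) == 7);
  std::printf("ok\n");
  return 0;
}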
void PromotionInfo::verify() const { // Verify the following: diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -141,8 +141,7 @@ #ifndef USDT2 HS_DTRACE_PROBE(hs_private, cms__initmark__begin); #else /* USDT2 */ - HS_PRIVATE_CMS_INITMARK_BEGIN( - ); + HS_PRIVATE_CMS_INITMARK_BEGIN(); #endif /* USDT2 */ _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark"); @@ -162,8 +161,7 @@ #ifndef USDT2 HS_DTRACE_PROBE(hs_private, cms__initmark__end); #else /* USDT2 */ - HS_PRIVATE_CMS_INITMARK_END( - ); + HS_PRIVATE_CMS_INITMARK_END(); #endif /* USDT2 */ } @@ -178,8 +176,7 @@ #ifndef USDT2 HS_DTRACE_PROBE(hs_private, cms__remark__begin); #else /* USDT2 */ - HS_PRIVATE_CMS_REMARK_BEGIN( - ); + HS_PRIVATE_CMS_REMARK_BEGIN(); #endif /* USDT2 */ _collector->_gc_timer_cm->register_gc_pause_start("Final Mark"); @@ -200,8 +197,7 @@ #ifndef USDT2 HS_DTRACE_PROBE(hs_private, cms__remark__end); #else /* USDT2 */ - HS_PRIVATE_CMS_REMARK_END( - ); + HS_PRIVATE_CMS_REMARK_END(); #endif /* USDT2 */ } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp --- a/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -39,7 +39,7 @@ // up, the wrapped closure is applied to all elements, keeping track of // this elapsed time of this process, and leaving the array empty. // The caller must be sure to call "done" to process any unprocessed -// buffered entriess. +// buffered entries. class Generation; class HeapRegion; @@ -98,116 +98,4 @@ _closure_app_seconds(0.0) { } }; -class BufferingOopsInGenClosure: public OopsInGenClosure { - BufferingOopClosure _boc; - OopsInGenClosure* _oc; - protected: - template inline void do_oop_work(T* p) { - assert(generation()->is_in_reserved((void*)p), "Must be in!"); - _boc.do_oop(p); - } - public: - BufferingOopsInGenClosure(OopsInGenClosure *oc) : - _boc(oc), _oc(oc) {} - - virtual void do_oop(narrowOop* p) { do_oop_work(p); } - virtual void do_oop(oop* p) { do_oop_work(p); } - - void done() { - _boc.done(); - } - - double closure_app_seconds () { - return _boc.closure_app_seconds(); - } - - void set_generation(Generation* gen) { - OopsInGenClosure::set_generation(gen); - _oc->set_generation(gen); - } - - void reset_generation() { - // Make sure we finish the current work with the current generation. 
- _boc.done(); - OopsInGenClosure::reset_generation(); - _oc->reset_generation(); - } - -}; - - -class BufferingOopsInHeapRegionClosure: public OopsInHeapRegionClosure { -private: - enum PrivateConstants { - BufferLength = 1024 - }; - - StarTask _buffer[BufferLength]; - StarTask* _buffer_top; - StarTask* _buffer_curr; - - HeapRegion* _hr_buffer[BufferLength]; - HeapRegion** _hr_curr; - - OopsInHeapRegionClosure* _oc; - double _closure_app_seconds; - - void process_buffer () { - - assert((_hr_curr - _hr_buffer) == (_buffer_curr - _buffer), - "the two lengths should be the same"); - - double start = os::elapsedTime(); - HeapRegion** hr_curr = _hr_buffer; - HeapRegion* hr_prev = NULL; - for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) { - HeapRegion* region = *hr_curr; - if (region != hr_prev) { - _oc->set_region(region); - hr_prev = region; - } - if (curr->is_narrow()) { - assert(UseCompressedOops, "Error"); - _oc->do_oop((narrowOop*)(*curr)); - } else { - _oc->do_oop((oop*)(*curr)); - } - ++hr_curr; - } - _buffer_curr = _buffer; - _hr_curr = _hr_buffer; - _closure_app_seconds += (os::elapsedTime() - start); - } - -public: - virtual void do_oop(narrowOop* p) { do_oop_work(p); } - virtual void do_oop( oop* p) { do_oop_work(p); } - - template void do_oop_work(T* p) { - if (_buffer_curr == _buffer_top) { - assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr"); - process_buffer(); - } - StarTask new_ref(p); - *_buffer_curr = new_ref; - ++_buffer_curr; - *_hr_curr = _from; - ++_hr_curr; - } - void done () { - if (_buffer_curr > _buffer) { - assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr"); - process_buffer(); - } - } - double closure_app_seconds () { - return _closure_app_seconds; - } - BufferingOopsInHeapRegionClosure (OopsInHeapRegionClosure *oc) : - _oc(oc), - _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength), - _hr_curr(_hr_buffer), - _closure_app_seconds(0.0) { } -}; - #endif // SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -33,7 +33,7 @@ _threads(NULL), _n_threads(0), _hot_card_cache(g1h) { - // Ergomonically select initial concurrent refinement parameters + // Ergonomically select initial concurrent refinement parameters if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) { FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2(ParallelGCThreads, 1)); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp --- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -44,8 +44,8 @@ _vtime_accum(0.0) { - // Each thread has its own monitor. The i-th thread is responsible for signalling - // to thread i+1 if the number of buffers in the queue exceeds a threashold for this + // Each thread has its own monitor. The i-th thread is responsible for signaling + // to thread i+1 if the number of buffers in the queue exceeds a threshold for this // thread. Monitors are also used to wake up the threads during termination. // The 0th worker in notified by mutator threads and has a special monitor. // The last worker is used for young gen rset size sampling. 
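The chained wake-up scheme described above amounts to giving each refinement worker its own activation threshold on the shared queue of completed buffers: worker i runs only once the backlog exceeds its threshold, and while running it is responsible for waking worker i+1. A sketch of just the threshold policy (the green-zone and step values are invented; in HotSpot they derive from the G1ConcRefinement* flags):

#include <cstdio>

// Each worker's activation threshold grows with its id, so workers come
// on line one by one as the backlog of completed buffers grows.
static int activation_threshold(int worker_id, int green_zone, int step) {
  return green_zone + worker_id * step;
}

int main() {
  const int n_workers = 4, green_zone = 2, step = 3;
  const int completed_buffers = 9;  // current backlog
  for (int i = 0; i < n_workers; i++) {
    int thr = activation_threshold(i, green_zone, step);
    std::printf("worker %d: threshold %d -> %s\n",
                i, thr, completed_buffers > thr ? "activate" : "sleep");
  }
  return 0;
}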
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -909,7 +909,7 @@ } #endif - // Initialise marking structures. This has to be done in a STW phase. + // Initialize marking structures. This has to be done in a STW phase. reset(); // For each region note start of marking. @@ -923,8 +923,8 @@ // If we force an overflow during remark, the remark operation will // actually abort and we'll restart concurrent marking. If we always - // force an oveflow during remark we'll never actually complete the - // marking phase. So, we initilize this here, at the start of the + // force an overflow during remark we'll never actually complete the + // marking phase. So, we initialize this here, at the start of the // cycle, so that at the remaining overflow number will decrease at // every remark and we'll eventually not need to cause one. force_overflow_stw()->init(); @@ -959,7 +959,7 @@ * * Note, however, that this code is also used during remark and in * this case we should not attempt to leave / enter the STS, otherwise - * we'll either hit an asseert (debug / fastdebug) or deadlock + * we'll either hit an assert (debug / fastdebug) or deadlock * (product). So we should only leave / enter the STS if we are * operating concurrently. * @@ -1001,7 +1001,7 @@ // task 0 is responsible for clearing the global data structures // We should be here because of an overflow. During STW we should // not clear the overflow flag since we rely on it being true when - // we exit this method to abort the pause and restart concurent + // we exit this method to abort the pause and restart concurrent // marking. reset_marking_state(true /* clear_overflow */); force_overflow()->update(); @@ -1251,7 +1251,7 @@ CMConcurrentMarkingTask markingTask(this, cmThread()); if (use_parallel_marking_threads()) { _parallel_workers->set_active_workers((int)active_workers); - // Don't set _n_par_threads because it affects MT in proceess_strong_roots() + // Don't set _n_par_threads because it affects MT in process_strong_roots() // and the decisions on that MT processing is made elsewhere. assert(_parallel_workers->active_workers() > 0, "Should have been set"); _parallel_workers->run_task(&markingTask); @@ -1484,7 +1484,7 @@ } // Set the marked bytes for the current region so that - // it can be queried by a calling verificiation routine + // it can be queried by a calling verification routine _region_marked_bytes = marked_bytes; return false; @@ -1619,7 +1619,6 @@ } }; - class G1ParVerifyFinalCountTask: public AbstractGangTask { protected: G1CollectedHeap* _g1h; @@ -2307,7 +2306,7 @@ // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). // // CMTask::do_marking_step() is called in a loop, which we'll exit - // if there's nothing more to do (i.e. we'completely drained the + // if there's nothing more to do (i.e. we've completely drained the // entries that were pushed as a a result of applying the 'keep alive' // closure to the entries on the discovered ref lists) or we overflow // the global marking stack. @@ -2470,7 +2469,7 @@ // reference processing is not multi-threaded and is thus // performed by the current thread instead of a gang worker). 
// - // The gang tasks involved in parallel reference procssing create + // The gang tasks involved in parallel reference processing create // their own instances of these closures, which do their own // synchronization among themselves. G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); @@ -2529,10 +2528,9 @@ assert(!rp->discovery_enabled(), "Post condition"); } - // Now clean up stale oops in StringTable - StringTable::unlink(&g1_is_alive); - // Clean up unreferenced symbols in symbol table. - SymbolTable::unlink(); + g1h->unlink_string_and_symbol_table(&g1_is_alive, + /* process_strings */ false, // currently strings are always roots + /* process_symbols */ true); } void ConcurrentMark::swapMarkBitMaps() { @@ -2548,7 +2546,7 @@ public: void work(uint worker_id) { // Since all available tasks are actually started, we should - // only proceed if we're supposed to be actived. + // only proceed if we're supposed to be active. if (worker_id < _cm->active_tasks()) { CMTask* task = _cm->task(worker_id); task->record_start_time(); @@ -3068,7 +3066,7 @@ // 'start' should be in the heap. assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity"); - // 'end' *may* be just beyone the end of the heap (if hr is the last region) + // 'end' *may* be just beyond the end of the heap (if hr is the last region) assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity"); BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); @@ -4416,7 +4414,7 @@ // overflow was raised. This means we have to restart the // marking phase and start iterating over regions. However, in // order to do this we have to make sure that all tasks stop - // what they are doing and re-initialise in a safe manner. We + // what they are doing and re-initialize in a safe manner. We // will achieve this with the use of two barrier sync points. if (_cm->verbose_low()) { @@ -4430,7 +4428,7 @@ // When we exit this sync barrier we know that all tasks have // stopped doing marking work. So, it's now safe to - // re-initialise our data structures. At the end of this method, + // re-initialize our data structures. At the end of this method, // task 0 will clear the global data structures. } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/concurrentMark.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -378,19 +378,19 @@ friend class G1CMDrainMarkingStackClosure; protected: - ConcurrentMarkThread* _cmThread; // the thread doing the work - G1CollectedHeap* _g1h; // the heap. 
- uint _parallel_marking_threads; // the number of marking - // threads we're use - uint _max_parallel_marking_threads; // max number of marking - // threads we'll ever use - double _sleep_factor; // how much we have to sleep, with + ConcurrentMarkThread* _cmThread; // The thread doing the work + G1CollectedHeap* _g1h; // The heap + uint _parallel_marking_threads; // The number of marking + // threads we're using + uint _max_parallel_marking_threads; // Max number of marking + // threads we'll ever use + double _sleep_factor; // How much we have to sleep, with // respect to the work we just did, to // meet the marking overhead goal - double _marking_task_overhead; // marking target overhead for + double _marking_task_overhead; // Marking target overhead for // a single task - // same as the two above, but for the cleanup task + // Same as the two above, but for the cleanup task double _cleanup_sleep_factor; double _cleanup_task_overhead; @@ -399,8 +399,8 @@ // Concurrent marking support structures CMBitMap _markBitMap1; CMBitMap _markBitMap2; - CMBitMapRO* _prevMarkBitMap; // completed mark bitmap - CMBitMap* _nextMarkBitMap; // under-construction mark bitmap + CMBitMapRO* _prevMarkBitMap; // Completed mark bitmap + CMBitMap* _nextMarkBitMap; // Under-construction mark bitmap BitMap _region_bm; BitMap _card_bm; @@ -409,43 +409,43 @@ HeapWord* _heap_start; HeapWord* _heap_end; - // Root region tracking and claiming. + // Root region tracking and claiming CMRootRegions _root_regions; // For gray objects - CMMarkStack _markStack; // Grey objects behind global finger. - HeapWord* volatile _finger; // the global finger, region aligned, + CMMarkStack _markStack; // Grey objects behind global finger + HeapWord* volatile _finger; // The global finger, region aligned, // always points to the end of the // last claimed region - // marking tasks - uint _max_worker_id;// maximum worker id - uint _active_tasks; // task num currently active - CMTask** _tasks; // task queue array (max_worker_id len) - CMTaskQueueSet* _task_queues; // task queue set - ParallelTaskTerminator _terminator; // for termination + // Marking tasks + uint _max_worker_id;// Maximum worker id + uint _active_tasks; // Task num currently active + CMTask** _tasks; // Task queue array (max_worker_id len) + CMTaskQueueSet* _task_queues; // Task queue set + ParallelTaskTerminator _terminator; // For termination - // Two sync barriers that are used to synchronise tasks when an + // Two sync barriers that are used to synchronize tasks when an // overflow occurs. The algorithm is the following. All tasks enter // the first one to ensure that they have all stopped manipulating - // the global data structures. After they exit it, they re-initialise - // their data structures and task 0 re-initialises the global data + // the global data structures. After they exit it, they re-initialize + // their data structures and task 0 re-initializes the global data // structures. Then, they enter the second sync barrier. This // ensure, that no task starts doing work before all data - // structures (local and global) have been re-initialised. When they + // structures (local and global) have been re-initialized. When they // exit it, they are free to start working again. WorkGangBarrierSync _first_overflow_barrier_sync; WorkGangBarrierSync _second_overflow_barrier_sync; - // this is set by any task, when an overflow on the global data - // structures is detected. 
+ // This is set by any task, when an overflow on the global data + // structures is detected volatile bool _has_overflown; - // true: marking is concurrent, false: we're in remark + // True: marking is concurrent, false: we're in remark volatile bool _concurrent; - // set at the end of a Full GC so that marking aborts + // Set at the end of a Full GC so that marking aborts volatile bool _has_aborted; - // used when remark aborts due to an overflow to indicate that + // Used when remark aborts due to an overflow to indicate that // another concurrent marking phase should start volatile bool _restart_for_overflow; @@ -455,10 +455,10 @@ // time of remark. volatile bool _concurrent_marking_in_progress; - // verbose level + // Verbose level CMVerboseLevel _verbose_level; - // All of these times are in ms. + // All of these times are in ms NumberSeq _init_times; NumberSeq _remark_times; NumberSeq _remark_mark_times; @@ -467,7 +467,7 @@ double _total_counting_time; double _total_rs_scrub_time; - double* _accum_task_vtime; // accumulated task vtime + double* _accum_task_vtime; // Accumulated task vtime FlexibleWorkGang* _parallel_workers; @@ -487,7 +487,7 @@ void reset_marking_state(bool clear_overflow = true); // We do this after we're done with marking so that the marking data - // structures are initialised to a sensible and predictable state. + // structures are initialized to a sensible and predictable state. void set_non_marking_state(); // Called to indicate how many threads are currently active. @@ -497,14 +497,14 @@ // mark or remark) and how many threads are currently active. void set_concurrency_and_phase(uint active_tasks, bool concurrent); - // prints all gathered CM-related statistics + // Prints all gathered CM-related statistics void print_stats(); bool cleanup_list_is_empty() { return _cleanup_list.is_empty(); } - // accessor methods + // Accessor methods uint parallel_marking_threads() const { return _parallel_marking_threads; } uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;} double sleep_factor() { return _sleep_factor; } @@ -542,7 +542,7 @@ // frequently. HeapRegion* claim_region(uint worker_id); - // It determines whether we've run out of regions to scan. + // It determines whether we've run out of regions to scan bool out_of_regions() { return _finger == _heap_end; } // Returns the task with the given id @@ -816,7 +816,7 @@ inline bool do_yield_check(uint worker_i = 0); inline bool should_yield(); - // Called to abort the marking cycle after a Full GC takes palce. + // Called to abort the marking cycle after a Full GC takes place. void abort(); bool has_aborted() { return _has_aborted; } @@ -933,11 +933,11 @@ // Similar to the above routine but there are times when we cannot // safely calculate the size of obj due to races and we, therefore, - // pass the size in as a parameter. It is the caller's reponsibility + // pass the size in as a parameter. It is the caller's responsibility // to ensure that the size passed in for obj is valid. inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id); - // Unconditionally mark the given object, and unconditinally count + // Unconditionally mark the given object, and unconditionally count // the object in the counting structures for worker id 0. // Should *not* be called from parallel code. 
inline bool mark_and_count(oop obj, HeapRegion* hr); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -105,7 +105,7 @@ // will then correspond to a (non-existent) card that is also // just beyond the heap. if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) { - // end of region is not card aligned - incremement to cover + // end of region is not card aligned - increment to cover // all the cards spanned by the region. end_idx += 1; } @@ -222,7 +222,7 @@ return false; } -// Unconditionally mark the given object, and unconditinally count +// Unconditionally mark the given object, and unconditionally count // the object in the counting structures for worker id 0. // Should *not* be called from parallel code. inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp --- a/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -70,7 +70,7 @@ inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size, bool bot_updates) { - // First we have to tedo the allocation, assuming we're holding the + // First we have to redo the allocation, assuming we're holding the // appropriate lock, in case another thread changed the region while // we were waiting to get the lock. HeapWord* result = attempt_allocation(word_size, bot_updates); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1BiasedArray.hpp --- a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -79,7 +79,7 @@ assert((uintptr_t)end % mapping_granularity_in_bytes == 0, err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT, mapping_granularity_in_bytes, end)); - size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize); + size_t num_target_elems = pointer_delta(end, bottom, mapping_granularity_in_bytes); idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes; address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes); initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes)); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -448,7 +448,7 @@ // Otherwise, find the block start using the table, but taking // care (cf block_start_unsafe() above) not to parse any objects/blocks - // on the cards themsleves. + // on the cards themselves. 
size_t index = _array->index_for(addr); assert(_array->address_for_index(index) == addr, "arg should be start of card"); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1CardCounts.cpp --- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -169,7 +169,7 @@ // We use the last address in hr as hr could be the // last region in the heap. In which case trying to find - // the card for hr->end() will be an OOB accesss to the + // the card for hr->end() will be an OOB access to the // card table. HeapWord* last = hr->end() - 1; assert(_g1h->g1_committed().contains(last), diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -50,8 +50,8 @@ #include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "memory/gcLocker.inline.hpp" -#include "memory/genOopClosures.inline.hpp" #include "memory/generationSpec.hpp" +#include "memory/iterator.hpp" #include "memory/referenceProcessor.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.pcgc.inline.hpp" @@ -1575,8 +1575,6 @@ void G1CollectedHeap:: resize_if_necessary_after_full_collection(size_t word_size) { - assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); - // Include the current allocation, if any, and bytes that will be // pre-allocated to support collections, as "used". const size_t used_after_gc = used(); @@ -2996,7 +2994,17 @@ } size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { - return HeapRegion::GrainBytes; + return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes; +} + +size_t G1CollectedHeap::tlab_used(Thread* ignored) const { + return young_list()->eden_used_bytes(); +} + +// For G1 TLABs should not contain humongous objects, so the maximum TLAB size +// must be smaller than the humongous object limit. +size_t G1CollectedHeap::max_tlab_size() const { + return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment); } size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { @@ -3008,11 +3016,11 @@ // humongous objects. HeapRegion* hr = _mutator_alloc_region.get(); - size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; + size_t max_tlab = max_tlab_size() * wordSize; if (hr == NULL) { - return max_tlab_size; + return max_tlab; } else { - return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size); + return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab); } } @@ -3077,11 +3085,7 @@ return NULL; // keep some compilers happy } -// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can -// pass it as the perm_blk to SharedHeap::process_strong_roots. -// When process_strong_roots stop calling perm_blk->younger_refs_iterate -// we can change this closure to extend the simpler OopClosure. 
-class VerifyRootsClosure: public OopsInGenClosure {
+class VerifyRootsClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyOption     _vo;
@@ -3117,7 +3121,7 @@
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};

-class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
+class G1VerifyCodeRootOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  OopClosure* _root_cl;
  nmethod* _nm;
@@ -3396,14 +3400,12 @@

    // We apply the relevant closures to all the oops in the
    // system dictionary, the string table and the code cache.
-    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
+    const int so = SO_AllClasses | SO_Strings | SO_AllCodeCache;

    // Need cleared claim bits for the strong roots processing
    ClassLoaderDataGraph::clear_claimed_marks();

    process_strong_roots(true,      // activate StrongRootsScope
-                         false,    // we set "is scavenging" to false,
-                                   // so we don't reset the dirty cards.
                         ScanningOption(so),  // roots scanning options
                         &rootsCl,
                         &blobsCl,
@@ -3655,6 +3657,7 @@
  // always_do_update_barrier = false;
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  // Fill TLAB's and such
+  accumulate_statistics_all_tlabs();
  ensure_parsability(true);

  if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
@@ -3679,6 +3682,8 @@
         "derived pointer present"));
  // always_do_update_barrier = true;

+  resize_all_tlabs();
+
  // We have just completed a GC. Update the soft reference
  // policy with the new heap occupancy
  Universe::update_heap_info_at_gc();
@@ -4651,8 +4656,8 @@
    _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
    _mark_in_progress(_g1->mark_in_progress()) { }

-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
+template <G1Barrier barrier, bool do_mark_object>
+void G1ParCopyClosure<barrier, do_mark_object>::mark_object(oop obj) {
#ifdef ASSERT
  HeapRegion* hr = _g1->heap_region_containing(obj);
  assert(hr != NULL, "sanity");
@@ -4663,8 +4668,8 @@
  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
}

-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
+void G1ParCopyClosure<barrier, do_mark_object>
  ::mark_forwarded_object(oop from_obj, oop to_obj) {
#ifdef ASSERT
  assert(from_obj->is_forwarded(), "from obj should be forwarded");
@@ -4687,8 +4692,8 @@
  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
}

-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
+oop G1ParCopyClosure<barrier, do_mark_object>
  ::copy_to_survivor_space(oop old) {
  size_t word_sz = old->size();
  HeapRegion* from_region = _g1->heap_region_containing_raw(old);
@@ -4784,13 +4789,11 @@
  }
}

-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
template <class T>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
+void G1ParCopyClosure<barrier, do_mark_object>
  ::do_oop_work(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
-  assert(barrier != G1BarrierRS || obj != NULL,
-         "Precondition: G1BarrierRS implies obj is non-NULL");

  assert(_worker_id == _par_scan_state->queue_num(), "sanity");
@@ -4810,10 +4813,7 @@
      mark_forwarded_object(obj, forwardee);
    }

-    // When scanning the RS, we only care about objs in CS.
-    if (barrier == G1BarrierRS) {
-      _par_scan_state->update_rs(_from, p, _worker_id);
-    } else if (barrier == G1BarrierKlass) {
+    if (barrier == G1BarrierKlass) {
      do_klass_barrier(p, forwardee);
    }
  } else {
@@ -4828,14 +4828,10 @@
  if (barrier == G1BarrierEvac && obj != NULL) {
    _par_scan_state->update_rs(_from, p, _worker_id);
  }
-
-  if (do_gen_barrier && obj != NULL) {
-    par_do_barrier(p);
-  }
-}
-
-template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
-template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
+}
+
+template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
+template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);

template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  assert(has_partial_array_mask(p), "invariant");
@@ -5119,13 +5115,13 @@

  BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);

-  assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
+  assert(so & SO_AllCodeCache || scan_rs != NULL, "must scan code roots somehow");
  // Walk the code cache/strong code roots w/o buffering, because StarTask
  // cannot handle unaligned oop locations.
  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);

  process_strong_roots(false, // no scoping; this is parallel code
-                       is_scavenging, so,
+                       so,
                       &buf_scan_non_heap_roots,
                       &eager_scan_code_roots,
                       scan_klasses
@@ -5173,7 +5169,7 @@
  // the collection set.
  // Note all threads participate in this set of root tasks.
  double mark_strong_code_roots_ms = 0.0;
-  if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
+  if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
    double mark_strong_roots_start = os::elapsedTime();
    mark_strong_code_roots(worker_i);
    mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
@@ -5193,6 +5189,99 @@
  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
}

+class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
+private:
+  BoolObjectClosure* _is_alive;
+  int _initial_string_table_size;
+  int _initial_symbol_table_size;
+
+  bool _process_strings;
+  int _strings_processed;
+  int _strings_removed;
+
+  bool _process_symbols;
+  int _symbols_processed;
+  int _symbols_removed;
+public:
+  G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
+    AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
+    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
+    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
+
+    _initial_string_table_size = StringTable::the_table()->table_size();
+    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
+    if (process_strings) {
+      StringTable::clear_parallel_claimed_index();
+    }
+    if (process_symbols) {
+      SymbolTable::clear_parallel_claimed_index();
+    }
+  }
+
+  ~G1StringSymbolTableUnlinkTask() {
+    guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
+              err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
+                      StringTable::parallel_claimed_index(), _initial_string_table_size));
+    guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
+              err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
+                      SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
+  }
+
+  void work(uint worker_id) {
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      int strings_processed = 0;
+      int strings_removed = 0;
+      int symbols_processed = 0;
+      int symbols_removed = 0;
+      if (_process_strings) {
+        StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
+        Atomic::add(strings_processed, &_strings_processed);
+        Atomic::add(strings_removed, &_strings_removed);
+      }
+      if (_process_symbols) {
+        SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
+        Atomic::add(symbols_processed, &_symbols_processed);
+        Atomic::add(symbols_removed, &_symbols_removed);
+      }
+    } else {
+      if (_process_strings) {
+        StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
+      }
+      if (_process_symbols) {
+        SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
+      }
+    }
+  }
+
+  size_t strings_processed() const { return (size_t)_strings_processed; }
+  size_t strings_removed() const { return (size_t)_strings_removed; }
+
+  size_t symbols_processed() const { return (size_t)_symbols_processed; }
+  size_t symbols_removed() const { return (size_t)_symbols_removed; }
+};
+
+void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
+                                                     bool process_strings, bool process_symbols) {
+  uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                   _g1h->workers()->active_workers() : 1);
+
+  G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    set_par_threads(n_workers);
+    workers()->run_task(&g1_unlink_task);
+    set_par_threads(0);
+  } else {
+    g1_unlink_task.work(0);
+  }
+  if (G1TraceStringSymbolTableScrubbing) {
+    gclog_or_tty->print_cr("Cleaned string and symbol table, "
+                           "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
                           "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
+                           g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
+                           g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
+  }
+}
+
// Weak Reference Processing support

// An always "is_alive" closure that is used to preserve referents.
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Feb 11 11:26:05 2014 -0800
@@ -209,7 +209,7 @@
  friend class OldGCAllocRegion;

  // Closures used in implementation.
-  template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+  template <G1Barrier barrier, bool do_mark_object>
  friend class G1ParCopyClosure;
  friend class G1IsAliveClosure;
  friend class G1EvacuateFollowersClosure;
@@ -1373,7 +1373,7 @@
  // Divide the heap region sequence into "chunks" of some size (the number
  // of regions divided by the number of parallel threads times some
  // overpartition factor, currently 4).  Assumes that this will be called
-  // in parallel by ParallelGCThreads worker threads with discinct worker
+  // in parallel by ParallelGCThreads worker threads with distinct worker
  // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
  // calls will use the same "claim_value", and that that claim value is
  // different from the claim_value of any heap region before the start of
@@ -1470,9 +1470,11 @@

  // Section on thread-local allocation buffers (TLABs)
  // See CollectedHeap for semantics.
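The work() method above has each worker gather private counts and then publish them with a single Atomic::add, so contention on the shared fields is limited to one update per worker. A minimal sketch of that reduction pattern using standard C++ atomics rather than the HotSpot Atomic class (thread count and loop bounds are hypothetical):

#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

int main() {
  std::atomic<int> total_processed{0};
  std::vector<std::thread> workers;
  for (int w = 0; w < 4; w++) {
    workers.emplace_back([&total_processed]() {
      int local_processed = 0;
      for (int i = 0; i < 1000; i++) {
        local_processed++;  // process one (hypothetical) table entry
      }
      // One shared update per worker, mirroring
      // Atomic::add(strings_processed, &_strings_processed).
      total_processed.fetch_add(local_processed);
    });
  }
  for (auto& t : workers) t.join();
  assert(total_processed.load() == 4000);
  return 0;
}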
-  virtual bool supports_tlab_allocation() const;
-  virtual size_t tlab_capacity(Thread* thr) const;
-  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+  bool supports_tlab_allocation() const;
+  size_t tlab_capacity(Thread* ignored) const;
+  size_t tlab_used(Thread* ignored) const;
+  size_t max_tlab_size() const;
+  size_t unsafe_max_tlab_alloc(Thread* ignored) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
@@ -1518,7 +1520,7 @@
  // Returns "true" iff the given word_size is "very large".
  static bool isHumongous(size_t word_size) {
    // Note this has to be strictly greater-than as the TLABs
-    // are capped at the humongous thresold and we want to
+    // are capped at the humongous threshold and we want to
    // ensure that we don't try to allocate a TLAB as
    // humongous and that we don't allocate a humongous
    // object in a TLAB.
@@ -1557,7 +1559,7 @@
  void set_region_short_lived_locked(HeapRegion* hr);
  // add appropriate methods for any other surv rate groups

-  YoungList* young_list() { return _young_list; }
+  YoungList* young_list() const { return _young_list; }

  // debugging
  bool check_young_list_well_formed() {
@@ -1648,26 +1650,30 @@

  // Optimized nmethod scanning support routines

-  // Register the given nmethod with the G1 heap
+  // Register the given nmethod with the G1 heap.
  virtual void register_nmethod(nmethod* nm);

-  // Unregister the given nmethod from the G1 heap
+  // Unregister the given nmethod from the G1 heap.
  virtual void unregister_nmethod(nmethod* nm);

  // Migrate the nmethods in the code root lists of the regions
  // in the collection set to regions in to-space. In the event
  // of an evacuation failure, nmethods that reference objects
-  // that were not successfullly evacuated are not migrated.
+  // that were not successfully evacuated are not migrated.
  void migrate_strong_code_roots();

  // During an initial mark pause, mark all the code roots that
  // point into regions *not* in the collection set.
  void mark_strong_code_roots(uint worker_id);

-  // Rebuild the stong code root lists for each region
-  // after a full GC
+  // Rebuild the strong code root lists for each region
+  // after a full GC.
  void rebuild_strong_code_roots();

+  // Delete entries for dead interned strings and clean up unreferenced symbols
+  // in the symbol table, possibly in parallel.
+  void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
+
  // Verification

  // The following is just to alert the verification code
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Feb 11 11:26:05 2014 -0800
@@ -318,7 +318,7 @@

void G1CollectorPolicy::initialize_alignments() {
  _space_alignment = HeapRegion::GrainBytes;
-  size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+  size_t card_table_alignment = GenRemSet::max_alignment_constraint();
  size_t page_size = UseLargePages ?
os::large_page_size() : os::vm_page_size(); _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size); } @@ -1075,7 +1075,7 @@ } _short_lived_surv_rate_group->start_adding_regions(); - // do that for any other surv rate groupsx + // Do that for any other surv rate groups if (update_stats) { double cost_per_card_ms = 0.0; @@ -1741,7 +1741,7 @@ _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; _inc_cset_bytes_used_before += used_bytes; - // Cache the values we have added to the aggregated informtion + // Cache the values we have added to the aggregated information // in the heap region in case we have to remove this region from // the incremental collection set, or it is updated by the // rset sampling code diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -116,7 +116,7 @@ // If only -XX:NewRatio is set we should use the specified ratio of the heap // as both min and max. This will be interpreted as "fixed" just like the // NewSize==MaxNewSize case above. But we will update the min and max -// everytime the heap size changes. +// every time the heap size changes. // // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is // combined with either NewSize or MaxNewSize. (A warning message is printed.) @@ -523,9 +523,9 @@ // synchronize updates to this field. size_t _inc_cset_recorded_rs_lengths; - // A concurrent refinement thread periodcially samples the young + // A concurrent refinement thread periodically samples the young // region RSets and needs to update _inc_cset_recorded_rs_lengths as - // the RSets grow. Instead of having to syncronize updates to that + // the RSets grow. Instead of having to synchronize updates to that // field we accumulate them in this field and add it to // _inc_cset_recorded_rs_lengths_diffs at the start of a GC. ssize_t _inc_cset_recorded_rs_lengths_diffs; @@ -604,7 +604,7 @@ // Calculate and return the maximum young list target length that // can fit into the pause time goal. The parameters are: rs_lengths // represent the prediction of how large the young RSet lengths will - // be, base_min_length is the alreay existing number of regions in + // be, base_min_length is the already existing number of regions in // the young list, min_length and max_length are the desired min and // max young list length according to the user's inputs. uint calculate_young_list_target_length(size_t rs_lengths, @@ -820,6 +820,8 @@ // do that for any other surv rate groups } + size_t young_list_target_length() const { return _young_list_target_length; } + bool is_young_list_full() { uint young_list_length = _g1->young_list()->length(); uint young_list_target_length = _young_list_target_length; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1MMUTracker.hpp --- a/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -103,7 +103,7 @@ // The data structure implemented is a circular queue. // Head "points" to the most recent addition, tail to the oldest one. // The array is of fixed size and I don't think we'll need more than - // two or three entries with the current behaviour of G1 pauses. + // two or three entries with the current behavior of G1 pauses. 
  // If the array is full, an easy fix is to look for the pauses with
  // the shortest gap between them and consolidate them.
  // For now, we have taken the expedient alternative of forgetting
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Feb 11 11:26:05 2014 -0800
@@ -131,7 +131,6 @@
  ClassLoaderDataGraph::clear_claimed_marks();

  sh->process_strong_roots(true,  // activate StrongRootsScope
-                           false, // not scavenging.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
@@ -163,11 +162,8 @@
  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);

-  // Delete entries for dead interned strings.
-  StringTable::unlink(&GenMarkSweep::is_alive);
-
-  // Clean up unreferenced symbols in symbol table.
-  SymbolTable::unlink();
+  // Delete entries for dead interned strings and clean up unreferenced symbols in the symbol table.
+  G1CollectedHeap::heap()->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
@@ -180,7 +176,7 @@
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
-    // fail. At the end of the GC, the orginal mark word values
+    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    if (!VerifySilently) {
@@ -311,7 +307,6 @@
  ClassLoaderDataGraph::clear_claimed_marks();

  sh->process_strong_roots(true,  // activate StrongRootsScope
-                           false, // not scavenging.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_pointer_closure,
                           NULL,  // do not touch code cache here
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Tue Feb 11 11:26:05 2014 -0800
@@ -112,7 +112,7 @@
  // take_sample() only returns "used".  When sampling was used, there
  // were some anomolous values emitted which may have been the consequence
  // of not updating all values simultaneously (i.e., see the calculation done
-  // in eden_space_used(), is it possbile that the values used to
+  // in eden_space_used(), is it possible that the values used to
  // calculate either eden_used or survivor_used are being updated by
  // the collector when the sample is being done?).
  const bool sampled = false;
@@ -135,7 +135,7 @@

  // Young collection set
  // name "generation.0".  This is logically the young generation.
-  // The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces.
+  // The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
  // See _old_collection_counters for additional counters
  _young_collection_counters = new G1YoungGenerationCounters(this, "young");
@@ -254,7 +254,7 @@
  eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
  eden_counters()->update_used(eden_space_used());
  // only the to survivor space (s1) is active, so we don't need to
-  // update the counteres for the from survivor space (s0)
+  // update the counters for the from survivor space (s0)
  to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
  to_counters()->update_used(survivor_space_used());
  old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Tue Feb 11 11:26:05 2014 -0800
@@ -108,7 +108,7 @@
// is that all the above sizes need to be recalculated when the old
// gen changes capacity (after a GC or after a humongous allocation)
// but only the eden occupancy changes when a new eden region is
-// allocated. So, in the latter case we have minimal recalcuation to
+// allocated. So, in the latter case we have minimal recalculation to
// do which is important as we want to keep the eden region allocation
// path as low-overhead as possible.
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1OopClosures.hpp
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Tue Feb 11 11:26:05 2014 -0800
@@ -38,7 +38,7 @@

// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
-class OopsInHeapRegionClosure: public OopsInGenClosure {
+class OopsInHeapRegionClosure: public ExtendedOopClosure {
protected:
  HeapRegion* _from;
public:
@@ -131,7 +131,7 @@
  template <class T> void do_klass_barrier(T* p, oop new_obj);
};

-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
  G1ParScanClosure _scanner;
  template <class T> void do_oop_work(T* p);
@@ -166,22 +166,16 @@
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

-typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<false, G1BarrierKlass, false> G1ParScanMetadataClosure;
+typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;

-typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<true, G1BarrierNone, true> G1ParScanAndMarkClosure;
-typedef G1ParCopyClosure<false, G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
-
-// The following closure types are no longer used but are retained
-// for historical reasons:
-// typedef G1ParCopyClosure<false, G1BarrierRS, false> G1ParScanHeapRSClosure;
-// typedef G1ParCopyClosure<false, G1BarrierRS, true> G1ParScanAndMarkHeapRSClosure;
+typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;

// The following closure type is defined in g1_specialized_oop_closures.hpp:
//
-// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;

// We use a separate closure to handle references during evacuation
// failure processing.
@@ -189,7 +183,7 @@
// (since that closure no longer assumes that the references it
// handles point into the collection set).
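The typedefs above pick each closure variant by fixing G1ParCopyClosure's template parameters, so the per-oop branches on the barrier kind and marking flag are resolved at compile time. A minimal sketch of that technique with hypothetical types, not the real closure hierarchy:

#include <iostream>

enum Barrier { BarrierNone, BarrierEvac };

// Each instantiation fixes its flags, in the spirit of
// G1ParCopyClosure<G1Barrier barrier, bool do_mark_object>; the ifs below
// test compile-time constants and can be folded away per instantiation.
template <Barrier barrier, bool do_mark_object>
struct CopyClosure {
  void do_oop(const char* what) {
    if (do_mark_object) {
      std::cout << "mark " << what << "\n";
    }
    if (barrier == BarrierEvac) {
      std::cout << "update remembered set for " << what << "\n";
    }
  }
};

int main() {
  CopyClosure<BarrierNone, true> scan_and_mark;  // marks, no barrier
  CopyClosure<BarrierEvac, false> evac;          // barrier, no marking
  scan_and_mark.do_oop("obj1");
  evac.do_oop("obj2");
  return 0;
}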
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
+typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;

class FilterIntoCSClosure: public ExtendedOopClosure {
  G1CollectedHeap* _g1;
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Feb 11 11:26:05 2014 -0800
@@ -177,7 +177,7 @@
  // The _record_refs_into_cset flag is true during the RSet
  // updating part of an evacuation pause. It is false at all
  // other times:
-  // * rebuilding the rembered sets after a full GC
+  // * rebuilding the remembered sets after a full GC
  // * during concurrent refinement.
  // * updating the remembered sets of regions in the collection
  // set in the event of an evacuation failure (when deferred
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1RemSet.cpp
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Feb 11 11:26:05 2014 -0800
@@ -195,7 +195,7 @@
    HeapRegionRemSetIterator iter(hrrs);
    size_t card_index;

-    // We claim cards in block so as to recude the contention. The block size is determined by
+    // We claim cards in blocks so as to reduce contention. The block size is determined by
    // the G1RSetScanBlockSize parameter.
    size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
    for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
@@ -587,7 +587,7 @@

  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
-  // since we don't want to update remebered sets with entries that
+  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1_globals.hpp
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Tue Feb 11 11:26:05 2014 -0800
@@ -71,6 +71,9 @@
  diagnostic(bool, G1TraceConcRefinement, false,                              \
          "Trace G1 concurrent refinement")                                  \
                                                                              \
+  experimental(bool, G1TraceStringSymbolTableScrubbing, false,                \
+          "Trace information about string and symbol table scrubbing.")      \
+                                                                              \
  product(double, G1ConcMarkStepDurationMillis, 10.0,                         \
          "Target duration of individual concurrent marking steps "          \
          "in milliseconds.")                                                 \
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp
--- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Tue Feb 11 11:26:05 2014 -0800
@@ -33,18 +33,17 @@

// Forward declarations.
enum G1Barrier {
  G1BarrierNone,
-  G1BarrierRS,
  G1BarrierEvac,
  G1BarrierKlass
};

-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+template<G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure;
class G1ParScanClosure;
class G1ParPushHeapRSClosure;

-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;

class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/heapRegion.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Feb 11 11:26:05 2014 -0800
@@ -1027,7 +1027,7 @@
    }
  }

-  // Loook up end - 1
+  // Look up end - 1
  HeapWord* addr_4 = the_end - 1;
  HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
  if (b_start_4 != p) {
@@ -1111,7 +1111,7 @@
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
-    // of the region. Either way, the behaviour will be correct.
+    // of the region. Either way, the behavior will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Feb 11 11:26:05 2014 -0800
@@ -97,7 +97,7 @@
  HeapWord* heap_end() const {return _regions.end_address_mapped(); }

public:
-  // Empty contructor, we'll initialize it with the initialize() method.
+  // Empty constructor, we'll initialize it with the initialize() method.
  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }

  void initialize(HeapWord* bottom, HeapWord* end);
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/ptrQueue.cpp
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Tue Feb 11 11:26:05 2014 -0800
@@ -71,7 +71,7 @@
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
-  // we acquire DirtyCardQ_CBL_mon inside enqeue_complete_buffer as they
+  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
  // have the same rank and we may get the "possible deadlock" message
  _lock->unlock();
@@ -151,7 +151,7 @@
    // The current PtrQ may be the shared dirty card queue and
    // may be being manipulated by more than one worker thread
-    // during a pause. Since the enqueuing of the completed
+    // during a pause. Since the enqueueing of the completed
    // buffer unlocks the Shared_DirtyCardQ_lock more than one
    // worker thread can 'race' on reading the shared queue attributes
    // (_buf and _index) and multiple threads can call into this
@@ -170,7 +170,7 @@
    locking_enqueue_completed_buffer(buf);  // enqueue completed buffer

-    // While the current thread was enqueuing the buffer another thread
+    // While the current thread was enqueueing the buffer another thread
    // may have a allocated a new buffer and inserted it into this pointer
    // queue.
If that happens then we just return so that the current // thread doesn't overwrite the buffer allocated by the other thread diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/satbQueue.cpp --- a/src/share/vm/gc_implementation/g1/satbQueue.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -219,58 +219,52 @@ } #ifdef ASSERT -void SATBMarkQueueSet::dump_active_values(JavaThread* first, - bool expected_active) { - gclog_or_tty->print_cr("SATB queue active values for Java Threads"); - gclog_or_tty->print_cr(" SATB queue set: active is %s", - (is_active()) ? "TRUE" : "FALSE"); - gclog_or_tty->print_cr(" expected_active is %s", - (expected_active) ? "TRUE" : "FALSE"); - for (JavaThread* t = first; t; t = t->next()) { - bool active = t->satb_mark_queue().is_active(); - gclog_or_tty->print_cr(" thread %s, active is %s", - t->name(), (active) ? "TRUE" : "FALSE"); +void SATBMarkQueueSet::dump_active_states(bool expected_active) { + gclog_or_tty->print_cr("Expected SATB active state: %s", + expected_active ? "ACTIVE" : "INACTIVE"); + gclog_or_tty->print_cr("Actual SATB active states:"); + gclog_or_tty->print_cr(" Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE"); + for (JavaThread* t = Threads::first(); t; t = t->next()) { + gclog_or_tty->print_cr(" Thread \"%s\" queue: %s", t->name(), + t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE"); + } + gclog_or_tty->print_cr(" Shared queue: %s", + shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE"); +} + +void SATBMarkQueueSet::verify_active_states(bool expected_active) { + // Verify queue set state + if (is_active() != expected_active) { + dump_active_states(expected_active); + guarantee(false, "SATB queue set has an unexpected active state"); + } + + // Verify thread queue states + for (JavaThread* t = Threads::first(); t; t = t->next()) { + if (t->satb_mark_queue().is_active() != expected_active) { + dump_active_states(expected_active); + guarantee(false, "Thread SATB queue has an unexpected active state"); + } + } + + // Verify shared queue state + if (shared_satb_queue()->is_active() != expected_active) { + dump_active_states(expected_active); + guarantee(false, "Shared SATB queue has an unexpected active state"); } } #endif // ASSERT -void SATBMarkQueueSet::set_active_all_threads(bool b, - bool expected_active) { +void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) { assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); - JavaThread* first = Threads::first(); - #ifdef ASSERT - if (_all_active != expected_active) { - dump_active_values(first, expected_active); - - // I leave this here as a guarantee, instead of an assert, so - // that it will still be compiled in if we choose to uncomment - // the #ifdef ASSERT in a product build. The whole block is - // within an #ifdef ASSERT so the guarantee will not be compiled - // in a product build anyway. - guarantee(false, - "SATB queue set has an unexpected active value"); - } + verify_active_states(expected_active); #endif // ASSERT - _all_active = b; - - for (JavaThread* t = first; t; t = t->next()) { -#ifdef ASSERT - bool active = t->satb_mark_queue().is_active(); - if (active != expected_active) { - dump_active_values(first, expected_active); - - // I leave this here as a guarantee, instead of an assert, so - // that it will still be compiled in if we choose to uncomment - // the #ifdef ASSERT in a product build. 
The whole block is - // within an #ifdef ASSERT so the guarantee will not be compiled - // in a product build anyway. - guarantee(false, - "thread has an unexpected active value in its SATB queue"); - } -#endif // ASSERT - t->satb_mark_queue().set_active(b); + _all_active = active; + for (JavaThread* t = Threads::first(); t; t = t->next()) { + t->satb_mark_queue().set_active(active); } + shared_satb_queue()->set_active(active); } void SATBMarkQueueSet::filter_thread_buffers() { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/satbQueue.hpp --- a/src/share/vm/gc_implementation/g1/satbQueue.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -87,7 +87,8 @@ bool apply_closure_to_completed_buffer_work(bool par, int worker); #ifdef ASSERT - void dump_active_values(JavaThread* first, bool expected_active); + void dump_active_states(bool expected_active); + void verify_active_states(bool expected_active); #endif // ASSERT public: @@ -99,11 +100,11 @@ static void handle_zero_index_for_thread(JavaThread* t); - // Apply "set_active(b)" to all Java threads' SATB queues. It should be + // Apply "set_active(active)" to all SATB queues in the set. It should be // called only with the world stopped. The method will assert that the // SATB queues of all threads it visits, as well as the SATB queue // set itself, has an active value same as expected_active. - void set_active_all_threads(bool b, bool expected_active); + void set_active_all_threads(bool active, bool expected_active); // Filter all the currently-active SATB buffers. void filter_thread_buffers(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/g1/sparsePRT.hpp --- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -144,7 +144,7 @@ // Attempts to ensure that the given card_index in the given region is in // the sparse table. If successful (because the card was already - // present, or because it was successfullly added) returns "true". + // present, or because it was successfully added) returns "true". // Otherwise, returns "false" to indicate that the addition would // overflow the entry for the region. The caller must transfer these // entries to a larger-capacity representation. @@ -201,8 +201,7 @@ bool has_next(size_t& card_index); }; -// Concurrent accesss to a SparsePRT must be serialized by some external -// mutex. +// Concurrent access to a SparsePRT must be serialized by some external mutex. class SparsePRTIter; class SparsePRTCleanupTask; @@ -248,7 +247,7 @@ // Attempts to ensure that the given card_index in the given region is in // the sparse table. If successful (because the card was already - // present, or because it was successfullly added) returns "true". + // present, or because it was successfully added) returns "true". // Otherwise, returns "false" to indicate that the addition would // overflow the entry for the region. The caller must transfer these // entries to a larger-capacity representation. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -154,7 +154,7 @@ // There used to be this guarantee there. 
// guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments"); // Code below forces this requirement. In addition the desired eden - // size and disired survivor sizes are desired goals and may + // size and desired survivor sizes are desired goals and may // exceed the total generation size. assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(), diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp --- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -213,7 +213,7 @@ && sp->block_is_obj(first_block) // first block is an object && !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied) || oop(first_block)->is_typeArray())) { - // Find our least non-clean card, so that a left neighbour + // Find our least non-clean card, so that a left neighbor // does not scan an object straddling the mutual boundary // too far to the right, and attempt to scan a portion of // that object twice. @@ -247,14 +247,14 @@ } NOISY(else { tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL"); // In the future, we could have this thread look for a non-NULL value to copy from its - // right neighbour (up to the end of the first object). + // right neighbor (up to the end of the first object). if (last_card_of_cur_chunk < last_card_of_first_obj) { tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n" " might be efficient to get value from right neighbour?"); } }) } else { - // In this case we can help our neighbour by just asking them + // In this case we can help our neighbor by just asking them // to stop at our first card (even though it may not be dirty). NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");) assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter"); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parNew/parNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -612,14 +612,13 @@ KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(), gch->rem_set()->klass_rem_set()); - int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache; par_scan_state.start_strong_roots(); gch->gen_process_strong_roots(_gen->level(), true, // Process younger gens, if any, // as strong roots. false, // no scope; this is parallel code - true, // is scavenging SharedHeap::ScanningOption(so), &par_scan_state.to_space_root_closure(), true, // walk *all* scavengable nmethods @@ -1071,7 +1070,7 @@ size_policy->avg_survived()->sample(from()->used()); } - // We need to use a monotonically non-deccreasing time in ms + // We need to use a monotonically non-decreasing time in ms // or we will see time-warp warnings and os::javaTimeMillis() // does not guarantee monotonicity. 
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; @@ -1403,7 +1402,7 @@ #ifndef PRODUCT // It's OK to call this multi-threaded; the worst thing // that can happen is that we'll get a bunch of closely -// spaced simulated oveflows, but that's OK, in fact +// spaced simulated overflows, but that's OK, in fact // probably good as it would exercise the overflow code // under contention. bool ParNewGeneration::should_simulate_overflow() { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -118,8 +118,8 @@ // Make checks on the current sizes of the generations and -// the contraints on the sizes of the generations. Push -// up the boundary within the contraints. A partial +// the constraints on the sizes of the generations. Push +// up the boundary within the constraints. A partial // push can occur. void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) { assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check"); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -69,7 +69,7 @@ // the available space and attempt to move the boundary if more space // is needed. The growth is not guaranteed to occur. void adjust_boundary_for_old_gen_needs(size_t desired_change_in_bytes); - // Similary for a growth of the young generation. + // Similarly for a growth of the young generation. void adjust_boundary_for_young_gen_needs(size_t eden_size, size_t survivor_size); // Return the total byte size of the reserved space diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -65,7 +65,7 @@ } }; -// Checks all objects for the existance of some type of mark, +// Checks all objects for the existence of some type of mark, // precise or imprecise, dirty or newgen. class CheckForUnmarkedObjects : public ObjectClosure { private: @@ -84,7 +84,7 @@ } // Card marks are not precise. The current system can leave us with - // a mismash of precise marks and beginning of object marks. This means + // a mismatch of precise marks and beginning of object marks. This means // we test for missing precise marks first. If any are found, we don't // fail unless the object head is also unmarked. virtual void do_object(oop obj) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -202,12 +202,12 @@ list->print("list:"); } if (list->is_empty()) { - // Enqueuing the empty list: nothing to do. + // Enqueueing the empty list: nothing to do. 
+    // Enqueueing the empty list: nothing to do.
    return;
  }
  uint list_length = list->length();
  if (is_empty()) {
-    // Enqueuing to empty list: just acquire elements.
+    // Enqueueing to empty list: just acquire elements.
    set_insert_end(list->insert_end());
    set_remove_end(list->remove_end());
    set_length(list_length);
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Tue Feb 11 11:26:05 2014 -0800
@@ -303,7 +303,7 @@
// load balancing (i.e., over partitioning).  The last task to be
// executed by a GC thread in a job is a work stealing task.  A
// GC thread that gets a work stealing task continues to execute
-// that task until the job is done.  In the static number of GC theads
+// that task until the job is done.  In the static number of GC threads
// case, tasks are added to a queue (FIFO).  The work stealing tasks are
// the last to be added.  Once the tasks are added, the GC threads grab
// a task and go.  A single thread can do all the non-work stealing tasks
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Tue Feb 11 11:26:05 2014 -0800
@@ -139,11 +139,6 @@
      return true;
    }
  }
-  // No object starts in this slice; verify this using
-  // more traditional methods: Note that no object can
-  // start before the start_addr.
-  assert(end_addr == start_addr ||
-         object_start(end_addr - 1) <= start_addr,
-         "Oops an object does start in this slice?");
+
  return false;
}
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Feb 11 11:26:05 2014 -0800
@@ -488,6 +488,10 @@
  return young_gen()->eden_space()->tlab_capacity(thr);
}

+size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
+  return young_gen()->eden_space()->tlab_used(thr);
+}
+
size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}
@@ -673,7 +677,7 @@

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
-// may be changed to accomodate the desired resize.
+// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
@@ -690,7 +694,7 @@

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
-// may be changed to accomodate the desired resize.
+// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) { if (UseAdaptiveGCBoundary) { if (size_policy()->bytes_absorbed_from_eden() != 0) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -187,6 +187,7 @@ bool supports_tlab_allocation() const { return true; } size_t tlab_capacity(Thread* thr) const; + size_t tlab_used(Thread* thr) const; size_t unsafe_max_tlab_alloc(Thread* thr) const; // Can a compiler initialize a new object without store barriers? diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -45,7 +45,7 @@ // the do_it() method of a ThreadRootsMarkingTask is executed, it // starts marking from the thread's roots. // -// The enqueuing of the MarkFromRootsTask and ThreadRootsMarkingTask +// The enqueueing of the MarkFromRootsTask and ThreadRootsMarkingTask // do little more than create the task and put it on a queue. The // queue is a GCTaskQueue and threads steal tasks from this GCTaskQueue. // diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp" #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" @@ -76,6 +77,38 @@ _old_gen_policy_is_ready = false; } +size_t PSAdaptiveSizePolicy::calculate_free_based_on_live(size_t live, uintx ratio_as_percentage) { + // We want to calculate how much free memory there can be based on the + // amount of live data currently in the old gen. Using the formula: + // ratio * (free + live) = free + // Some equation solving later we get: + // free = (live * ratio) / (1 - ratio) + + const double ratio = ratio_as_percentage / 100.0; + const double ratio_inverse = 1.0 - ratio; + const double tmp = live * ratio; + size_t free = (size_t)(tmp / ratio_inverse); + + return free; +} + +size_t PSAdaptiveSizePolicy::calculated_old_free_size_in_bytes() const { + size_t free_size = (size_t)(_promo_size + avg_promoted()->padded_average()); + size_t live = ParallelScavengeHeap::heap()->old_gen()->used_in_bytes(); + + if (MinHeapFreeRatio != 0) { + size_t min_free = calculate_free_based_on_live(live, MinHeapFreeRatio); + free_size = MAX2(free_size, min_free); + } + + if (MaxHeapFreeRatio != 100) { + size_t max_free = calculate_free_based_on_live(live, MaxHeapFreeRatio); + free_size = MIN2(max_free, free_size); + } + + return free_size; +} + void PSAdaptiveSizePolicy::major_collection_begin() { // Update the interval time _major_timer.stop(); @@ -482,7 +515,7 @@ // adjust down the total heap size. Adjust down the larger of the // generations. - // Add some checks for a threshhold for a change. 
For example,
+  // Add some checks for a threshold for a change.  For example,
  // a change less than the necessary alignment is probably not worth
  // attempting.
@@ -1161,7 +1194,7 @@
  // We use the tenuring threshold to equalize the cost of major
  // and minor collections.
  // ThresholdTolerance is used to indicate how sensitive the
-  // tenuring threshold is to differences in cost betweent the
+  // tenuring threshold is to differences in cost between the
  // collection types.

  // Get the times of interest. This involves a little work, so
@@ -1292,3 +1325,18 @@
                          st, PSScavenge::tenuring_threshold());
}
+
+#ifndef PRODUCT
+
+void TestOldFreeSpaceCalculation_test() {
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 20) == 25, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 50) == 100, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 60) == 150, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 75) == 300, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 20) == 100, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 50) == 400, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 60) == 600, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 75) == 1200, "Calculation of free memory failed");
+}
+
+#endif /* !PRODUCT */
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Thu Feb 06 13:08:44 2014 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Tue Feb 11 11:26:05 2014 -0800
@@ -37,7 +37,7 @@
//
// It also computes an optimal tenuring threshold between the young
// and old generations, so as to equalize the cost of collections
-// of those generations, as well as optimial survivor space sizes
+// of those generations, as well as optimal survivor space sizes
// for the young generation.
//
// While this class is specifically intended for a generational system
@@ -113,7 +113,7 @@
  // Changing the generation sizing depends on the data that is
  // gathered about the effects of changes on the pause times and
  // throughput.  These variable count the number of data points
-  // gathered.  The policy may use these counters as a threshhold
+  // gathered.  The policy may use these counters as a threshold
  // for reliable data.
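The calculate_free_based_on_live() change above solves ratio * (free + live) = free for free, giving free = live * ratio / (1 - ratio), and the TestOldFreeSpaceCalculation_test asserts exercise exactly that formula. A minimal standalone check of the same algebra (plain C++, not the HotSpot class):

#include <cassert>
#include <cstddef>

// Same algebra as the patch: solving ratio * (free + live) = free
// yields free = live * ratio / (1 - ratio).
static size_t calculate_free_based_on_live(size_t live, unsigned ratio_as_percentage) {
  const double ratio = ratio_as_percentage / 100.0;
  return (size_t)(live * ratio / (1.0 - ratio));
}

int main() {
  // Mirrors two of the test values above: with 100 bytes live and a 20%
  // free-ratio goal, 25 bytes free gives 25 / (25 + 100) = 20%.
  assert(calculate_free_based_on_live(100, 20) == 25);
  assert(calculate_free_based_on_live(400, 75) == 1200);
  return 0;
}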
julong _young_gen_change_for_major_pause_count; @@ -240,7 +240,6 @@ void major_collection_begin(); void major_collection_end(size_t amount_live, GCCause::Cause gc_cause); - // void tenured_allocation(size_t size) { _avg_pretenured->sample(size); } @@ -248,9 +247,9 @@ // Accessors // NEEDS_CLEANUP should use sizes.hpp - size_t calculated_old_free_size_in_bytes() const { - return (size_t)(_promo_size + avg_promoted()->padded_average()); - } + static size_t calculate_free_based_on_live(size_t live, uintx ratio_as_percentage); + + size_t calculated_old_free_size_in_bytes() const; size_t average_old_live_in_bytes() const { return (size_t) avg_old_live()->average(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -195,7 +195,7 @@ // Update all the counters that can be updated from the size policy. // This should be called after all policy changes have been made - // and reflected internall in the size policy. + // and reflected internally in the size policy. void update_counters_from_policy(); // Update counters that can be updated from fields internal to the diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -661,7 +661,7 @@ } jlong PSMarkSweep::millis_since_last_gc() { - // We need a monotonically non-deccreasing time in ms but + // We need a monotonically non-decreasing time in ms but // os::javaTimeMillis() does not guarantee monotonicity. jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; jlong ret_val = now - _time_of_last_gc; @@ -674,7 +674,7 @@ } void PSMarkSweep::reset_millis_since_last_gc() { - // We need a monotonically non-deccreasing time in ms but + // We need a monotonically non-decreasing time in ms but // os::javaTimeMillis() does not guarantee monotonicity. _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -280,7 +280,7 @@ "Should be true before post_resize()"); MemRegion mangle_region(object_space()->end(), virtual_space_high); // Note that the object space has not yet been updated to - // coincede with the new underlying virtual space. + // coincide with the new underlying virtual space. 
SpaceMangler::mangle_region(mangle_region); } post_resize(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -187,7 +187,7 @@ void space_invariants() PRODUCT_RETURN; - // Performace Counter support + // Performance Counter support void update_counters(); // Printing support diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -2176,7 +2176,7 @@ heap->resize_all_tlabs(); - // Resize the metaspace capactiy after a collection + // Resize the metaspace capacity after a collection MetaspaceGC::compute_new_size(); if (TraceGen1Time) accumulated_time()->stop(); @@ -3285,7 +3285,7 @@ } jlong PSParallelCompact::millis_since_last_gc() { - // We need a monotonically non-deccreasing time in ms but + // We need a monotonically non-decreasing time in ms but // os::javaTimeMillis() does not guarantee monotonicity. jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; jlong ret_val = now - _time_of_last_gc; @@ -3298,7 +3298,7 @@ } void PSParallelCompact::reset_millis_since_last_gc() { - // We need a monotonically non-deccreasing time in ms but + // We need a monotonically non-decreasing time in ms but // os::javaTimeMillis() does not guarantee monotonicity. _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -877,7 +877,7 @@ // The summary phase calculates the total live data to the left of each region // XXX. Based on that total and the bottom of the space, it can calculate the // starting location of the live data in XXX. The summary phase calculates for -// each region XXX quantites such as +// each region XXX quantities such as // // - the amount of live data at the beginning of a region from an object // entering the region. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -78,7 +78,7 @@ // Returns a subregion containing all objects in this space. MemRegion used_region() { return MemRegion(bottom(), top()); } - // Boolean querries. + // Boolean queries. 
bool is_empty() const { return used() == 0; } bool not_empty() const { return used() > 0; } bool contains(const void* p) const { return _bottom <= p && p < _end; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -529,8 +529,19 @@ counters->update_survivor_overflowed(_survivor_overflow); } + size_t max_young_size = young_gen->max_size(); + + // Deciding a free ratio in the young generation is tricky, so if + // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying + // that the old generation size may have been limited because of them) we + // should then limit our young generation size using NewRatio to have it + // follow the old generation size. + if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) { + max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size()); + } + size_t survivor_limit = - size_policy->max_survivor_size(young_gen->max_size()); + size_policy->max_survivor_size(max_young_size); _tenuring_threshold = size_policy->compute_survivor_space_size_and_threshold( _survivor_overflow, @@ -553,12 +564,11 @@ // Do call at minor collections? // Don't check if the size_policy is ready at this // level. Let the size_policy check that internally. - if (UseAdaptiveSizePolicy && - UseAdaptiveGenerationSizePolicyAtMinorCollection && + if (UseAdaptiveGenerationSizePolicyAtMinorCollection && ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC)) { - // Calculate optimial free space amounts + // Calculate optimal free space amounts assert(young_gen->max_size() > young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), @@ -568,7 +578,7 @@ size_t eden_live = young_gen->eden_space()->used_in_bytes(); size_t cur_eden = young_gen->eden_space()->capacity_in_bytes(); size_t max_old_gen_size = old_gen->max_gen_size(); - size_t max_eden_size = young_gen->max_size() - + size_t max_eden_size = max_young_size - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -35,7 +35,7 @@ class PSVirtualSpace : public CHeapObj { friend class VMStructs; protected: - // The space is committed/uncommited in chunks of size _alignment. The + // The space is committed/uncommitted in chunks of size _alignment. The // ReservedSpace passed to initialize() must be aligned to this value. const size_t _alignment; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -136,7 +136,7 @@ // generation - the less space committed, the smaller the survivor // space, possibly as small as an alignment. However, we are interested // in the case where the young generation is 100% committed, as this - // is the point where eden reachs its maximum size.
At this point, + // is the point where eden reaches its maximum size. At this point, // the size of a survivor space is max_survivor_size. max_eden_size = size - 2 * max_survivor_size; } @@ -288,7 +288,7 @@ // There used to be this guarantee there. // guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments"); // Code below forces this requirement. In addition the desired eden - // size and disired survivor sizes are desired goals and may + // size and desired survivor sizes are desired goals and may // exceed the total generation size. assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking"); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -127,7 +127,7 @@ void adjust_pointers(); void compact(); - // Called during/after gc + // Called during/after GC void swap_spaces(); // Resize generation using suggested free space size and survivor size @@ -146,14 +146,14 @@ size_t free_in_words() const; // The max this generation can grow to - size_t max_size() const { return _reserved.byte_size(); } + size_t max_size() const { return _reserved.byte_size(); } // The max this generation can grow to if the boundary between // the generations are allowed to move. size_t gen_size_limit() const { return _max_gen_size; } bool is_maximal_no_gc() const { - return true; // never expands except at a GC + return true; // Never expands except at a GC } // Allocation diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -121,7 +121,7 @@ // Choose a number of GC threads based on the current size // of the heap. This may be complicated because the size of - // the heap depends on factors such as the thoughput goal. + // the heap depends on factors such as the throughput goal. // Still a large heap should be collected by more GC threads. active_workers_by_heap_size = MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread); @@ -445,7 +445,7 @@ // into account (i.e., don't trigger if the amount of free // space has suddenly jumped up). If the current is much // higher than the average, use the average since it represents - // the longer term behavor. + // the longer term behavior. const size_t live_in_eden = MIN2(eden_live, (size_t) avg_eden_live()->average()); const size_t free_in_eden = max_eden_size > live_in_eden ? diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp --- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -74,7 +74,7 @@ }; // Goal for the fraction of the total time during which application - // threads run. + // threads run const double _throughput_goal; // Last calculated sizes, in bytes, and aligned @@ -83,21 +83,21 @@ size_t _survivor_size; // calculated survivor size in bytes - // This is a hint for the heap: we've detected that gc times + // This is a hint for the heap: we've detected that GC times // are taking longer than GCTimeLimit allows. 
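A worked example of the psScavenge.cpp cap above (illustrative numbers only):

    // With -XX:MinHeapFreeRatio=10 (so the flag is in use), NewRatio = 2,
    // old gen capacity 600M and a 512M young reservation:
    size_t max_young_size = MIN2((size_t)(600*M) / NewRatio, (size_t)(512*M));  // = 300M
    // The survivor limit and max_eden_size then track the (possibly shrunken)
    // old generation instead of the fixed young reservation.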
bool _gc_overhead_limit_exceeded; // Use for diagnostics only. If UseGCOverheadLimit is false, // this variable is still set. bool _print_gc_overhead_limit_would_be_exceeded; // Count of consecutive GC that have exceeded the - // GC time limit criterion. + // GC time limit criterion uint _gc_overhead_limit_count; // This flag signals that GCTimeLimit is being exceeded - // but may not have done so for the required number of consequetive - // collections. + // but may not have done so for the required number of consecutive + // collections // Minor collection timers used to determine both - // pause and interval times for collections. + // pause and interval times for collections static elapsedTimer _minor_timer; // Major collection timers, used to determine both @@ -120,7 +120,7 @@ // Statistics for survivor space calculation for young generation AdaptivePaddedAverage* _avg_survived; - // Objects that have been directly allocated in the old generation. + // Objects that have been directly allocated in the old generation AdaptivePaddedNoZeroDevAverage* _avg_pretenured; // Variable for estimating the major and minor pause times. @@ -142,33 +142,33 @@ // for making ergonomic decisions. double _latest_minor_mutator_interval_seconds; - // Allowed difference between major and minor gc times, used - // for computing tenuring_threshold. + // Allowed difference between major and minor GC times, used + // for computing tenuring_threshold const double _threshold_tolerance_percent; - const double _gc_pause_goal_sec; // goal for maximum gc pause + const double _gc_pause_goal_sec; // Goal for maximum GC pause // Flag indicating that the adaptive policy is ready to use bool _young_gen_policy_is_ready; - // decrease/increase the young generation for minor pause time + // Decrease/increase the young generation for minor pause time int _change_young_gen_for_min_pauses; - // decrease/increase the old generation for major pause time + // Decrease/increase the old generation for major pause time int _change_old_gen_for_maj_pauses; - // change old geneneration for throughput + // change old generation for throughput int _change_old_gen_for_throughput; // change young generation for throughput int _change_young_gen_for_throughput; // Flag indicating that the policy would - // increase the tenuring threshold because of the total major gc cost - // is greater than the total minor gc cost + // increase the tenuring threshold because the total major GC cost + // is greater than the total minor GC cost bool _increment_tenuring_threshold_for_gc_cost; - // decrease the tenuring threshold because of the the total minor gc - // cost is greater than the total major gc cost + // decrease the tenuring threshold because the total minor GC + // cost is greater than the total major GC cost bool _decrement_tenuring_threshold_for_gc_cost; // decrease due to survivor size limit bool _decrement_tenuring_threshold_for_survivor_limit; @@ -182,7 +182,7 @@ // Changing the generation sizing depends on the data that is // gathered about the effects of changes on the pause times and // throughput. These variable count the number of data points - // gathered. The policy may use these counters as a threshhold + // gathered. The policy may use these counters as a threshold // for reliable data. julong _young_gen_change_for_minor_throughput; julong _old_gen_change_for_major_throughput; @@ -225,7 +225,7 @@ // larger than 1.0 if just the sum of the minor cost the // the major cost is used.
Worse than that is the // fact that the minor cost and the major cost each - // tend toward 1.0 in the extreme of high gc costs. + // tend toward 1.0 in the extreme of high GC costs. // Limit the value of gc_cost to 1.0 so that the mutator // cost stays non-negative. virtual double gc_cost() const { @@ -238,23 +238,23 @@ virtual double time_since_major_gc() const; // Average interval between major collections to be used - // in calculating the decaying major gc cost. An overestimate + // in calculating the decaying major GC cost. An overestimate // of this time would be a conservative estimate because // this time is used to decide if the major GC cost // should be decayed (i.e., if the time since the last - // major gc is long compared to the time returned here, + // major GC is long compared to the time returned here, // then the major GC cost will be decayed). See the // implementations for the specifics. virtual double major_gc_interval_average_for_decay() const { return _avg_major_interval->average(); } - // Return the cost of the GC where the major gc cost + // Return the cost of the GC where the major GC cost // has been decayed based on the time since the last // major collection. double decaying_gc_cost() const; - // Decay the major gc cost. Use this only for decisions on + // Decay the major GC cost. Use this only for decisions on // whether to adjust, not to determine by how much to adjust. // This approximation is crude and may not be good enough for the // latter. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/allocationStats.hpp --- a/src/share/vm/gc_implementation/shared/allocationStats.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -49,11 +49,11 @@ // estimates. AdaptivePaddedAverage _demand_rate_estimate; - ssize_t _desired; // Demand stimate computed as described above + ssize_t _desired; // Demand estimate computed as described above ssize_t _coal_desired; // desired +/- small-percent for tuning coalescing - ssize_t _surplus; // count - (desired +/- small-percent), - // used to tune splitting in best fit + ssize_t _surplus; // count - (desired +/- small-percent), + // used to tune splitting in best fit ssize_t _bfr_surp; // surplus at start of current sweep ssize_t _prev_sweep; // count from end of previous sweep ssize_t _before_sweep; // count from before current sweep diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/concurrentGCThread.cpp --- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -54,7 +54,7 @@ void ConcurrentGCThread::create_and_start() { if (os::create_thread(this, os::cgc_thread)) { // XXX: need to set this to low priority - // unless "agressive mode" set; priority + // unless "aggressive mode" set; priority // should be just less than that of VMThread. os::set_priority(this, NearMaxPriority); if (!_should_terminate && !DisableStartThread) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/gcUtil.cpp --- a/src/share/vm/gc_implementation/shared/gcUtil.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/gcUtil.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -159,7 +159,7 @@ // that no calculation of the slope has yet been done.
Returning true // for a slope equal to 0 reflects the intuitive expectation of the // dependence on the slope. Don't use the complement of these functions -// since that untuitive expectation is not built into the complement. +// since that intuitive expectation is not built into the complement. bool LinearLeastSquareFit::decrement_will_decrease() { return (_slope >= 0.00); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/gcUtil.hpp --- a/src/share/vm/gc_implementation/shared/gcUtil.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -210,7 +210,7 @@ double y(double x); double slope() { return _slope; } // Methods to decide if a change in the dependent variable will - // achive a desired goal. Note that these methods are not + // achieve a desired goal. Note that these methods are not // complementary and both are needed. bool decrement_will_decrease(); bool increment_will_decrease(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp --- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -72,7 +72,7 @@ #endif // NOT_PRODUCT // There may be unallocated holes in the middle chunks -// that should be filled with dead objects to ensure parseability. +// that should be filled with dead objects to ensure parsability. void MutableNUMASpace::ensure_parsability() { for (int i = 0; i < lgrp_spaces()->length(); i++) { LGRPSpace *ls = lgrp_spaces()->at(i); @@ -173,6 +173,26 @@ return lgrp_spaces()->at(i)->space()->capacity_in_bytes(); } +size_t MutableNUMASpace::tlab_used(Thread *thr) const { + // Please see the comments for tlab_capacity(). + guarantee(thr != NULL, "No thread"); + int lgrp_id = thr->lgrp_id(); + if (lgrp_id == -1) { + if (lgrp_spaces()->length() > 0) { + return (used_in_bytes()) / lgrp_spaces()->length(); + } else { + assert(false, "There should be at least one locality group"); + return 0; + } + } + int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); + if (i == -1) { + return 0; + } + return lgrp_spaces()->at(i)->space()->used_in_bytes(); +} + + size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const { // Please see the comments for tlab_capacity(). guarantee(thr != NULL, "No thread"); @@ -880,8 +900,8 @@ } void MutableNUMASpace::verify() { - // This can be called after setting an arbitary value to the space's top, - // so an object can cross the chunk boundary. We ensure the parsablity + // This can be called after setting an arbitrary value to the space's top, + // so an object can cross the chunk boundary. We ensure the parsability // of the space and just walk the objects in linear fashion. 
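The gcUtil.cpp comment corrected above concerns this pair of decision functions. Both deliberately return true for a slope of exactly zero, which is why neither is the complement of the other; a sketch matching the fragment shown, with increment_will_decrease() assumed symmetric:

    bool LinearLeastSquareFit::decrement_will_decrease() {
      return (_slope >= 0.00);  // zero slope == no trend yet: answer true
    }
    bool LinearLeastSquareFit::increment_will_decrease() {
      return (_slope <= 0.00);  // also true at zero -- not a complement
    }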
ensure_parsability(); MutableSpace::verify(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp --- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -217,6 +217,7 @@ using MutableSpace::capacity_in_words; virtual size_t capacity_in_words(Thread* thr) const; virtual size_t tlab_capacity(Thread* thr) const; + virtual size_t tlab_used(Thread* thr) const; virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; // Allocation (return NULL if full) diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/mutableSpace.hpp --- a/src/share/vm/gc_implementation/shared/mutableSpace.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/mutableSpace.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -31,7 +31,7 @@ // A MutableSpace is a subtype of ImmutableSpace that supports the // concept of allocation. This includes the concepts that a space may -// be only partially full, and the querry methods that go with such +// be only partially full, and the query methods that go with such // an assumption. MutableSpace is also responsible for minimizing the // page allocation time by having the memory pretouched (with // AlwaysPretouch) and for optimizing page placement on NUMA systems @@ -111,7 +111,7 @@ virtual void mangle_region(MemRegion mr) PRODUCT_RETURN; - // Boolean querries. + // Boolean queries. bool is_empty() const { return used_in_words() == 0; } bool not_empty() const { return used_in_words() > 0; } bool contains(const void* p) const { return _bottom <= p && p < _end; } @@ -124,6 +124,7 @@ virtual size_t used_in_words() const { return pointer_delta(top(), bottom()); } virtual size_t free_in_words() const { return pointer_delta(end(), top()); } virtual size_t tlab_capacity(Thread* thr) const { return capacity_in_bytes(); } + virtual size_t tlab_used(Thread* thr) const { return used_in_bytes(); } virtual size_t unsafe_max_tlab_alloc(Thread* thr) const { return free_in_bytes(); } // Allocation (return NULL if full) diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp --- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -89,6 +89,10 @@ // scavenge; it clears the sensor accumulators. void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) { assert(ResizePLAB, "Not set"); + + assert(is_object_aligned(max_size()) && min_size() <= max_size(), + "PLAB clipping computation may be incorrect"); + if (_allocated == 0) { assert(_unused == 0, err_msg("Inconsistency in PLAB stats: " @@ -152,7 +156,7 @@ // The buffer comes with its own BOT, with a shared (obviously) underlying // BlockOffsetSharedArray. We manipulate this BOT in the normal way -// as we would for any contiguous space. However, on accasion we +// as we would for any contiguous space. However, on occasion we // need to do some buffer surgery at the extremities before we // start using the body of the buffer for allocations. 
Such surgery // (as explained elsewhere) is to prevent allocation on a card that diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp --- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -92,7 +92,7 @@ } // The total (word) size of the buffer, including both allocated and - // unallocted space. + // unallocated space. size_t word_sz() { return _word_sz; } // Should only be done if we are about to reset with a new buffer of the @@ -181,16 +181,7 @@ _used(0), _desired_plab_sz(desired_plab_sz_), _filter(wt) - { - size_t min_sz = min_size(); - size_t max_sz = max_size(); - size_t aligned_min_sz = align_object_size(min_sz); - size_t aligned_max_sz = align_object_size(max_sz); - assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz && - min_sz <= max_sz, - "PLAB clipping computation in adjust_desired_plab_sz()" - " may be incorrect"); - } + { } static const size_t min_size() { return ParGCAllocBuffer::min_size(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/spaceDecorator.hpp --- a/src/share/vm/gc_implementation/shared/spaceDecorator.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/spaceDecorator.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -75,7 +75,7 @@ // High water mark for allocations. Typically, the space above // this point have been mangle previously and don't need to be - // touched again. Space belows this point has been allocated + // touched again. Space below this point has been allocated // and remangling is needed between the current top and this // high water mark. HeapWord* _top_for_allocations; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_implementation/shared/vmGCOperations.cpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -56,6 +56,7 @@ #else /* USDT2 */ HOTSPOT_GC_BEGIN( full); + HS_DTRACE_WORKAROUND_TAIL_CALL_BUG(); #endif /* USDT2 */ } @@ -64,8 +65,8 @@ HS_DTRACE_PROBE(hotspot, gc__end); HS_DTRACE_WORKAROUND_TAIL_CALL_BUG(); #else /* USDT2 */ - HOTSPOT_GC_END( -); + HOTSPOT_GC_END(); + HS_DTRACE_WORKAROUND_TAIL_CALL_BUG(); #endif /* USDT2 */ } @@ -82,7 +83,7 @@ // Allocations may fail in several threads at about the same time, // resulting in multiple gc requests. We only want to do one of them. -// In case a GC locker is active and the need for a GC is already signalled, +// In case a GC locker is active and the need for a GC is already signaled, // we want to skip this GC attempt altogether, without doing a futile // safepoint operation. bool VM_GC_Operation::skip_operation() const { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_interface/collectedHeap.cpp --- a/src/share/vm/gc_interface/collectedHeap.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -320,6 +320,21 @@ assert(thread->deferred_card_mark().is_empty(), "invariant"); } +size_t CollectedHeap::max_tlab_size() const { + // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE]. + // This restriction could be removed by enabling filling with multiple arrays. + // If we compute that the reasonable way as + // header_size + ((sizeof(jint) * max_jint) / HeapWordSize) + // we'll overflow on the multiply, so we do the divide first.
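Worked numbers for the divide-first trick in max_tlab_size() above, assuming a 64-bit VM (editorial illustration):

    // HeapWordSize = 8, sizeof(jint) = 4, max_jint = 2^31 - 1:
    //   divide first: 4 * (2147483647 / 8) = 4 * 268435455 = 1073741820 words
    //   exact bound:  (4 * 2147483647) / 8 = 1073741823 words
    // Dividing first therefore gives up only 3 words of the largest
    // fillable int array -- harmless, as the comment below notes.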
+ // We actually lose a little by dividing first, + // but that just makes the TLAB somewhat smaller than the biggest array, + // which is fine, since we'll be able to fill that. + size_t max_int_size = typeArrayOopDesc::header_size(T_INT) + + sizeof(jint) * + ((juint) max_jint / (size_t) HeapWordSize); + return align_size_down(max_int_size, MinObjAlignment); +} + // Helper for ReduceInitialCardMarks. For performance, // compiled code may elide card-marks for initializing stores // to a newly allocated object along the fast-path. We diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_interface/collectedHeap.hpp --- a/src/share/vm/gc_interface/collectedHeap.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -394,14 +394,16 @@ // the following methods: // Returns "true" iff the heap supports thread-local allocation buffers. // The default is "no". - virtual bool supports_tlab_allocation() const { - return false; - } + virtual bool supports_tlab_allocation() const = 0; + // The amount of space available for thread-local allocation buffers. - virtual size_t tlab_capacity(Thread *thr) const { - guarantee(false, "thread-local allocation buffers not supported"); - return 0; - } + virtual size_t tlab_capacity(Thread *thr) const = 0; + + // The amount of used space for thread-local allocation buffers for the given thread. + virtual size_t tlab_used(Thread *thr) const = 0; + + virtual size_t max_tlab_size() const; + // An estimate of the maximum allocation that could be performed // for thread-local allocation buffers without triggering any // collection or expansion activity. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/gc_interface/gcCause.hpp --- a/src/share/vm/gc_interface/gcCause.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/gc_interface/gcCause.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -31,7 +31,7 @@ // This class exposes implementation details of the various // collector(s), and we need to be very careful with it. If // use of this class grows, we should split it into public -// and implemenation-private "causes". +// and implementation-private "causes". // class GCCause : public AllStatic { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/interpreter/linkResolver.cpp --- a/src/share/vm/interpreter/linkResolver.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/interpreter/linkResolver.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -649,16 +649,6 @@ } } - if (nostatics && resolved_method->is_static()) { - ResourceMark rm(THREAD); - char buf[200]; - jio_snprintf(buf, sizeof(buf), "Expected instance not static method %s", Method::name_and_sig_as_C_string(resolved_klass(), - resolved_method->name(), - resolved_method->signature())); - THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); - } - - if (check_access) { // JDK8 adds non-public interface methods, and accessability check requirement assert(current_klass.not_null() , "current_klass should not be null"); @@ -702,6 +692,15 @@ } } + if (nostatics && resolved_method->is_static()) { + ResourceMark rm(THREAD); + char buf[200]; + jio_snprintf(buf, sizeof(buf), "Expected instance not static method %s", + Method::name_and_sig_as_C_string(resolved_klass(), + resolved_method->name(), resolved_method->signature())); + THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); + } + if (TraceItables && Verbose) { ResourceMark rm(THREAD); tty->print("invokeinterface resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ", diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/allocation.hpp --- a/src/share/vm/memory/allocation.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/allocation.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -576,8 +576,8 @@ bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; } bool allocated_on_C_heap() const { return get_allocation_type() == C_HEAP; } bool allocated_on_arena() const { return get_allocation_type() == ARENA; } - ResourceObj(); // default construtor - ResourceObj(const ResourceObj& r); // default copy construtor + ResourceObj(); // default constructor + ResourceObj(const ResourceObj& r); // default copy constructor ResourceObj& operator=(const ResourceObj& r); // default copy assignment ~ResourceObj(); #endif // ASSERT diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/barrierSet.hpp --- a/src/share/vm/memory/barrierSet.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/barrierSet.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -124,7 +124,7 @@ virtual bool has_read_region_opt() = 0; virtual bool has_write_region_opt() = 0; - // These operations should assert false unless the correponding operation + // These operations should assert false unless the corresponding operation // above returns true. Otherwise, they should perform an appropriate // barrier for an array whose elements are all in the given memory region. virtual void read_ref_array(MemRegion mr) = 0; @@ -165,7 +165,7 @@ // normally reserve space for such tables, and commit parts of the table // "covering" parts of the heap that are committed. The constructor is // passed the maximum number of independently committable subregions to - // be covered, and the "resize_covoered_region" function allows the + // be covered, and the "resize_covered_region" function allows the // sub-parts of the heap to inform the barrier set of changes of their // sizes. 
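For orientation, the covered-region bookkeeping described above backs the card table, whose post-write barrier reduces to marking one byte; a simplified sketch, not the exact HotSpot code path:

    // After a reference store into 'field', dirty the card covering it.
    // byte_for() applies the byte_map_base + (addr >> card_shift) mapping
    // spelled out in the cardTableModRefBS.cpp hunk further below.
    jbyte* card = ct_bs->byte_for(field);
    *card = (jbyte)CardTableModRefBS::dirty_card_val();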
BarrierSet(int max_covered_regions) : diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/binaryTreeDictionary.cpp --- a/src/share/vm/memory/binaryTreeDictionary.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/binaryTreeDictionary.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -56,7 +56,7 @@ template class FreeList_t> void TreeChunk::verify_tree_chunk_list() const { TreeChunk* nextTC = (TreeChunk*)next(); - if (prev() != NULL) { // interior list node shouldn'r have tree fields + if (prev() != NULL) { // interior list node shouldn't have tree fields guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL && embedded_list()->right() == NULL, "should be clear"); } @@ -247,7 +247,7 @@ prevFC->link_after(nextTC); } - // Below this point the embeded TreeList being used for the + // Below this point the embedded TreeList being used for the // tree node may have changed. Don't use "this" // TreeList*. // chunk should still be a free chunk (bit set in _prev) @@ -703,7 +703,7 @@ // The only use of this method would not pass the root of the // tree (as indicated by the assertion above that the tree list // has a parent) but the specification does not explicitly exclude the - // passing of the root so accomodate it. + // passing of the root so accommodate it. set_root(NULL); } debug_only( diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/binaryTreeDictionary.hpp --- a/src/share/vm/memory/binaryTreeDictionary.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/binaryTreeDictionary.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -322,7 +322,7 @@ void set_tree_hints(void); // Reset statistics for all the lists in the tree. void clear_tree_census(void); - // Print the statistcis for all the lists in the tree. Also may + // Print the statistics for all the lists in the tree. Also may // print out summaries. void print_dict_census(void) const; void print_free_lists(outputStream* st) const; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/blockOffsetTable.cpp --- a/src/share/vm/memory/blockOffsetTable.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/blockOffsetTable.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -590,7 +590,7 @@ // Otherwise, find the block start using the table, but taking // care (cf block_start_unsafe() above) not to parse any objects/blocks - // on the cards themsleves. + // on the cards themselves. size_t index = _array->index_for(addr); assert(_array->address_for_index(index) == addr, "arg should be start of card"); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/blockOffsetTable.hpp --- a/src/share/vm/memory/blockOffsetTable.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/blockOffsetTable.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -424,7 +424,7 @@ BlockOffsetArray(array, mr, false), _unallocated_block(_bottom) { } - // accessor + // Accessor HeapWord* unallocated_block() const { assert(BlockOffsetArrayUseUnallocatedBlock, "_unallocated_block is not being maintained"); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/cardTableModRefBS.cpp --- a/src/share/vm/memory/cardTableModRefBS.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/cardTableModRefBS.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -98,7 +98,7 @@ "card marking array"); } - // The assember store_check code will do an unsigned shift of the oop, + // The assembler store_check code will do an unsigned shift of the oop, // then add it to byte_map_base, i.e. 
// // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift) @@ -243,7 +243,7 @@ if (new_region.word_size() != old_region.word_size()) { // Commit new or uncommit old pages, if necessary. MemRegion cur_committed = _committed[ind]; - // Extend the end of this _commited region + // Extend the end of this _committed region // to cover the end of any lower _committed regions. // This forms overlapping regions, but never interior regions. HeapWord* const max_prev_end = largest_prev_committed_end(ind); @@ -448,7 +448,7 @@ // off parallelism is used, then active_workers can be used in // place of n_par_threads. // This is an example of a path where n_par_threads is - // set to 0 to turn off parallism. + // set to 0 to turn off parallelism. // [7] CardTableModRefBS::non_clean_card_iterate() // [8] CardTableRS::younger_refs_in_space_iterate() // [9] Generation::younger_refs_in_space_iterate() diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/cardTableRS.cpp --- a/src/share/vm/memory/cardTableRS.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/cardTableRS.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -590,7 +590,7 @@ // Then, the case analysis above reveals that, in the worst case, // any such stale card will be scanned unnecessarily at most twice. // - // It is nonethelss advisable to try and get rid of some of this + // It is nonetheless advisable to try and get rid of some of this // redundant work in a subsequent (low priority) re-design of // the card-scanning code, if only to simplify the underlying // state machine analysis/proof. ysr 1/28/2002. XXX diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/cardTableRS.hpp --- a/src/share/vm/memory/cardTableRS.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/cardTableRS.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -105,8 +105,6 @@ ~CardTableRS(); // *** GenRemSet functions. - GenRemSet::Name rs_kind() { return GenRemSet::CardTable; } - CardTableRS* as_CardTableRS() { return this; } CardTableModRefBS* ct_bs() { return _ct_bs; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/collectorPolicy.cpp --- a/src/share/vm/memory/collectorPolicy.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/collectorPolicy.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -45,7 +45,7 @@ #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp" #endif // INCLUDE_ALL_GCS -// CollectorPolicy methods. +// CollectorPolicy methods CollectorPolicy::CollectorPolicy() : _space_alignment(0), @@ -178,17 +178,14 @@ // byte entry and the os page size is 4096, the maximum heap size should // be 512*4096 = 2MB aligned. - // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable - // is supported. - // Requirements of any new remembered set implementations must be added here. - size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable); + size_t alignment = GenRemSet::max_alignment_constraint(); // Parallel GC does its own alignment of the generations to avoid requiring a // large page (256M on some platforms) for the permanent generation. The // other collectors should also be updated to do their own alignment and then // this use of lcm() should be removed. if (UseLargePages && !UseParallelGC) { - // in presence of large pages we have to make sure that our + // In the presence of large pages we have to make sure that our // alignment is large page aware alignment = lcm(os::large_page_size(), alignment); } @@ -196,7 +193,7 @@ return alignment; } -// GenCollectorPolicy methods.
+// GenCollectorPolicy methods GenCollectorPolicy::GenCollectorPolicy() : _min_gen0_size(0), @@ -378,10 +375,10 @@ _initial_heap_byte_size = InitialHeapSize; } - // adjust max heap size if necessary + // Adjust NewSize and OldSize or MaxHeapSize to match each other if (NewSize + OldSize > MaxHeapSize) { if (_max_heap_size_cmdline) { - // somebody set a maximum heap size with the intention that we should not + // Somebody has set a maximum heap size with the intention that we should not // exceed it. Adjust New/OldSize as necessary. uintx calculated_size = NewSize + OldSize; double shrink_factor = (double) MaxHeapSize / calculated_size; @@ -442,9 +439,8 @@ // minimum gen0 sizes. if (_max_heap_byte_size == _min_heap_byte_size) { - // The maximum and minimum heap sizes are the same so - // the generations minimum and initial must be the - // same as its maximum. + // The maximum and minimum heap sizes are the same so the generations + // minimum and initial must be the same as its maximum. _min_gen0_size = max_new_size; _initial_gen0_size = max_new_size; _max_gen0_size = max_new_size; @@ -466,8 +462,7 @@ // For the case where NewSize is the default, use NewRatio // to size the minimum and initial generation sizes. // Use the default NewSize as the floor for these values. If - // NewRatio is overly large, the resulting sizes can be too - // small. + // NewRatio is overly large, the resulting sizes can be too small. _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize); desired_new_size = MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize); @@ -486,8 +481,7 @@ _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size); // At this point all three sizes have been checked against the - // maximum sizes but have not been checked for consistency - // among the three. + // maximum sizes but have not been checked for consistency among the three. // Final check min <= initial <= max _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size); @@ -495,7 +489,7 @@ _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size); } - // Write back to flags if necessary + // Write back to flags if necessary. if (NewSize != _initial_gen0_size) { FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size); } @@ -541,7 +535,7 @@ } // Minimum sizes of the generations may be different than -// the initial sizes. An inconsistently is permitted here +// the initial sizes. An inconsistency is permitted here // in the total size that can be specified explicitly by // command line specification of OldSize and NewSize and // also a command line specification of -Xms. Issue a warning @@ -553,12 +547,12 @@ // At this point the minimum, initial and maximum sizes // of the overall heap and of gen0 have been determined. // The maximum gen1 size can be determined from the maximum gen0 - // and maximum heap size since no explicit flags exits + // and maximum heap size since no explicit flags exist // for setting the gen1 maximum. _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment); // If no explicit command line flag has been set for the - // gen1 size, use what is left for gen1. + // gen1 size, use what is left for gen1 if (!FLAG_IS_CMDLINE(OldSize)) { // The user has not specified any value but the ergonomics // may have chosen a value (which may or may not be consistent @@ -570,14 +564,14 @@ // _max_gen1_size has already been made consistent above FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size); } else { - // It's been explicitly set on the command line. 
Use the + // OldSize has been explicitly set on the command line. Use the // OldSize and then determine the consequences. _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size); _initial_gen1_size = OldSize; // If the user has explicitly set an OldSize that is inconsistent // with other command line flags, issue a warning. - // The generation minimums and the overall heap mimimum should + // The generation minimums and the overall heap minimum should // be within one generation alignment. if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) { warning("Inconsistency between minimum heap size and minimum " @@ -599,7 +593,7 @@ _min_gen0_size, _initial_gen0_size, _max_gen0_size); } } - // Initial size + // The same as above for the old gen initial size. if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, _initial_heap_byte_size)) { if (PrintGCDetails && Verbose) { @@ -609,10 +603,10 @@ } } } - // Enforce the maximum gen1 size. + _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size); - // Check that min gen1 <= initial gen1 <= max gen1 + // Make sure that min gen1 <= initial gen1 <= max gen1. _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size); _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size); @@ -653,10 +647,9 @@ HeapWord* result = NULL; - // Loop until the allocation is satisified, - // or unsatisfied after GC. + // Loop until the allocation is satisfied, or unsatisfied after GC. for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { - HandleMark hm; // discard any handles allocated in each iteration + HandleMark hm; // Discard any handles allocated in each iteration. // First allocation attempt is lock-free. Generation *gen0 = gch->get_gen(0); @@ -669,7 +662,7 @@ return result; } } - unsigned int gc_count_before; // read inside the Heap_lock locked region + unsigned int gc_count_before; // Read inside the Heap_lock locked region. { MutexLocker ml(Heap_lock); if (PrintGC && Verbose) { @@ -688,19 +681,19 @@ if (GC_locker::is_active_and_needs_gc()) { if (is_tlab) { - return NULL; // Caller will retry allocating individual object + return NULL; // Caller will retry allocating individual object. } if (!gch->is_maximal_no_gc()) { - // Try and expand heap to satisfy request + // Try and expand heap to satisfy request. result = expand_heap_and_allocate(size, is_tlab); - // result could be null if we are out of space + // Result could be null if we are out of space. if (result != NULL) { return result; } } if (gclocker_stalled_count > GCLockerRetryAllocationCount) { - return NULL; // we didn't get to do a GC and we didn't get any memory + return NULL; // We didn't get to do a GC and we didn't get any memory. } // If this thread is not in a jni critical section, we stall @@ -735,7 +728,7 @@ result = op.result(); if (op.gc_locked()) { assert(result == NULL, "must be NULL if gc_locked() is true"); - continue; // retry and/or stall as necessary + continue; // Retry and/or stall as necessary. } // Allocation has failed and a collection @@ -796,7 +789,7 @@ if (!gch->is_maximal_no_gc()) { result = expand_heap_and_allocate(size, is_tlab); } - return result; // could be null if we are out of space + return result; // Could be null if we are out of space. } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) { // Do an incremental collection. 
gch->do_collection(false /* full */, @@ -918,10 +911,8 @@ GCCause::_metadata_GC_threshold); VMThread::execute(&op); - // If GC was locked out, try again. Check - // before checking success because the prologue - // could have succeeded and the GC still have - // been locked out. + // If GC was locked out, try again. Check before checking success because the + // prologue could have succeeded and the GC still have been locked out. if (op.gc_locked()) { continue; } @@ -982,7 +973,7 @@ } void MarkSweepPolicy::initialize_gc_policy_counters() { - // initialize the policy counters - 2 collectors, 3 generations + // Initialize the policy counters - 2 collectors, 3 generations. if (UseParNewGC) { _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3); } else { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/collectorPolicy.hpp --- a/src/share/vm/memory/collectorPolicy.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/collectorPolicy.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -76,10 +76,10 @@ size_t _heap_alignment; // Needed to keep information if MaxHeapSize was set on the command line - // when the flag value is aligned etc by ergonomics + // when the flag value is aligned etc by ergonomics. bool _max_heap_size_cmdline; - // The sizing of the heap are controlled by a sizing policy. + // The sizing of the heap is controlled by a sizing policy. AdaptiveSizePolicy* _size_policy; // Set to true when policy wants soft refs cleared. @@ -102,7 +102,7 @@ initialize_size_info(); } - // Return maximum heap alignment that may be imposed by the policy + // Return maximum heap alignment that may be imposed by the policy. static size_t compute_heap_alignment(); size_t space_alignment() { return _space_alignment; } @@ -180,7 +180,7 @@ size_t size, Metaspace::MetadataType mdtype); - // Performace Counter support + // Performance Counter support GCPolicyCounters* counters() { return _gc_policy_counters; } // Create the jstat counters for the GC policy. By default, policy's @@ -231,9 +231,8 @@ GenerationSpec **_generations; - // Return true if an allocation should be attempted in the older - // generation if it fails in the younger generation. Return - // false, otherwise. + // Return true if an allocation should be attempted in the older generation + // if it fails in the younger generation. Return false, otherwise. virtual bool should_try_older_generation_allocation(size_t word_size) const; void initialize_flags(); @@ -245,7 +244,7 @@ // Try to allocate space by expanding the heap. virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab); - // Compute max heap alignment + // Compute max heap alignment. size_t compute_max_alignment(); // Scale the base_size by NewRatio according to @@ -253,7 +252,7 @@ // and align by min_alignment() size_t scale_by_NewRatio_aligned(size_t base_size); - // Bound the value by the given maximum minus the min_alignment + // Bound the value by the given maximum minus the min_alignment. 
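Stepping back, the comments tidied in mem_allocate_work above describe this overall retry structure; a condensed sketch with assumed helper names (attempt_allocation, stall_until_gc_locker_released, collect_and_allocate are placeholders), not the verbatim HotSpot code:

    HeapWord* mem_allocate_work_sketch(size_t size, bool is_tlab) {
      for (int try_count = 1; /* return inside loop */; try_count++) {
        HandleMark hm;  // discard handles allocated in each iteration
        HeapWord* result = attempt_allocation(size, is_tlab);  // lock-free fast path
        if (result != NULL) return result;
        if (GC_locker::is_active_and_needs_gc()) {
          result = expand_heap_and_allocate(size, is_tlab);    // expand rather than GC
          if (result != NULL) return result;
          stall_until_gc_locker_released();                    // then retry the fast path
          continue;
        }
        result = collect_and_allocate(size, is_tlab);          // GC via a VM operation
        if (result != NULL) return result;
      }
    }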
size_t bound_minus_alignment(size_t desired_size, size_t maximum_size); public: diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/defNewGeneration.cpp --- a/src/share/vm/memory/defNewGeneration.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/defNewGeneration.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -61,7 +61,6 @@ DefNewGeneration::KeepAliveClosure:: KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); - assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind."); _rs = (CardTableRS*)rs; } @@ -619,13 +618,12 @@ assert(gch->no_allocs_since_save_marks(0), "save marks have not been newly set."); - int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache; gch->gen_process_strong_roots(_level, true, // Process younger gens, if any, // as strong roots. true, // activate StrongRootsScope - true, // is scavenging SharedHeap::ScanningOption(so), &fsc_with_no_gc_barrier, true, // walk *all* scavengable nmethods @@ -1086,6 +1084,10 @@ return eden()->capacity(); } +size_t DefNewGeneration::tlab_used() const { + return eden()->used(); +} + size_t DefNewGeneration::unsafe_max_tlab_alloc() const { return unsafe_max_alloc_nogc(); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/defNewGeneration.hpp --- a/src/share/vm/memory/defNewGeneration.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/defNewGeneration.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -239,6 +239,7 @@ // Thread-local allocation buffers bool supports_tlab_allocation() const { return true; } size_t tlab_capacity() const; + size_t tlab_used() const; size_t unsafe_max_tlab_alloc() const; // Grow the generation by the specified number of bytes. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/genCollectedHeap.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -126,7 +126,7 @@ (HeapWord*)(heap_rs.base() + heap_rs.size())); // It is important to do this in a way such that concurrent readers can't - // temporarily think somethings in the heap. (Seen this happen in asserts.) + // temporarily think something is in the heap. (Seen this happen in asserts.) _reserved.set_word_size(0); _reserved.set_start((HeapWord*)heap_rs.base()); size_t actual_heap_size = heap_rs.size(); @@ -592,7 +592,6 @@ gen_process_strong_roots(int level, bool younger_gens_as_roots, bool activate_scope, - bool is_scavenging, SharedHeap::ScanningOption so, OopsInGenClosure* not_older_gens, bool do_code_roots, @@ -601,12 +600,12 @@ // General strong roots. 
if (!do_code_roots) { - SharedHeap::process_strong_roots(activate_scope, is_scavenging, so, + SharedHeap::process_strong_roots(activate_scope, so, not_older_gens, NULL, klass_closure); } else { bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active()); CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking); - SharedHeap::process_strong_roots(activate_scope, is_scavenging, so, + SharedHeap::process_strong_roots(activate_scope, so, not_older_gens, &code_roots, klass_closure); } @@ -933,6 +932,16 @@ return result; } +size_t GenCollectedHeap::tlab_used(Thread* thr) const { + size_t result = 0; + for (int i = 0; i < _n_gens; i += 1) { + if (_gens[i]->supports_tlab_allocation()) { + result += _gens[i]->tlab_used(); + } + } + return result; +} + size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const { size_t result = 0; for (int i = 0; i < _n_gens; i += 1) { @@ -1263,7 +1272,7 @@ }; jlong GenCollectedHeap::millis_since_last_gc() { - // We need a monotonically non-deccreasing time in ms but + // We need a monotonically non-decreasing time in ms but // os::javaTimeMillis() does not guarantee monotonicity. jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; GenTimeOfLastGCClosure tolgc_cl(now); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/genCollectedHeap.hpp --- a/src/share/vm/memory/genCollectedHeap.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/genCollectedHeap.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -248,6 +248,7 @@ // Section on TLAB's. virtual bool supports_tlab_allocation() const; virtual size_t tlab_capacity(Thread* thr) const; + virtual size_t tlab_used(Thread* thr) const; virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; virtual HeapWord* allocate_new_tlab(size_t size); @@ -315,7 +316,7 @@ } // Update the gc statistics for each generation. - // "level" is the level of the lastest collection + // "level" is the level of the latest collection. void update_gc_stats(int current_level, bool full) { for (int i = 0; i < _n_gens; i++) { _gens[i]->update_gc_stats(current_level, full); @@ -411,7 +412,6 @@ // The remaining arguments are in an order // consistent with SharedHeap::process_strong_roots: bool activate_scope, - bool is_scavenging, SharedHeap::ScanningOption so, OopsInGenClosure* not_older_gens, bool do_code_roots, diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/genMarkSweep.cpp --- a/src/share/vm/memory/genMarkSweep.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/genMarkSweep.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -148,8 +148,8 @@ Universe::update_heap_info_at_gc(); // Update time of last gc for all generations we collected - // (which curently is all the generations in the heap). - // We need to use a monotonically non-deccreasing time in ms + // (which currently is all the generations in the heap). + // We need to use a monotonically non-decreasing time in ms // or we will see time-warp warnings and os::javaTimeMillis() // does not guarantee monotonicity. jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; @@ -210,7 +210,6 @@ gch->gen_process_strong_roots(level, false, // Younger gens are not roots. true, // activate StrongRootsScope - false, // not scavenging SharedHeap::SO_SystemClasses, &follow_root_closure, true, // walk code active on stacks @@ -296,7 +295,6 @@ gch->gen_process_strong_roots(level, false, // Younger gens are not roots. 
true, // activate StrongRootsScope - false, // not scavenging SharedHeap::SO_AllClasses, &adjust_pointer_closure, false, // do not walk code diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/genOopClosures.inline.hpp --- a/src/share/vm/memory/genOopClosures.inline.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/genOopClosures.inline.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -45,7 +45,6 @@ // Barrier set for the heap, must be set after heap is initialized if (_rs == NULL) { GenRemSet* rs = SharedHeap::heap()->rem_set(); - assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind"); _rs = (CardTableRS*)rs; } } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/genRemSet.cpp --- a/src/share/vm/memory/genRemSet.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/genRemSet.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -31,8 +31,7 @@ // enumerate ref fields that have been modified (since the last // enumeration.) -uintx GenRemSet::max_alignment_constraint(Name nm) { - assert(nm == GenRemSet::CardTable, "Unrecognized GenRemSet type."); +uintx GenRemSet::max_alignment_constraint() { return CardTableRS::ct_max_alignment_constraint(); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/genRemSet.hpp --- a/src/share/vm/memory/genRemSet.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/genRemSet.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -27,7 +27,7 @@ #include "oops/oop.hpp" -// A GenRemSet provides ways of iterating over pointers accross generations. +// A GenRemSet provides ways of iterating over pointers across generations. // (This is especially useful for older-to-younger.) class Generation; @@ -53,19 +53,12 @@ KlassRemSet _klass_rem_set; public: - enum Name { - CardTable, - Other - }; - GenRemSet(BarrierSet * bs) : _bs(bs) {} GenRemSet() : _bs(NULL) {} - virtual Name rs_kind() = 0; - // These are for dynamic downcasts. Unfortunately that it names the // possible subtypes (but not that they are subtypes!) Return NULL if - // the cast is invalide. + // the cast is invalid. virtual CardTableRS* as_CardTableRS() { return NULL; } // Return the barrier set associated with "this." @@ -106,10 +99,9 @@ // within the heap, this function tells whether they are met. virtual bool is_aligned(HeapWord* addr) = 0; - // If the RS (or BS) imposes an aligment constraint on maximum heap size. - // (This must be static, and dispatch on "nm", because it is called - // before an RS is created.) - static uintx max_alignment_constraint(Name nm); + // Returns any alignment constraint that the remembered set imposes upon the + // heap. + static uintx max_alignment_constraint(); virtual void verify() = 0; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/generation.hpp --- a/src/share/vm/memory/generation.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/generation.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -289,7 +289,7 @@ // These functions return the addresses of the fields that define the // boundaries of the contiguous allocation area. (These fields should be - // physicall near to one another.) + // physically near to one another.) 
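The genRemSet simplification above drops the Name-based dispatch because the card table is the only remaining implementation, so its constraint is the constraint. As the collectorPolicy.cpp comment earlier notes, that means "one card-table byte per OS page" (sketch of the arithmetic, assuming 512-byte cards and 4K pages):

    // Each card-table byte covers card_size heap bytes; for the table to be
    // committed in whole pages, the heap must align to
    //   card_size * os::vm_page_size(), e.g. 512 * 4096 = 2M.
    uintx alignment = GenRemSet::max_alignment_constraint();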
virtual HeapWord** top_addr() const { return NULL; } virtual HeapWord** end_addr() const { return NULL; } @@ -299,6 +299,10 @@ guarantee(false, "Generation doesn't support thread local allocation buffers"); return 0; } + virtual size_t tlab_used() const { + guarantee(false, "Generation doesn't support thread local allocation buffers"); + return 0; + } virtual size_t unsafe_max_tlab_alloc() const { guarantee(false, "Generation doesn't support thread local allocation buffers"); return 0; @@ -485,7 +489,7 @@ // General signature... virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0; // ...and specializations for de-virtualization. (The general - // implemention of the _nv versions call the virtual version. + // implementation of the _nv versions call the virtual version. // Note that the _nv suffix is not really semantically necessary, // but it avoids some not-so-useful warnings on Solaris.) #define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/heap.cpp --- a/src/share/vm/memory/heap.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/heap.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -183,7 +183,7 @@ size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock)); assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList"); - // First check if we can satify request from freelist + // First check if we can satisfy request from freelist debug_only(verify()); HeapBlock* block = search_freelist(number_of_segments, is_critical); debug_only(if (VerifyCodeCacheOften) verify()); @@ -372,7 +372,7 @@ } // Scan for right place to put into list. List - // is sorted by increasing addresseses + // is sorted by increasing addresses FreeBlock* prev = NULL; FreeBlock* cur = _freelist; while(cur != NULL && cur < b) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/heap.hpp --- a/src/share/vm/memory/heap.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/heap.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -127,8 +127,8 @@ // Heap extents bool reserve(size_t reserved_size, size_t committed_size, size_t segment_size); void release(); // releases all allocated memory - bool expand_by(size_t size); // expands commited memory by size - void shrink_by(size_t size); // shrinks commited memory by size + bool expand_by(size_t size); // expands committed memory by size + void shrink_by(size_t size); // shrinks committed memory by size void clear(); // clears all heap contents // Memory allocation diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/heapInspection.hpp --- a/src/share/vm/memory/heapInspection.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/heapInspection.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -347,7 +347,7 @@ #endif // INCLUDE_SERVICES -// These declarations are needed since teh declaration of KlassInfoTable and +// These declarations are needed since the declaration of KlassInfoTable and // KlassInfoClosure are guarded by #if INLCUDE_SERVICES class KlassInfoTable; class KlassInfoClosure; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/metaspace.cpp --- a/src/share/vm/memory/metaspace.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/metaspace.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1455,9 +1455,10 @@ // No expansion, now see if we want to shrink // We would never want to shrink more than this + assert(capacity_until_GC >= minimum_desired_capacity, + err_msg(SIZE_FORMAT " >= " SIZE_FORMAT, + capacity_until_GC, 
minimum_desired_capacity)); size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; - assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT, - max_shrink_bytes)); // Should shrinking be considered? if (MaxMetaspaceFreeRatio < 100) { @@ -2398,7 +2399,7 @@ void SpaceManager::verify() { // If there are blocks in the dictionary, then - // verfication of chunks does not work since + // verification of chunks does not work since // being in the dictionary alters a chunk. if (block_freelists()->total_size() == 0) { for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { @@ -2867,7 +2868,7 @@ uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; // If compressed class space fits in lower 32G, we don't need a base. if (higher_address <= (address)klass_encoding_max) { - lower_base = 0; // effectively lower base is zero. + lower_base = 0; // Effectively lower base is zero. } } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/metaspaceShared.cpp --- a/src/share/vm/memory/metaspaceShared.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/metaspaceShared.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -487,7 +487,7 @@ NOT_PRODUCT(SystemDictionary::verify();) // Copy the the symbol table, and the system dictionary to the shared - // space in usable form. Copy the hastable + // space in usable form. Copy the hashtable // buckets first [read-write], then copy the linked lists of entries // [read-only]. @@ -953,7 +953,7 @@ // The following data in the shared misc data region are the linked // list elements (HashtableEntry objects) for the symbol table, string - // table, and shared dictionary. The heap objects refered to by the + // table, and shared dictionary. The heap objects referred to by the // symbol table, string table, and shared dictionary are permanent and // unmovable. Since new entries added to the string and symbol tables // are always added at the beginning of the linked lists, THESE LINKED diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/modRefBarrierSet.hpp --- a/src/share/vm/memory/modRefBarrierSet.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/modRefBarrierSet.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -72,7 +72,7 @@ bool has_read_region_opt() { return false; } - // These operations should assert false unless the correponding operation + // These operations should assert false unless the corresponding operation // above returns true. void read_ref_array(MemRegion mr) { assert(false, "can't call"); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/referenceProcessor.cpp --- a/src/share/vm/memory/referenceProcessor.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/referenceProcessor.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -45,7 +45,7 @@ } void ReferenceProcessor::init_statics() { - // We need a monotonically non-deccreasing time in ms but + // We need a monotonically non-decreasing time in ms but // os::javaTimeMillis() does not guarantee monotonicity. jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; @@ -100,7 +100,6 @@ _enqueuing_is_done(false), _is_alive_non_header(is_alive_non_header), _discovered_list_needs_barrier(discovered_list_needs_barrier), - _bs(NULL), _processing_is_mt(mt_processing), _next_id(0) { @@ -126,10 +125,6 @@ _discovered_refs[i].set_length(0); } - // If we do barriers, cache a copy of the barrier set. 
- if (discovered_list_needs_barrier) { - _bs = Universe::heap()->barrier_set(); - } setup_policy(false /* default soft ref policy */); } @@ -157,7 +152,7 @@ // Update (advance) the soft ref master clock field. This must be done // after processing the soft ref list. - // We need a monotonically non-deccreasing time in ms but + // We need a monotonically non-decreasing time in ms but // os::javaTimeMillis() does not guarantee monotonicity. jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); @@ -173,7 +168,7 @@ // javaTimeNanos(), which is guaranteed to be monotonically // non-decreasing provided the underlying platform provides such // a time source (and it is bug free). - // In product mode, however, protect ourselves from non-monotonicty. + // In product mode, however, protect ourselves from non-monotonicity. if (now > _soft_ref_timestamp_clock) { _soft_ref_timestamp_clock = now; java_lang_ref_SoftReference::set_clock(now); @@ -317,13 +312,9 @@ // Enqueue references that are not made active again, and // clear the decks for the next collection (cycle). ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); - // Do the oop-check on pending_list_addr missed in - // enqueue_discovered_reflist. We should probably - // do a raw oop_check so that future such idempotent - // oop_stores relying on the oop-check side-effect - // may be elided automatically and safely without - // affecting correctness. - oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); + // Do the post-barrier on pending_list_addr missed in + // enqueue_discovered_reflist. + oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); // Stop treating discovered references specially. ref->disable_discovery(); @@ -358,7 +349,7 @@ oop obj = NULL; oop next_d = refs_list.head(); - if (pending_list_uses_discovered_field()) { // New behaviour + if (pending_list_uses_discovered_field()) { // New behavior // Walk down the list, self-looping the next field // so that the References are not considered active. while (obj != next_d) { @@ -372,18 +363,20 @@ assert(java_lang_ref_Reference::next(obj) == NULL, "Reference not active; should not be discovered"); // Self-loop next, so as to make Ref not active. - java_lang_ref_Reference::set_next(obj, obj); + // Post-barrier not needed when looping to self. + java_lang_ref_Reference::set_next_raw(obj, obj); if (next_d == obj) { // obj is last - // Swap refs_list into pendling_list_addr and + // Swap refs_list into pending_list_addr and // set obj's discovered to what we read from pending_list_addr. oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); - // Need oop_check on pending_list_addr above; - // see special oop-check code at the end of + // Need post-barrier on pending_list_addr above; + // see special post-barrier code at the end of // enqueue_discovered_reflists() further below. - java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL + java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL + oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old); } } - } else { // Old behaviour + } else { // Old behavior // Walk down the list, copying the discovered field into // the next field and clearing the discovered field. 
while (obj != next_d) { @@ -397,7 +390,7 @@ assert(java_lang_ref_Reference::next(obj) == NULL, "The reference should not be enqueued"); if (next_d == obj) { // obj is last - // Swap refs_list into pendling_list_addr and + // Swap refs_list into pending_list_addr and // set obj's next to what we read from pending_list_addr. oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); // Need oop_check on pending_list_addr above; @@ -516,13 +509,11 @@ // the reference object and will fail // CT verification. if (UseG1GC) { - BarrierSet* bs = oopDesc::bs(); HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref); - if (UseCompressedOops) { - bs->write_ref_field_pre((narrowOop*)next_addr, NULL); + oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL); } else { - bs->write_ref_field_pre((oop*)next_addr, NULL); + oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL); } java_lang_ref_Reference::set_next_raw(_ref, NULL); } else { @@ -790,10 +781,9 @@ }; void ReferenceProcessor::set_discovered(oop ref, oop value) { + java_lang_ref_Reference::set_discovered_raw(ref, value); if (_discovered_list_needs_barrier) { - java_lang_ref_Reference::set_discovered(ref, value); - } else { - java_lang_ref_Reference::set_discovered_raw(ref, value); + oopDesc::bs()->write_ref_field(ref, value); } } @@ -1085,7 +1075,7 @@ // so this will expand to nothing. As a result, we have manually // elided this out for G1, but left in the test for some future // collector that might have need for a pre-barrier here, e.g.:- - // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); + // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); assert(!_discovered_list_needs_barrier || UseG1GC, "Need to check non-G1 collector: " "may need a pre-write-barrier for CAS from NULL below"); @@ -1098,7 +1088,7 @@ refs_list.set_head(obj); refs_list.inc_length(1); if (_discovered_list_needs_barrier) { - _bs->write_ref_field((void*)discovered_addr, next_discovered); + oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered); } if (TraceReferenceGC) { @@ -1260,13 +1250,13 @@ // As in the case further above, since we are over-writing a NULL // pre-value, we can safely elide the pre-barrier here for the case of G1. - // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); + // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); assert(discovered == NULL, "control point invariant"); assert(!_discovered_list_needs_barrier || UseG1GC, "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below"); oop_store_raw(discovered_addr, next_discovered); if (_discovered_list_needs_barrier) { - _bs->write_ref_field((void*)discovered_addr, next_discovered); + oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered); } list->set_head(obj); list->inc_length(1); @@ -1351,7 +1341,7 @@ // whose referents are still alive, whose referents are NULL or which // are not active (have a non-NULL next field). NOTE: When we are // thus precleaning the ref lists (which happens single-threaded today), -// we do not disable refs discovery to honour the correct semantics of +// we do not disable refs discovery to honor the correct semantics of // java.lang.Reference. As a result, we need to be careful below // that ref removal steps interleave safely with ref discovery steps // (in this thread). 
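The referenceProcessor.cpp hunks above replace the cached _bs pointer and the oop_store() convenience with raw stores plus one explicit post-barrier, and skip the barrier entirely for the self-loop store. A toy generational-barrier model (not HotSpot code; all names invented) of why that is safe — a post-barrier only has to remember old-to-young pointers, and an object pointing at itself can never create one:

    #include <cassert>
    #include <cstdio>

    struct Obj { Obj* next; bool in_young; };

    static bool card_dirtied = false;

    // Stand-in for BarrierSet::write_ref_field(): dirty a card only when an
    // old-gen object now references a young-gen one.
    void write_ref_field(Obj* holder, Obj* value) {
      if (value != nullptr && !holder->in_young && value->in_young) {
        card_dirtied = true;
      }
    }

    // Raw store: no barrier side effects, mirroring set_next_raw().
    void set_next_raw(Obj* holder, Obj* value) { holder->next = value; }

    int main() {
      Obj old_ref     = {nullptr, false};
      Obj young_target = {nullptr, true};

      // Self-loop (the "Reference not active" trick): raw store suffices.
      set_next_raw(&old_ref, &old_ref);
      assert(!card_dirtied);

      // Cross-generation store: raw store plus explicit post-barrier.
      set_next_raw(&old_ref, &young_target);
      write_ref_field(&old_ref, &young_target);
      assert(card_dirtied);
      puts("post-barrier model ok");
      return 0;
    }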
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/referenceProcessor.hpp --- a/src/share/vm/memory/referenceProcessor.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/referenceProcessor.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -235,7 +235,6 @@ // discovery.) bool _discovered_list_needs_barrier; - BarrierSet* _bs; // Cached copy of BarrierSet. bool _enqueuing_is_done; // true if all weak references enqueued bool _processing_is_mt; // true during phases when // reference processing is MT. @@ -420,25 +419,6 @@ void update_soft_ref_master_clock(); public: - // constructor - ReferenceProcessor(): - _span((HeapWord*)NULL, (HeapWord*)NULL), - _discovered_refs(NULL), - _discoveredSoftRefs(NULL), _discoveredWeakRefs(NULL), - _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL), - _discovering_refs(false), - _discovery_is_atomic(true), - _enqueuing_is_done(false), - _discovery_is_mt(false), - _discovered_list_needs_barrier(false), - _bs(NULL), - _is_alive_non_header(NULL), - _num_q(0), - _max_num_q(0), - _processing_is_mt(false), - _next_id(0) - { } - // Default parameters give you a vanilla reference processor. ReferenceProcessor(MemRegion span, bool mt_processing = false, uint mt_processing_degree = 1, @@ -494,7 +474,7 @@ bool processing_is_mt() const { return _processing_is_mt; } void set_mt_processing(bool mt) { _processing_is_mt = mt; } - // whether all enqueuing of weak references is complete + // whether all enqueueing of weak references is complete bool enqueuing_is_done() { return _enqueuing_is_done; } void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/resourceArea.hpp --- a/src/share/vm/memory/resourceArea.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/resourceArea.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -196,7 +196,7 @@ // leveraging existing data structures if we simply create a way to manage this one // special need for a ResourceMark. If ResourceMark simply inherited from CHeapObj // then existing ResourceMarks would work fine since no one use new to allocate them -// and they would be stack allocated. This leaves open the possibilty of accidental +// and they would be stack allocated. This leaves open the possibility of accidental // misuse so we simple duplicate the ResourceMark functionality here. class DeoptResourceMark: public CHeapObj { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/sharedHeap.cpp --- a/src/share/vm/memory/sharedHeap.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/sharedHeap.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -137,7 +137,6 @@ } void SharedHeap::process_strong_roots(bool activate_scope, - bool is_scavenging, ScanningOption so, OopClosure* roots, CodeBlobClosure* code_roots, @@ -157,9 +156,11 @@ if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do)) JNIHandles::oops_do(roots); + CLDToOopClosure roots_from_clds(roots); + // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to + // CLDs which are strongly reachable from the thread stacks. + CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL); // All threads execute this; the individual threads are task groups. - CLDToOopClosure roots_from_clds(roots); - CLDToOopClosure* roots_from_clds_p = (is_scavenging ? 
NULL : &roots_from_clds); if (CollectedHeap::use_parallel_gc_threads()) { Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots); } else { @@ -187,9 +188,9 @@ if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) { if (so & SO_AllClasses) { - ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging); + ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false); } else if (so & SO_SystemClasses) { - ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging); + ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true); } } @@ -204,17 +205,18 @@ } if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) { - if (so & SO_CodeCache) { + if (so & SO_ScavengeCodeCache) { assert(code_roots != NULL, "must supply closure for code cache"); - if (is_scavenging) { - // We only visit parts of the CodeCache when scavenging. - CodeCache::scavenge_root_nmethods_do(code_roots); - } else { - // CMSCollector uses this to do intermediate-strength collections. - // We scan the entire code cache, since CodeCache::do_unloading is not called. - CodeCache::blobs_do(code_roots); - } + // We only visit parts of the CodeCache when scavenging. + CodeCache::scavenge_root_nmethods_do(code_roots); + } + if (so & SO_AllCodeCache) { + assert(code_roots != NULL, "must supply closure for code cache"); + + // CMSCollector uses this to do intermediate-strength collections. + // We scan the entire code cache, since CodeCache::do_unloading is not called. + CodeCache::blobs_do(code_roots); } // Verify that the code cache contents are not subject to // movement by a scavenging collection. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/sharedHeap.hpp --- a/src/share/vm/memory/sharedHeap.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/sharedHeap.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -92,7 +92,7 @@ // 0 is a "special" value in set_n_threads() which translates to // setting _n_threads to 1. // -// Some code uses _n_terminiation to decide if work should be done in +// Some code uses _n_termination to decide if work should be done in // parallel. The notorious possibly_parallel_oops_do() in threads.cpp // is an example of such code. Look for variable "is_par" for other // examples. @@ -221,7 +221,8 @@ SO_AllClasses = 0x1, SO_SystemClasses = 0x2, SO_Strings = 0x4, - SO_CodeCache = 0x8 + SO_AllCodeCache = 0x8, + SO_ScavengeCodeCache = 0x10 }; FlexibleWorkGang* workers() const { return _workers; } @@ -232,9 +233,9 @@ // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; // "SO_SystemClasses" to all the "system" classes and loaders; // "SO_Strings" applies the closure to all entries in StringTable; - // "SO_CodeCache" applies the closure to all elements of the CodeCache. + // "SO_AllCodeCache" applies the closure to all elements of the CodeCache. + // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache. void process_strong_roots(bool activate_scope, - bool is_scavenging, ScanningOption so, OopClosure* roots, CodeBlobClosure* code_roots, diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/space.cpp --- a/src/share/vm/memory/space.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/space.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -112,7 +112,7 @@ // cards are processed. For instance, CMS must remember mutator updates // (i.e. dirty cards) so as to re-scan mutated objects. 
// Such work can be piggy-backed here on dirty card scanning, so as to make - // it slightly more efficient than doing a complete non-detructive pre-scan + // it slightly more efficient than doing a complete non-destructive pre-scan // of the card table. MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure(); if (pCl != NULL) { @@ -324,8 +324,8 @@ } void OffsetTableContigSpace::set_end(HeapWord* new_end) { - // Space should not advertize an increase in size - // until after the underlying offest table has been enlarged. + // Space should not advertise an increase in size + // until after the underlying offset table has been enlarged. _offsets.resize(pointer_delta(new_end, bottom())); Space::set_end(new_end); } @@ -729,7 +729,7 @@ object_iterate_from(bm, blk); } -// For a continguous space object_iterate() and safe_object_iterate() +// For a ContiguousSpace object_iterate() and safe_object_iterate() // are the same. void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) { object_iterate(blk); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/space.hpp --- a/src/share/vm/memory/space.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/space.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -56,7 +56,7 @@ // Here's the Space hierarchy: // -// - Space -- an asbtract base class describing a heap area +// - Space -- an abstract base class describing a heap area // - CompactibleSpace -- a space supporting compaction // - CompactibleFreeListSpace -- (used for CMS generation) // - ContiguousSpace -- a compactible space in which all free space @@ -159,7 +159,7 @@ // (that is, if the space is contiguous), then this region must contain only // such objects: the memregion will be from the bottom of the region to the // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of - // the space must distiguish between objects in the region allocated before + // the space must distinguish between objects in the region allocated before // and after the call to save marks. virtual MemRegion used_region_at_save_marks() const { return MemRegion(bottom(), saved_mark_word()); @@ -190,7 +190,7 @@ // Returns true iff the given the space contains the // given address as part of an allocated object. For - // ceratin kinds of spaces, this might be a potentially + // certain kinds of spaces, this might be a potentially // expensive operation. To prevent performance problems // on account of its inadvertent use in product jvm's, // we restrict its use to assertion checks only. @@ -244,13 +244,13 @@ // Return an address indicating the extent of the iteration in the // event that the iteration had to return because of finding an // uninitialized object in the space, or if the closure "cl" - // signalled early termination. + // signaled early termination. virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl); virtual HeapWord* object_iterate_careful_m(MemRegion mr, ObjectClosureCareful* cl); // Create and return a new dirty card to oop closure. Can be - // overriden to return the appropriate type of closure + // overridden to return the appropriate type of closure // depending on the type of space in which the closure will // operate. ResourceArea allocated. virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, @@ -474,13 +474,13 @@ // be one, since compaction must succeed -- we go to the first space of // the previous generation if necessary, updating "cp"), reset compact_top // and then forward. In either case, returns the new value of "compact_top". 
- // If the forwarding crosses "cp->threshold", invokes the "cross_threhold" + // If the forwarding crosses "cp->threshold", invokes the "cross_threshold" // function of the then-current compaction space, and updates "cp->threshold // accordingly". virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); - // Return a size with adjusments as required of the space. + // Return a size with adjustments as required of the space. virtual size_t adjust_object_size_v(size_t size) const { return size; } protected: @@ -500,7 +500,7 @@ // Requires "allowed_deadspace_words > 0", that "q" is the start of a // free block of the given "word_len", and that "q", were it an object, - // would not move if forwared. If the size allows, fill the free + // would not move if forwarded. If the size allows, fill the free // block with an object, to prevent excessive compaction. Returns "true" // iff the free region was made deadspace, and modifies // "allowed_deadspace_words" to reflect the number of available deadspace diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/tenuredGeneration.cpp --- a/src/share/vm/memory/tenuredGeneration.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/tenuredGeneration.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -135,7 +135,7 @@ free()); } } - // If we had to expand to accomodate promotions from younger generations + // If we had to expand to accommodate promotions from younger generations if (!result && _capacity_at_prologue < capacity()) { result = true; if (PrintGC && Verbose) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/threadLocalAllocBuffer.cpp --- a/src/share/vm/memory/threadLocalAllocBuffer.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/threadLocalAllocBuffer.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -34,6 +34,7 @@ // Thread-Local Edens support // static member initialization +size_t ThreadLocalAllocBuffer::_max_size = 0; unsigned ThreadLocalAllocBuffer::_target_refills = 0; GlobalTLABStats* ThreadLocalAllocBuffer::_global_stats = NULL; @@ -45,7 +46,7 @@ void ThreadLocalAllocBuffer::accumulate_statistics_before_gc() { global_stats()->initialize(); - for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) { + for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) { thread->tlab().accumulate_statistics(); thread->tlab().initialize_statistics(); } @@ -60,28 +61,32 @@ } void ThreadLocalAllocBuffer::accumulate_statistics() { - size_t capacity = Universe::heap()->tlab_capacity(myThread()) / HeapWordSize; - size_t unused = Universe::heap()->unsafe_max_tlab_alloc(myThread()) / HeapWordSize; - size_t used = capacity - unused; - - // Update allocation history if a reasonable amount of eden was allocated. - bool update_allocation_history = used > 0.5 * capacity; + Thread* thread = myThread(); + size_t capacity = Universe::heap()->tlab_capacity(thread); + size_t used = Universe::heap()->tlab_used(thread); _gc_waste += (unsigned)remaining(); + size_t total_allocated = thread->allocated_bytes(); + size_t allocated_since_last_gc = total_allocated - _allocated_before_last_gc; + _allocated_before_last_gc = total_allocated; if (PrintTLAB && (_number_of_refills > 0 || Verbose)) { print_stats("gc"); } if (_number_of_refills > 0) { + // Update allocation history if a reasonable amount of eden was allocated. 
+ bool update_allocation_history = used > 0.5 * capacity; if (update_allocation_history) { // Average the fraction of eden allocated in a tlab by this // thread for use in the next resize operation. // _gc_waste is not subtracted because it's included in // "used". - size_t allocation = _number_of_refills * desired_size(); - double alloc_frac = allocation / (double) used; + // The result can be larger than 1.0 due to direct to old allocations. + // These allocations should ideally not be counted but since it is not possible + // to filter them out here we just cap the fraction to be at most 1.0. + double alloc_frac = MIN2(1.0, (double) allocated_since_last_gc / used); _allocation_fraction.sample(alloc_frac); } global_stats()->update_allocating_threads(); @@ -126,33 +131,32 @@ } void ThreadLocalAllocBuffer::resize_all_tlabs() { - for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) { - thread->tlab().resize(); + if (ResizeTLAB) { + for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) { + thread->tlab().resize(); + } } } void ThreadLocalAllocBuffer::resize() { + // Compute the next tlab size using expected allocation amount + assert(ResizeTLAB, "Should not call this otherwise"); + size_t alloc = (size_t)(_allocation_fraction.average() * + (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize)); + size_t new_size = alloc / _target_refills; - if (ResizeTLAB) { - // Compute the next tlab size using expected allocation amount - size_t alloc = (size_t)(_allocation_fraction.average() * - (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize)); - size_t new_size = alloc / _target_refills; - - new_size = MIN2(MAX2(new_size, min_size()), max_size()); - - size_t aligned_new_size = align_object_size(new_size); + new_size = MIN2(MAX2(new_size, min_size()), max_size()); - if (PrintTLAB && Verbose) { - gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]" - " refills %d alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n", - myThread(), myThread()->osthread()->thread_id(), - _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size); - } - set_desired_size(aligned_new_size); + size_t aligned_new_size = align_object_size(new_size); - set_refill_waste_limit(initial_refill_waste_limit()); + if (PrintTLAB && Verbose) { + gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]" + " refills %d alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n", + myThread(), myThread()->osthread()->thread_id(), + _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size); } + set_desired_size(aligned_new_size); + set_refill_waste_limit(initial_refill_waste_limit()); } void ThreadLocalAllocBuffer::initialize_statistics() { @@ -248,31 +252,13 @@ return init_sz; } -const size_t ThreadLocalAllocBuffer::max_size() { - - // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE]. - // This restriction could be removed by enabling filling with multiple arrays. - // If we compute that the reasonable way as - // header_size + ((sizeof(jint) * max_jint) / HeapWordSize) - // we'll overflow on the multiply, so we do the divide first. - // We actually lose a little by dividing first, - // but that just makes the TLAB somewhat smaller than the biggest array, - // which is fine, since we'll be able to fill that. 
- - size_t unaligned_max_size = typeArrayOopDesc::header_size(T_INT) + - sizeof(jint) * - ((juint) max_jint / (size_t) HeapWordSize); - return align_size_down(unaligned_max_size, MinObjAlignment); -} - void ThreadLocalAllocBuffer::print_stats(const char* tag) { Thread* thrd = myThread(); size_t waste = _gc_waste + _slow_refill_waste + _fast_refill_waste; size_t alloc = _number_of_refills * _desired_size; double waste_percent = alloc == 0 ? 0.0 : 100.0 * waste / alloc; - size_t tlab_used = Universe::heap()->tlab_capacity(thrd) - - Universe::heap()->unsafe_max_tlab_alloc(thrd); + size_t tlab_used = Universe::heap()->tlab_used(thrd); gclog_or_tty->print("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]" " desired_size: " SIZE_FORMAT "KB" " slow allocs: %d refill waste: " SIZE_FORMAT "B" diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/threadLocalAllocBuffer.hpp --- a/src/share/vm/memory/threadLocalAllocBuffer.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -45,7 +45,9 @@ HeapWord* _end; // allocation end (excluding alignment_reserve) size_t _desired_size; // desired size (including alignment_reserve) size_t _refill_waste_limit; // hold onto tlab if free() is larger than this + size_t _allocated_before_last_gc; // total bytes allocated up until the last gc + static size_t _max_size; // maximum size of any TLAB static unsigned _target_refills; // expected number of refills between GCs unsigned _number_of_refills; @@ -99,12 +101,13 @@ static GlobalTLABStats* global_stats() { return _global_stats; } public: - ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight) { + ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0) { // do nothing. tlabs must be inited by initialize() calls } static const size_t min_size() { return align_object_size(MinTLABSize / HeapWordSize); } - static const size_t max_size(); + static const size_t max_size() { assert(_max_size != 0, "max_size not set up"); return _max_size; } + static void set_max_size(size_t max_size) { _max_size = max_size; } HeapWord* start() const { return _start; } HeapWord* end() const { return _end; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/memory/universe.cpp --- a/src/share/vm/memory/universe.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/memory/universe.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -816,6 +816,8 @@ Universe::_collectedHeap = new GenCollectedHeap(gc_policy); } + ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size()); + jint status = Universe::heap()->initialize(); if (status != JNI_OK) { return status; @@ -1136,7 +1138,7 @@ SystemDictionary::ProtectionDomain_klass(), m);; } - // The folowing is initializing converter functions for serialization in + // The following is initializing converter functions for serialization in // JVM.cpp. If we clean up the StrictMath code above we may want to find // a better solution for this as well. initialize_converter_functions(); @@ -1178,7 +1180,7 @@ if (CodeCache::number_of_nmethods_with_dependencies() == 0) return; // CodeCache can only be updated by a thread_in_VM and they will all be - // stopped dring the safepoint so CodeCache will be safe to update without + // stopped during the safepoint so CodeCache will be safe to update without // holding the CodeCache_lock. 
KlassDepChange changes(dependee); @@ -1199,7 +1201,7 @@ if (CodeCache::number_of_nmethods_with_dependencies() == 0) return; // CodeCache can only be updated by a thread_in_VM and they will all be - // stopped dring the safepoint so CodeCache will be safe to update without + // stopped during the safepoint so CodeCache will be safe to update without // holding the CodeCache_lock. CallSiteDepChange changes(call_site(), method_handle()); @@ -1230,7 +1232,7 @@ if (CodeCache::number_of_nmethods_with_dependencies() == 0) return; // CodeCache can only be updated by a thread_in_VM and they will all be - // stopped dring the safepoint so CodeCache will be safe to update without + // stopped during the safepoint so CodeCache will be safe to update without // holding the CodeCache_lock. // Compute the dependent nmethods diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/oops/constantPool.cpp --- a/src/share/vm/oops/constantPool.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/oops/constantPool.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1874,7 +1874,6 @@ // Printing void ConstantPool::print_on(outputStream* st) const { - EXCEPTION_MARK; assert(is_constantPool(), "must be constantPool"); st->print_cr(internal_name()); if (flags() != 0) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/oops/method.hpp --- a/src/share/vm/oops/method.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/oops/method.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -38,13 +38,11 @@ #include "utilities/accessFlags.hpp" #include "utilities/growableArray.hpp" -// A Method* represents a Java method. +// A Method represents a Java method. // // Memory layout (each line represents a word). Note that most applications load thousands of methods, // so keeping the size of this structure small has a big impact on footprint. // -// We put all oops and method_size first for better gc cache locality. -// // The actual bytecodes are inlined after the end of the Method struct. // // There are bits in the access_flags telling whether inlined tables are present. 
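The deleted ThreadLocalAllocBuffer::max_size() above capped a TLAB at what a filler int[Integer.MAX_VALUE] can cover, and its comment explains the divide-before-multiply trick; Universe::initialize_heap now pushes heap()->max_tlab_size() into set_max_size() instead. The arithmetic is worth seeing with numbers. A standalone sketch with assumed 64-bit constants (the array header size and alignment here are illustrative guesses, not values read from the VM):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t HeapWordSize    = 8;          // assumed: 64-bit heap words
      const uint64_t max_jint        = 2147483647; // Integer.MAX_VALUE
      const uint64_t header_words    = 2;          // assumed int[] header, in words
      const uint64_t MinObjAlignment = 1;          // assumed, in words

      // Divide first: sizeof(jint) * max_jint overflows 32-bit math, and
      // dividing first only makes the bound slightly smaller, which is safe
      // because the filler array still fits.
      uint64_t unaligned = header_words
                         + sizeof(int32_t) * (max_jint / HeapWordSize);
      uint64_t max_tlab_words = unaligned - (unaligned % MinObjAlignment);

      printf("max TLAB: %llu words (%llu bytes)\n",
             (unsigned long long)max_tlab_words,
             (unsigned long long)(max_tlab_words * HeapWordSize));
      return 0;
    }

With these assumptions the bound comes out just under 8 GB, i.e. the restriction is about fillability, not a practical size limit; making it a stored, heap-supplied value lets collectors impose tighter caps.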
@@ -64,17 +62,17 @@ // | header | // | klass | // |------------------------------------------------------| -// | ConstMethod* (oop) | +// | ConstMethod* (metadata) | // |------------------------------------------------------| -// | methodData (oop) | -// | methodCounters | +// | MethodData* (metadata) | +// | MethodCounters | // |------------------------------------------------------| // | access_flags | // | vtable_index | // |------------------------------------------------------| // | result_index (C++ interpreter only) | // |------------------------------------------------------| -// | method_size | intrinsic_id| flags | +// | method_size | intrinsic_id | flags | // |------------------------------------------------------| // | code (pointer) | // | i2i (pointer) | diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/opto/cfgnode.cpp --- a/src/share/vm/opto/cfgnode.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/opto/cfgnode.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1018,7 +1018,7 @@ !jtkp->klass_is_exact() && // Keep exact interface klass (6894807) ttkp->is_loaded() && !ttkp->klass()->is_interface() ) { assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) || - ft->isa_narrowoop() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), ""); + ft->isa_narrowklass() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), ""); jt = ft; } } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/opto/library_call.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -304,6 +304,7 @@ bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id); Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting); Node* get_key_start_from_aescrypt_object(Node* aescrypt_object); + Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object); bool inline_encodeISOArray(); bool inline_updateCRC32(); bool inline_updateBytesCRC32(); @@ -5936,10 +5937,22 @@ Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object); if (k_start == NULL) return false; - // Call the stub. - make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(), - stubAddr, stubName, TypePtr::BOTTOM, - src_start, dest_start, k_start); + if (Matcher::pass_original_key_for_aes()) { + // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to + // compatibility issues between Java key expansion and SPARC crypto instructions + Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object); + if (original_k_start == NULL) return false; + + // Call the stub. + make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(), + stubAddr, stubName, TypePtr::BOTTOM, + src_start, dest_start, k_start, original_k_start); + } else { + // Call the stub. 
+ make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(), + stubAddr, stubName, TypePtr::BOTTOM, + src_start, dest_start, k_start); + } return true; } @@ -6017,14 +6030,29 @@ if (objRvec == NULL) return false; Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE); - // Call the stub, passing src_start, dest_start, k_start, r_start and src_len - make_runtime_call(RC_LEAF|RC_NO_FP, - OptoRuntime::cipherBlockChaining_aescrypt_Type(), - stubAddr, stubName, TypePtr::BOTTOM, - src_start, dest_start, k_start, r_start, len); - - // return is void so no result needs to be pushed - + Node* cbcCrypt; + if (Matcher::pass_original_key_for_aes()) { + // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to + // compatibility issues between Java key expansion and SPARC crypto instructions + Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object); + if (original_k_start == NULL) return false; + + // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start + cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP, + OptoRuntime::cipherBlockChaining_aescrypt_Type(), + stubAddr, stubName, TypePtr::BOTTOM, + src_start, dest_start, k_start, r_start, len, original_k_start); + } else { + // Call the stub, passing src_start, dest_start, k_start, r_start and src_len + cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP, + OptoRuntime::cipherBlockChaining_aescrypt_Type(), + stubAddr, stubName, TypePtr::BOTTOM, + src_start, dest_start, k_start, r_start, len); + } + + // return cipher length (int) + Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms)); + set_result(retvalue); return true; } @@ -6039,6 +6067,17 @@ return k_start; } +//------------------------------get_original_key_start_from_aescrypt_object----------------------- +Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) { + Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false); + assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt"); + if (objAESCryptKey == NULL) return (Node *) NULL; + + // now have the array, need to get the start address of the lastKey array + Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE); + return original_k_start; +} + //----------------------------inline_cipherBlockChaining_AESCrypt_predicate---------------------------- // Return node representing slow path of predicate check. // the pseudo code we want to emulate with this predicate is: diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/opto/matcher.hpp --- a/src/share/vm/opto/matcher.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/opto/matcher.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -286,6 +286,9 @@ // CPU supports misaligned vectors store/load. static const bool misaligned_vectors_ok(); + // Should original key array reference be passed to AES stubs + static const bool pass_original_key_for_aes(); + // Used to determine a "low complexity" 64-bit constant. (Zero is simple.) // The standard of comparison is one (StoreL ConL) vs. two (StoreI ConI). // Depends on the details of 64-bit constant generation on the CPU. 
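matcher.hpp above only declares the new Matcher::pass_original_key_for_aes() predicate; the runtime.cpp hunks below show what it buys: one extra "original key" argument appended to the AES stub type signatures on platforms (SPARC, per the comments) whose crypto instructions re-expand the key themselves. A toy sketch of the signature growth, with invented names standing in for the TypeTuple plumbing:

    #include <cassert>
    #include <string>
    #include <vector>

    std::vector<std::string> aescrypt_block_args(bool pass_original_key) {
      std::vector<std::string> fields = {"src", "dest", "k_array"};
      if (pass_original_key) {
        // Only on platforms that expand the key in hardware.
        fields.push_back("original_k_array");
      }
      return fields;
    }

    int main() {
      assert(aescrypt_block_args(false).size() == 3); // e.g. x86: expanded key only
      assert(aescrypt_block_args(true).size()  == 4); // e.g. SPARC: original key too
      return 0;
    }

The same predicate widens cipherBlockChaining_aescrypt_Type() from five arguments to six, and that signature also gains an int return (the cipher length) in place of void.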
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/opto/runtime.cpp --- a/src/share/vm/opto/runtime.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/opto/runtime.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -568,8 +568,7 @@ const TypeFunc *OptoRuntime::uncommon_trap_Type() { // create input type (domain) const Type **fields = TypeTuple::fields(1); - // Symbol* name of class to be loaded - fields[TypeFunc::Parms+0] = TypeInt::INT; + fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action) const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); // create result type (range) @@ -814,12 +813,18 @@ const TypeFunc* OptoRuntime::aescrypt_block_Type() { // create input type (domain) int num_args = 3; + if (Matcher::pass_original_key_for_aes()) { + num_args = 4; + } int argcnt = num_args; const Type** fields = TypeTuple::fields(argcnt); int argp = TypeFunc::Parms; fields[argp++] = TypePtr::NOTNULL; // src fields[argp++] = TypePtr::NOTNULL; // dest fields[argp++] = TypePtr::NOTNULL; // k array + if (Matcher::pass_original_key_for_aes()) { + fields[argp++] = TypePtr::NOTNULL; // original k array + } assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); @@ -856,6 +861,9 @@ const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() { // create input type (domain) int num_args = 5; + if (Matcher::pass_original_key_for_aes()) { + num_args = 6; + } int argcnt = num_args; const Type** fields = TypeTuple::fields(argcnt); int argp = TypeFunc::Parms; @@ -864,13 +872,16 @@ fields[argp++] = TypePtr::NOTNULL; // k array fields[argp++] = TypePtr::NOTNULL; // r array fields[argp++] = TypeInt::INT; // src len + if (Matcher::pass_original_key_for_aes()) { + fields[argp++] = TypePtr::NOTNULL; // original k array + } assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); - // no result type needed + // returning cipher len (int) fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; // void - const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); + fields[TypeFunc::Parms+0] = TypeInt::INT; + const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); return TypeFunc::make(domain, range); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/prims/jni.cpp --- a/src/share/vm/prims/jni.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/prims/jni.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -433,8 +433,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, FindClass__entry, env, name); #else /* USDT2 */ - HOTSPOT_JNI_FINDCLASS_ENTRY( - env, (char *)name); + HOTSPOT_JNI_FINDCLASS_ENTRY(env, (char *)name); #endif /* USDT2 */ jclass result = NULL; @@ -511,8 +510,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, FromReflectedMethod__entry, env, method); #else /* USDT2 */ - HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY( - env, method); + HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY(env, method); #endif /* USDT2 */ jmethodID ret = NULL; DT_RETURN_MARK(FromReflectedMethod, jmethodID, (const jmethodID&)ret); @@ -552,8 +550,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, FromReflectedField__entry, env, field); #else /* USDT2 */ - HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY( - env, field); + HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY(env, field); #endif /* USDT2 */ jfieldID ret = NULL; DT_RETURN_MARK(FromReflectedField, jfieldID, (const jfieldID&)ret); @@ -601,8 +598,7 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, ToReflectedMethod__entry, env, cls, method_id, isStatic); #else /* USDT2 */ - HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY( - env, cls, (uintptr_t) method_id, isStatic); + HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY(env, cls, (uintptr_t) method_id, isStatic); #endif /* USDT2 */ jobject ret = NULL; DT_RETURN_MARK(ToReflectedMethod, jobject, (const jobject&)ret); @@ -631,8 +627,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, GetSuperclass__entry, env, sub); #else /* USDT2 */ - HOTSPOT_JNI_GETSUPERCLASS_ENTRY( - env, sub); + HOTSPOT_JNI_GETSUPERCLASS_ENTRY(env, sub); #endif /* USDT2 */ jclass obj = NULL; DT_RETURN_MARK(GetSuperclass, jclass, (const jclass&)obj); @@ -665,8 +660,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, IsAssignableFrom__entry, env, sub, super); #else /* USDT2 */ - HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY( - env, sub, super); + HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY(env, sub, super); #endif /* USDT2 */ oop sub_mirror = JNIHandles::resolve_non_null(sub); oop super_mirror = JNIHandles::resolve_non_null(super); @@ -676,8 +670,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, IsAssignableFrom__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN( - ret); + HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(ret); #endif /* USDT2 */ return ret; } @@ -689,8 +682,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, IsAssignableFrom__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN( - ret); + HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -707,8 +699,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, Throw__entry, env, obj); #else /* USDT2 */ - HOTSPOT_JNI_THROW_ENTRY( - env, obj); + HOTSPOT_JNI_THROW_ENTRY(env, obj); #endif /* USDT2 */ jint ret = JNI_OK; DT_RETURN_MARK(Throw, jint, (const jint&)ret); @@ -729,8 +720,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, ThrowNew__entry, env, clazz, message); #else /* USDT2 */ - HOTSPOT_JNI_THROWNEW_ENTRY( - env, clazz, (char *) message); + HOTSPOT_JNI_THROWNEW_ENTRY(env, clazz, (char *) message); #endif /* USDT2 */ jint ret = JNI_OK; DT_RETURN_MARK(ThrowNew, jint, (const jint&)ret); @@ -763,8 +753,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, ExceptionOccurred__entry, env); #else /* USDT2 */ - HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY( - env); + HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY(env); #endif /* USDT2 */ jni_check_async_exceptions(thread); oop exception = thread->pending_exception(); @@ -772,8 +761,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, ExceptionOccurred__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN( - ret); + 
HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -784,8 +772,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, ExceptionDescribe__entry, env); #else /* USDT2 */ - HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY( - env); + HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY(env); #endif /* USDT2 */ if (thread->has_pending_exception()) { Handle ex(thread, thread->pending_exception()); @@ -825,8 +812,7 @@ #ifndef USDT2 DTRACE_PROBE(hotspot_jni, ExceptionDescribe__return); #else /* USDT2 */ - HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN( - ); + HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN(); #endif /* USDT2 */ JNI_END @@ -836,8 +822,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, ExceptionClear__entry, env); #else /* USDT2 */ - HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY( - env); + HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY(env); #endif /* USDT2 */ // The jni code might be using this API to clear java thrown exception. @@ -850,8 +835,7 @@ #ifndef USDT2 DTRACE_PROBE(hotspot_jni, ExceptionClear__return); #else /* USDT2 */ - HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN( - ); + HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN(); #endif /* USDT2 */ JNI_END @@ -861,8 +845,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, FatalError__entry, env, msg); #else /* USDT2 */ - HOTSPOT_JNI_FATALERROR_ENTRY( - env, (char *) msg); + HOTSPOT_JNI_FATALERROR_ENTRY(env, (char *) msg); #endif /* USDT2 */ tty->print_cr("FATAL ERROR in native method: %s", msg); thread->print_stack(); @@ -875,16 +858,14 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, PushLocalFrame__entry, env, capacity); #else /* USDT2 */ - HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY( - env, capacity); + HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY(env, capacity); #endif /* USDT2 */ //%note jni_11 if (capacity < 0 || capacity > MAX_REASONABLE_LOCAL_CAPACITY) { #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, JNI_ERR); #else /* USDT2 */ - HOTSPOT_JNI_PUSHLOCALFRAME_RETURN( - (uint32_t)JNI_ERR); + HOTSPOT_JNI_PUSHLOCALFRAME_RETURN((uint32_t)JNI_ERR); #endif /* USDT2 */ return JNI_ERR; } @@ -897,8 +878,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_PUSHLOCALFRAME_RETURN( - ret); + HOTSPOT_JNI_PUSHLOCALFRAME_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -909,8 +889,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, PopLocalFrame__entry, env, result); #else /* USDT2 */ - HOTSPOT_JNI_POPLOCALFRAME_ENTRY( - env, result); + HOTSPOT_JNI_POPLOCALFRAME_ENTRY(env, result); #endif /* USDT2 */ //%note jni_11 Handle result_handle(thread, JNIHandles::resolve(result)); @@ -929,8 +908,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, PopLocalFrame__return, result); #else /* USDT2 */ - HOTSPOT_JNI_POPLOCALFRAME_RETURN( - result); + HOTSPOT_JNI_POPLOCALFRAME_RETURN(result); #endif /* USDT2 */ return result; JNI_END @@ -941,16 +919,14 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, NewGlobalRef__entry, env, ref); #else /* USDT2 */ - HOTSPOT_JNI_NEWGLOBALREF_ENTRY( - env, ref); + HOTSPOT_JNI_NEWGLOBALREF_ENTRY(env, ref); #endif /* USDT2 */ Handle ref_handle(thread, JNIHandles::resolve(ref)); jobject ret = JNIHandles::make_global(ref_handle); #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, NewGlobalRef__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_NEWGLOBALREF_RETURN( - ret); + HOTSPOT_JNI_NEWGLOBALREF_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -961,15 +937,13 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, DeleteGlobalRef__entry, env, ref); #else /* USDT2 */ - HOTSPOT_JNI_DELETEGLOBALREF_ENTRY( - env, ref); + HOTSPOT_JNI_DELETEGLOBALREF_ENTRY(env, ref); #endif /* USDT2 */ 
JNIHandles::destroy_global(ref); #ifndef USDT2 DTRACE_PROBE(hotspot_jni, DeleteGlobalRef__return); #else /* USDT2 */ - HOTSPOT_JNI_DELETEGLOBALREF_RETURN( - ); + HOTSPOT_JNI_DELETEGLOBALREF_RETURN(); #endif /* USDT2 */ JNI_END @@ -978,15 +952,13 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, DeleteLocalRef__entry, env, obj); #else /* USDT2 */ - HOTSPOT_JNI_DELETELOCALREF_ENTRY( - env, obj); + HOTSPOT_JNI_DELETELOCALREF_ENTRY(env, obj); #endif /* USDT2 */ JNIHandles::destroy_local(obj); #ifndef USDT2 DTRACE_PROBE(hotspot_jni, DeleteLocalRef__return); #else /* USDT2 */ - HOTSPOT_JNI_DELETELOCALREF_RETURN( - ); + HOTSPOT_JNI_DELETELOCALREF_RETURN(); #endif /* USDT2 */ JNI_END @@ -995,8 +967,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, IsSameObject__entry, env, r1, r2); #else /* USDT2 */ - HOTSPOT_JNI_ISSAMEOBJECT_ENTRY( - env, r1, r2); + HOTSPOT_JNI_ISSAMEOBJECT_ENTRY(env, r1, r2); #endif /* USDT2 */ oop a = JNIHandles::resolve(r1); oop b = JNIHandles::resolve(r2); @@ -1004,8 +975,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, IsSameObject__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_ISSAMEOBJECT_RETURN( - ret); + HOTSPOT_JNI_ISSAMEOBJECT_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -1016,15 +986,13 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, NewLocalRef__entry, env, ref); #else /* USDT2 */ - HOTSPOT_JNI_NEWLOCALREF_ENTRY( - env, ref); + HOTSPOT_JNI_NEWLOCALREF_ENTRY(env, ref); #endif /* USDT2 */ jobject ret = JNIHandles::make_local(env, JNIHandles::resolve(ref)); #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, NewLocalRef__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_NEWLOCALREF_RETURN( - ret); + HOTSPOT_JNI_NEWLOCALREF_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -1034,8 +1002,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, EnsureLocalCapacity__entry, env, capacity); #else /* USDT2 */ - HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY( - env, capacity); + HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY(env, capacity); #endif /* USDT2 */ jint ret; if (capacity >= 0 && capacity <= MAX_REASONABLE_LOCAL_CAPACITY) { @@ -1046,8 +1013,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, EnsureLocalCapacity__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN( - ret); + HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -1058,8 +1024,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, GetObjectRefType__entry, env, obj); #else /* USDT2 */ - HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY( - env, obj); + HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY(env, obj); #endif /* USDT2 */ jobjectRefType ret; if (JNIHandles::is_local_handle(thread, obj) || @@ -1074,8 +1039,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetObjectRefType__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN( - (void *) ret); + HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN((void *) ret); #endif /* USDT2 */ return ret; JNI_END @@ -1391,6 +1355,10 @@ static instanceOop alloc_object(jclass clazz, TRAPS) { KlassHandle k(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz))); + if (k == NULL) { + ResourceMark rm(THREAD); + THROW_(vmSymbols::java_lang_InstantiationException(), NULL); + } k()->check_valid_for_instantiation(false, CHECK_NULL); InstanceKlass::cast(k())->initialize(CHECK_NULL); instanceOop ih = InstanceKlass::cast(k())->allocate_instance(THREAD); @@ -1410,8 +1378,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, AllocObject__entry, env, clazz); #else /* USDT2 */ - HOTSPOT_JNI_ALLOCOBJECT_ENTRY( - env, clazz); + HOTSPOT_JNI_ALLOCOBJECT_ENTRY(env, clazz); #endif /* USDT2 */ jobject ret = NULL; 
DT_RETURN_MARK(AllocObject, jobject, (const jobject&)ret); @@ -1433,8 +1400,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, NewObjectA__entry, env, clazz, methodID); #else /* USDT2 */ - HOTSPOT_JNI_NEWOBJECTA_ENTRY( - env, clazz, (uintptr_t) methodID); + HOTSPOT_JNI_NEWOBJECTA_ENTRY(env, clazz, (uintptr_t) methodID); #endif /* USDT2 */ jobject obj = NULL; DT_RETURN_MARK(NewObjectA, jobject, (const jobject)obj); @@ -1459,8 +1425,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, NewObjectV__entry, env, clazz, methodID); #else /* USDT2 */ - HOTSPOT_JNI_NEWOBJECTV_ENTRY( - env, clazz, (uintptr_t) methodID); + HOTSPOT_JNI_NEWOBJECTV_ENTRY(env, clazz, (uintptr_t) methodID); #endif /* USDT2 */ jobject obj = NULL; DT_RETURN_MARK(NewObjectV, jobject, (const jobject&)obj); @@ -1485,8 +1450,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, NewObject__entry, env, clazz, methodID); #else /* USDT2 */ - HOTSPOT_JNI_NEWOBJECT_ENTRY( - env, clazz, (uintptr_t) methodID); + HOTSPOT_JNI_NEWOBJECT_ENTRY(env, clazz, (uintptr_t) methodID); #endif /* USDT2 */ jobject obj = NULL; DT_RETURN_MARK(NewObject, jobject, (const jobject&)obj); @@ -1508,8 +1472,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, GetObjectClass__entry, env, obj); #else /* USDT2 */ - HOTSPOT_JNI_GETOBJECTCLASS_ENTRY( - env, obj); + HOTSPOT_JNI_GETOBJECTCLASS_ENTRY(env, obj); #endif /* USDT2 */ Klass* k = JNIHandles::resolve_non_null(obj)->klass(); jclass ret = @@ -1517,8 +1480,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetObjectClass__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETOBJECTCLASS_RETURN( - ret); + HOTSPOT_JNI_GETOBJECTCLASS_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -1528,8 +1490,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, IsInstanceOf__entry, env, obj, clazz); #else /* USDT2 */ - HOTSPOT_JNI_ISINSTANCEOF_ENTRY( - env, obj, clazz); + HOTSPOT_JNI_ISINSTANCEOF_ENTRY(env, obj, clazz); #endif /* USDT2 */ jboolean ret = JNI_TRUE; if (obj != NULL) { @@ -1543,8 +1504,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, IsInstanceOf__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_ISINSTANCEOF_RETURN( - ret); + HOTSPOT_JNI_ISINSTANCEOF_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -1608,15 +1568,13 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, GetMethodID__entry, env, clazz, name, sig); #else /* USDT2 */ - HOTSPOT_JNI_GETMETHODID_ENTRY( - env, clazz, (char *) name, (char *) sig); + HOTSPOT_JNI_GETMETHODID_ENTRY(env, clazz, (char *) name, (char *) sig); #endif /* USDT2 */ jmethodID ret = get_method_id(env, clazz, name, sig, false, thread); #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetMethodID__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETMETHODID_RETURN( - (uintptr_t) ret); + HOTSPOT_JNI_GETMETHODID_RETURN((uintptr_t) ret); #endif /* USDT2 */ return ret; JNI_END @@ -1628,15 +1586,13 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, GetStaticMethodID__entry, env, clazz, name, sig); #else /* USDT2 */ - HOTSPOT_JNI_GETSTATICMETHODID_ENTRY( - env, (char *) clazz, (char *) name, (char *)sig); + HOTSPOT_JNI_GETSTATICMETHODID_ENTRY(env, (char *) clazz, (char *) name, (char *)sig); #endif /* USDT2 */ jmethodID ret = get_method_id(env, clazz, name, sig, true, thread); #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStaticMethodID__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETSTATICMETHODID_RETURN( - (uintptr_t) ret); + HOTSPOT_JNI_GETSTATICMETHODID_RETURN((uintptr_t) ret); #endif /* USDT2 */ return ret; JNI_END @@ -1896,8 +1852,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, CallVoidMethod__entry, env, obj, methodID); #else /* USDT2 */ - 
HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY( - env, obj, (uintptr_t) methodID); + HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY(env, obj, (uintptr_t) methodID); #endif /* USDT2 */ DT_VOID_RETURN_MARK(CallVoidMethod); @@ -1915,8 +1870,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, CallVoidMethodV__entry, env, obj, methodID); #else /* USDT2 */ - HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY( - env, obj, (uintptr_t) methodID); + HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY(env, obj, (uintptr_t) methodID); #endif /* USDT2 */ DT_VOID_RETURN_MARK(CallVoidMethodV); @@ -1931,8 +1885,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, CallVoidMethodA__entry, env, obj, methodID); #else /* USDT2 */ - HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY( - env, obj, (uintptr_t) methodID); + HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY(env, obj, (uintptr_t) methodID); #endif /* USDT2 */ DT_VOID_RETURN_MARK(CallVoidMethodA); @@ -2194,8 +2147,7 @@ DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethod__entry, env, obj, cls, methodID); #else /* USDT2 */ - HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY( - env, obj, cls, (uintptr_t) methodID); + HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY(env, obj, cls, (uintptr_t) methodID); #endif /* USDT2 */ DT_VOID_RETURN_MARK(CallNonvirtualVoidMethod); @@ -2496,8 +2448,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethod__entry, env, cls, methodID); #else /* USDT2 */ - HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY( - env, cls, (uintptr_t) methodID); + HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY(env, cls, (uintptr_t) methodID); #endif /* USDT2 */ DT_VOID_RETURN_MARK(CallStaticVoidMethod); @@ -2515,8 +2466,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethodV__entry, env, cls, methodID); #else /* USDT2 */ - HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY( - env, cls, (uintptr_t) methodID); + HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY(env, cls, (uintptr_t) methodID); #endif /* USDT2 */ DT_VOID_RETURN_MARK(CallStaticVoidMethodV); @@ -2531,8 +2481,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethodA__entry, env, cls, methodID); #else /* USDT2 */ - HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY( - env, cls, (uintptr_t) methodID); + HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY(env, cls, (uintptr_t) methodID); #endif /* USDT2 */ DT_VOID_RETURN_MARK(CallStaticVoidMethodA); @@ -2560,8 +2509,7 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, GetFieldID__entry, env, clazz, name, sig); #else /* USDT2 */ - HOTSPOT_JNI_GETFIELDID_ENTRY( - env, clazz, (char *) name, (char *) sig); + HOTSPOT_JNI_GETFIELDID_ENTRY(env, clazz, (char *) name, (char *) sig); #endif /* USDT2 */ jfieldID ret = 0; DT_RETURN_MARK(GetFieldID, jfieldID, (const jfieldID&)ret); @@ -2597,8 +2545,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, GetObjectField__entry, env, obj, fieldID); #else /* USDT2 */ - HOTSPOT_JNI_GETOBJECTFIELD_ENTRY( - env, obj, (uintptr_t) fieldID); + HOTSPOT_JNI_GETOBJECTFIELD_ENTRY(env, obj, (uintptr_t) fieldID); #endif /* USDT2 */ oop o = JNIHandles::resolve_non_null(obj); Klass* k = o->klass(); @@ -2632,8 +2579,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetObjectField__return, ret); #else /* USDT2 */ -HOTSPOT_JNI_GETOBJECTFIELD_RETURN( - ret); +HOTSPOT_JNI_GETOBJECTFIELD_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -2758,8 +2704,7 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, SetObjectField__entry, env, obj, fieldID, value); #else /* USDT2 */ - HOTSPOT_JNI_SETOBJECTFIELD_ENTRY( - env, obj, (uintptr_t) fieldID, value); + HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(env, obj, (uintptr_t) fieldID, value); #endif /* USDT2 */ oop o = JNIHandles::resolve_non_null(obj); Klass* k = 
o->klass(); @@ -2776,8 +2721,7 @@ #ifndef USDT2 DTRACE_PROBE(hotspot_jni, SetObjectField__return); #else /* USDT2 */ - HOTSPOT_JNI_SETOBJECTFIELD_RETURN( -); + HOTSPOT_JNI_SETOBJECTFIELD_RETURN(); #endif /* USDT2 */ JNI_END @@ -2880,8 +2824,7 @@ DTRACE_PROBE4(hotspot_jni, ToReflectedField__entry, env, cls, fieldID, isStatic); #else /* USDT2 */ - HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY( - env, cls, (uintptr_t) fieldID, isStatic); + HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY(env, cls, (uintptr_t) fieldID, isStatic); #endif /* USDT2 */ jobject ret = NULL; DT_RETURN_MARK(ToReflectedField, jobject, (const jobject&)ret); @@ -2925,8 +2868,7 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, GetStaticFieldID__entry, env, clazz, name, sig); #else /* USDT2 */ - HOTSPOT_JNI_GETSTATICFIELDID_ENTRY( - env, clazz, (char *) name, (char *) sig); + HOTSPOT_JNI_GETSTATICFIELDID_ENTRY(env, clazz, (char *) name, (char *) sig); #endif /* USDT2 */ jfieldID ret = NULL; DT_RETURN_MARK(GetStaticFieldID, jfieldID, (const jfieldID&)ret); @@ -2966,8 +2908,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, GetStaticObjectField__entry, env, clazz, fieldID); #else /* USDT2 */ - HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY( - env, clazz, (uintptr_t) fieldID); + HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY(env, clazz, (uintptr_t) fieldID); #endif /* USDT2 */ #if INCLUDE_JNI_CHECK DEBUG_ONLY(Klass* param_k = jniCheck::validate_class(thread, clazz);) @@ -2983,8 +2924,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStaticObjectField__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN( - ret); + HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -3069,8 +3009,7 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, SetStaticObjectField__entry, env, clazz, fieldID, value); #else /* USDT2 */ - HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY( - env, clazz, (uintptr_t) fieldID, value); + HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value); #endif /* USDT2 */ JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID); assert(id->is_static_field_id(), "invalid static field id"); @@ -3085,8 +3024,7 @@ #ifndef USDT2 DTRACE_PROBE(hotspot_jni, SetStaticObjectField__return); #else /* USDT2 */ - HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN( - ); + HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN(); #endif /* USDT2 */ JNI_END @@ -3189,8 +3127,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, NewString__entry, env, unicodeChars, len); #else /* USDT2 */ - HOTSPOT_JNI_NEWSTRING_ENTRY( - env, (uint16_t *) unicodeChars, len); + HOTSPOT_JNI_NEWSTRING_ENTRY(env, (uint16_t *) unicodeChars, len); #endif /* USDT2 */ jstring ret = NULL; DT_RETURN_MARK(NewString, jstring, (const jstring&)ret); @@ -3205,8 +3142,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, GetStringLength__entry, env, string); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY( - env, string); + HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(env, string); #endif /* USDT2 */ jsize ret = 0; oop s = JNIHandles::resolve_non_null(string); @@ -3216,8 +3152,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStringLength__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGLENGTH_RETURN( - ret); + HOTSPOT_JNI_GETSTRINGLENGTH_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -3229,8 +3164,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, GetStringChars__entry, env, string, isCopy); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGCHARS_ENTRY( - env, string, (uintptr_t *) isCopy); + HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(env, string, (uintptr_t *) isCopy); #endif /* USDT2 */ jchar* buf = NULL; oop s 
= JNIHandles::resolve_non_null(string); @@ -3254,8 +3188,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStringChars__return, buf); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGCHARS_RETURN( - buf); + HOTSPOT_JNI_GETSTRINGCHARS_RETURN(buf); #endif /* USDT2 */ return buf; JNI_END @@ -3266,8 +3199,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, ReleaseStringChars__entry, env, str, chars); #else /* USDT2 */ - HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY( - env, str, (uint16_t *) chars); + HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY(env, str, (uint16_t *) chars); #endif /* USDT2 */ //%note jni_6 if (chars != NULL) { @@ -3278,8 +3210,7 @@ #ifndef USDT2 DTRACE_PROBE(hotspot_jni, ReleaseStringChars__return); #else /* USDT2 */ - HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN( -); + HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN(); #endif /* USDT2 */ JNI_END @@ -3298,8 +3229,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, NewStringUTF__entry, env, bytes); #else /* USDT2 */ - HOTSPOT_JNI_NEWSTRINGUTF_ENTRY( - env, (char *) bytes); + HOTSPOT_JNI_NEWSTRINGUTF_ENTRY(env, (char *) bytes); #endif /* USDT2 */ jstring ret; DT_RETURN_MARK(NewStringUTF, jstring, (const jstring&)ret); @@ -3315,8 +3245,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, GetStringUTFLength__entry, env, string); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY( - env, string); + HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(env, string); #endif /* USDT2 */ jsize ret = 0; oop java_string = JNIHandles::resolve_non_null(string); @@ -3326,8 +3255,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStringUTFLength__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN( - ret); + HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -3338,8 +3266,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, GetStringUTFChars__entry, env, string, isCopy); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY( - env, string, (uintptr_t *) isCopy); + HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(env, string, (uintptr_t *) isCopy); #endif /* USDT2 */ char* result = NULL; oop java_string = JNIHandles::resolve_non_null(string); @@ -3357,8 +3284,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStringUTFChars__return, result); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN( - result); + HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN(result); #endif /* USDT2 */ return result; JNI_END @@ -3369,8 +3295,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, ReleaseStringUTFChars__entry, env, str, chars); #else /* USDT2 */ - HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY( - env, str, (char *) chars); + HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY(env, str, (char *) chars); #endif /* USDT2 */ if (chars != NULL) { FreeHeap((char*) chars); @@ -3378,8 +3303,7 @@ #ifndef USDT2 DTRACE_PROBE(hotspot_jni, ReleaseStringUTFChars__return); #else /* USDT2 */ -HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN( -); +HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN(); #endif /* USDT2 */ JNI_END @@ -3389,8 +3313,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, GetArrayLength__entry, env, array); #else /* USDT2 */ - HOTSPOT_JNI_GETARRAYLENGTH_ENTRY( - env, array); + HOTSPOT_JNI_GETARRAYLENGTH_ENTRY(env, array); #endif /* USDT2 */ arrayOop a = arrayOop(JNIHandles::resolve_non_null(array)); assert(a->is_array(), "must be array"); @@ -3398,8 +3321,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetArrayLength__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETARRAYLENGTH_RETURN( - ret); + HOTSPOT_JNI_GETARRAYLENGTH_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -3421,8 +3343,7 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, 
NewObjectArray__entry, env, length, elementClass, initialElement); #else /* USDT2 */ - HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY( - env, length, elementClass, initialElement); + HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY(env, length, elementClass, initialElement); #endif /* USDT2 */ jobjectArray ret = NULL; DT_RETURN_MARK(NewObjectArray, jobjectArray, (const jobjectArray&)ret); @@ -3453,8 +3374,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, GetObjectArrayElement__entry, env, array, index); #else /* USDT2 */ - HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY( - env, array, index); + HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY(env, array, index); #endif /* USDT2 */ jobject ret = NULL; DT_RETURN_MARK(GetObjectArrayElement, jobject, (const jobject&)ret); @@ -3481,8 +3401,7 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, SetObjectArrayElement__entry, env, array, index, value); #else /* USDT2 */ - HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY( - env, array, index, value); + HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY(env, array, index, value); #endif /* USDT2 */ DT_VOID_RETURN_MARK(SetObjectArrayElement); @@ -4034,8 +3953,7 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, RegisterNatives__entry, env, clazz, methods, nMethods); #else /* USDT2 */ - HOTSPOT_JNI_REGISTERNATIVES_ENTRY( - env, clazz, (void *) methods, nMethods); + HOTSPOT_JNI_REGISTERNATIVES_ENTRY(env, clazz, (void *) methods, nMethods); #endif /* USDT2 */ jint ret = 0; DT_RETURN_MARK(RegisterNatives, jint, (const jint&)ret); @@ -4077,8 +3995,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, UnregisterNatives__entry, env, clazz); #else /* USDT2 */ - HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY( - env, clazz); + HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY(env, clazz); #endif /* USDT2 */ Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz)); //%note jni_2 @@ -4094,8 +4011,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, UnregisterNatives__return, 0); #else /* USDT2 */ - HOTSPOT_JNI_UNREGISTERNATIVES_RETURN( - 0); + HOTSPOT_JNI_UNREGISTERNATIVES_RETURN(0); #endif /* USDT2 */ return 0; JNI_END @@ -4115,8 +4031,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, MonitorEnter__entry, env, jobj); #else /* USDT2 */ - HOTSPOT_JNI_MONITORENTER_ENTRY( - env, jobj); + HOTSPOT_JNI_MONITORENTER_ENTRY(env, jobj); #endif /* USDT2 */ jint ret = JNI_ERR; DT_RETURN_MARK(MonitorEnter, jint, (const jint&)ret); @@ -4143,8 +4058,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, MonitorExit__entry, env, jobj); #else /* USDT2 */ - HOTSPOT_JNI_MONITOREXIT_ENTRY( - env, jobj); + HOTSPOT_JNI_MONITOREXIT_ENTRY(env, jobj); #endif /* USDT2 */ jint ret = JNI_ERR; DT_RETURN_MARK(MonitorExit, jint, (const jint&)ret); @@ -4177,8 +4091,7 @@ #ifndef USDT2 DTRACE_PROBE5(hotspot_jni, GetStringRegion__entry, env, string, start, len, buf); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGREGION_ENTRY( - env, string, start, len, buf); + HOTSPOT_JNI_GETSTRINGREGION_ENTRY(env, string, start, len, buf); #endif /* USDT2 */ DT_VOID_RETURN_MARK(GetStringRegion); oop s = JNIHandles::resolve_non_null(string); @@ -4206,8 +4119,7 @@ #ifndef USDT2 DTRACE_PROBE5(hotspot_jni, GetStringUTFRegion__entry, env, string, start, len, buf); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY( - env, string, start, len, buf); + HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY(env, string, start, len, buf); #endif /* USDT2 */ DT_VOID_RETURN_MARK(GetStringUTFRegion); oop s = JNIHandles::resolve_non_null(string); @@ -4237,8 +4149,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, GetPrimitiveArrayCritical__entry, env, array, isCopy); #else /* USDT2 */ - 
HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY( - env, array, (uintptr_t *) isCopy); + HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(env, array, (uintptr_t *) isCopy); #endif /* USDT2 */ GC_locker::lock_critical(thread); if (isCopy != NULL) { @@ -4256,8 +4167,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetPrimitiveArrayCritical__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN( - ret); + HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -4268,16 +4178,14 @@ #ifndef USDT2 DTRACE_PROBE4(hotspot_jni, ReleasePrimitiveArrayCritical__entry, env, array, carray, mode); #else /* USDT2 */ - HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY( - env, array, carray, mode); + HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode); #endif /* USDT2 */ // The array, carray and mode arguments are ignored GC_locker::unlock_critical(thread); #ifndef USDT2 DTRACE_PROBE(hotspot_jni, ReleasePrimitiveArrayCritical__return); #else /* USDT2 */ -HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN( -); +HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN(); #endif /* USDT2 */ JNI_END @@ -4287,8 +4195,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, GetStringCritical__entry, env, string, isCopy); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY( - env, string, (uintptr_t *) isCopy); + HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy); #endif /* USDT2 */ GC_locker::lock_critical(thread); if (isCopy != NULL) { @@ -4307,8 +4214,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStringCritical__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN( - (uint16_t *) ret); + HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN((uint16_t *) ret); #endif /* USDT2 */ return ret; JNI_END @@ -4319,16 +4225,14 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, ReleaseStringCritical__entry, env, str, chars); #else /* USDT2 */ - HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY( - env, str, (uint16_t *) chars); + HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY(env, str, (uint16_t *) chars); #endif /* USDT2 */ // The str and chars arguments are ignored GC_locker::unlock_critical(thread); #ifndef USDT2 DTRACE_PROBE(hotspot_jni, ReleaseStringCritical__return); #else /* USDT2 */ -HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN( -); +HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN(); #endif /* USDT2 */ JNI_END @@ -4338,16 +4242,14 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, NewWeakGlobalRef__entry, env, ref); #else /* USDT2 */ - HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY( - env, ref); + HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY(env, ref); #endif /* USDT2 */ Handle ref_handle(thread, JNIHandles::resolve(ref)); jweak ret = JNIHandles::make_weak_global(ref_handle); #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, NewWeakGlobalRef__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN( - ret); + HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -4358,15 +4260,13 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, DeleteWeakGlobalRef__entry, env, ref); #else /* USDT2 */ - HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY( - env, ref); + HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY(env, ref); #endif /* USDT2 */ JNIHandles::destroy_weak_global(ref); #ifndef USDT2 DTRACE_PROBE(hotspot_jni, DeleteWeakGlobalRef__return); #else /* USDT2 */ - HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN( - ); + HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN(); #endif /* USDT2 */ JNI_END @@ -4376,16 +4276,14 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, ExceptionCheck__entry, env); #else /* USDT2 */ - 
HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY( - env); + HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY(env); #endif /* USDT2 */ jni_check_async_exceptions(thread); jboolean ret = (thread->has_pending_exception()) ? JNI_TRUE : JNI_FALSE; #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, ExceptionCheck__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_EXCEPTIONCHECK_RETURN( - ret); + HOTSPOT_JNI_EXCEPTIONCHECK_RETURN(ret); #endif /* USDT2 */ return ret; JNI_END @@ -4481,8 +4379,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, NewDirectByteBuffer__entry, env, address, capacity); #else /* USDT2 */ - HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY( - env, address, capacity); + HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY(env, address, capacity); #endif /* USDT2 */ if (!directBufferSupportInitializeEnded) { @@ -4490,8 +4387,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, NewDirectByteBuffer__return, NULL); #else /* USDT2 */ - HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN( - NULL); + HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(NULL); #endif /* USDT2 */ return NULL; } @@ -4506,8 +4402,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, NewDirectByteBuffer__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN( - ret); + HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(ret); #endif /* USDT2 */ return ret; } @@ -4528,8 +4423,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, GetDirectBufferAddress__entry, env, buf); #else /* USDT2 */ - HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY( - env, buf); + HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY(env, buf); #endif /* USDT2 */ void* ret = NULL; DT_RETURN_MARK(GetDirectBufferAddress, void*, (const void*&)ret); @@ -4564,8 +4458,7 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, GetDirectBufferCapacity__entry, env, buf); #else /* USDT2 */ - HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY( - env, buf); + HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY(env, buf); #endif /* USDT2 */ jlong ret = -1; DT_RETURN_MARK(GetDirectBufferCapacity, jlong, (const jlong&)ret); @@ -4596,14 +4489,12 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetVersion__entry, env); #else /* USDT2 */ - HOTSPOT_JNI_GETVERSION_ENTRY( - env); + HOTSPOT_JNI_GETVERSION_ENTRY(env); #endif /* USDT2 */ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetVersion__return, CurrentVersion); #else /* USDT2 */ - HOTSPOT_JNI_GETVERSION_RETURN( - CurrentVersion); + HOTSPOT_JNI_GETVERSION_RETURN(CurrentVersion); #endif /* USDT2 */ return CurrentVersion; JNI_END @@ -4615,15 +4506,13 @@ #ifndef USDT2 DTRACE_PROBE2(hotspot_jni, GetJavaVM__entry, env, vm); #else /* USDT2 */ - HOTSPOT_JNI_GETJAVAVM_ENTRY( - env, (void **) vm); + HOTSPOT_JNI_GETJAVAVM_ENTRY(env, (void **) vm); #endif /* USDT2 */ *vm = (JavaVM *)(&main_vm); #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetJavaVM__return, JNI_OK); #else /* USDT2 */ - HOTSPOT_JNI_GETJAVAVM_RETURN( - JNI_OK); + HOTSPOT_JNI_GETJAVAVM_RETURN(JNI_OK); #endif /* USDT2 */ return JNI_OK; JNI_END @@ -5014,8 +4903,7 @@ #ifndef USDT2 HS_DTRACE_PROBE1(hotspot_jni, GetDefaultJavaVMInitArgs__entry, args_); #else /* USDT2 */ - HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY( - args_); + HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY(args_); #endif /* USDT2 */ JDK1_1InitArgs *args = (JDK1_1InitArgs *)args_; jint ret = JNI_ERR; @@ -5061,6 +4949,7 @@ void TestMetaspaceAux_test(); void TestMetachunk_test(); void TestVirtualSpaceNode_test(); +void TestOldFreeSpaceCalculation_test(); #if INCLUDE_ALL_GCS void TestG1BiasedArray_test(); #endif @@ -5081,6 +4970,7 @@ run_unit_test(QuickSort::test_quick_sort()); run_unit_test(AltHashing::test_alt_hash()); run_unit_test(test_loggc_filename()); + 
run_unit_test(TestOldFreeSpaceCalculation_test()); #if INCLUDE_VM_STRUCTS run_unit_test(VMStructs::test()); #endif @@ -5108,8 +4998,7 @@ #ifndef USDT2 HS_DTRACE_PROBE3(hotspot_jni, CreateJavaVM__entry, vm, penv, args); #else /* USDT2 */ - HOTSPOT_JNI_CREATEJAVAVM_ENTRY( - (void **) vm, penv, args); + HOTSPOT_JNI_CREATEJAVAVM_ENTRY((void **) vm, penv, args); #endif /* USDT2 */ jint result = JNI_ERR; @@ -5166,6 +5055,7 @@ result = Threads::create_vm((JavaVMInitArgs*) args, &can_try_again); if (result == JNI_OK) { JavaThread *thread = JavaThread::current(); + assert(!thread->has_pending_exception(), "should have returned not OK"); /* thread is thread_in_vm here */ *vm = (JavaVM *)(&main_vm); *(JNIEnv**)penv = thread->jni_environment(); @@ -5202,6 +5092,19 @@ // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving. ThreadStateTransition::transition_and_fence(thread, _thread_in_vm, _thread_in_native); } else { + // If create_vm exits because of a pending exception, exit with that + // exception. In the future when we figure out how to reclaim memory, + // we may be able to exit with JNI_ERR and allow the calling application + // to continue. + if (Universe::is_fully_initialized()) { + // otherwise no pending exception possible - VM will already have aborted + JavaThread* THREAD = JavaThread::current(); + if (HAS_PENDING_EXCEPTION) { + HandleMark hm; + vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); + } + } + if (can_try_again) { // reset safe_to_recreate_vm to 1 so that retrial would be possible safe_to_recreate_vm = 1; @@ -5231,8 +5134,7 @@ HS_DTRACE_PROBE3(hotspot_jni, GetCreatedJavaVMs__entry, \ vm_buf, bufLen, numVMs); #else /* USDT2 */ - HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY( - (void **) vm_buf, bufLen, (uintptr_t *) numVMs); + HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY((void **) vm_buf, bufLen, (uintptr_t *) numVMs); #endif /* USDT2 */ if (vm_created) { if (numVMs != NULL) *numVMs = 1; @@ -5243,8 +5145,7 @@ #ifndef USDT2 HS_DTRACE_PROBE1(hotspot_jni, GetCreatedJavaVMs__return, JNI_OK); #else /* USDT2 */ - HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN( - JNI_OK); + HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN(JNI_OK); #endif /* USDT2 */ return JNI_OK; } @@ -5262,8 +5163,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, DestroyJavaVM__entry, vm); #else /* USDT2 */ - HOTSPOT_JNI_DESTROYJAVAVM_ENTRY( - vm); + HOTSPOT_JNI_DESTROYJAVAVM_ENTRY(vm); #endif /* USDT2 */ jint res = JNI_ERR; DT_RETURN_MARK(DestroyJavaVM, jint, (const jint&)res); @@ -5419,15 +5319,13 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, AttachCurrentThread__entry, vm, penv, _args); #else /* USDT2 */ - HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY( - vm, penv, _args); + HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY(vm, penv, _args); #endif /* USDT2 */ if (!vm_created) { #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, AttachCurrentThread__return, JNI_ERR); #else /* USDT2 */ - HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN( - (uint32_t) JNI_ERR); + HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN((uint32_t) JNI_ERR); #endif /* USDT2 */ return JNI_ERR; } @@ -5437,8 +5335,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, AttachCurrentThread__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN( - ret); + HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN(ret); #endif /* USDT2 */ return ret; } @@ -5448,8 +5345,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__entry, vm); #else /* USDT2 */ - HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY( - vm); + HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY(vm); #endif /* USDT2 */ VM_Exit::block_if_vm_exited(); @@ 
-5460,8 +5356,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__return, JNI_OK); #else /* USDT2 */ - HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN( - JNI_OK); + HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK); #endif /* USDT2 */ return JNI_OK; } @@ -5471,8 +5366,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__return, JNI_ERR); #else /* USDT2 */ - HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN( - (uint32_t) JNI_ERR); + HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN((uint32_t) JNI_ERR); #endif /* USDT2 */ // Can't detach a thread that's running java, that can't work. return JNI_ERR; @@ -5497,8 +5391,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__return, JNI_OK); #else /* USDT2 */ - HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN( - JNI_OK); + HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK); #endif /* USDT2 */ return JNI_OK; } @@ -5514,8 +5407,7 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, GetEnv__entry, vm, penv, version); #else /* USDT2 */ - HOTSPOT_JNI_GETENV_ENTRY( - vm, penv, version); + HOTSPOT_JNI_GETENV_ENTRY(vm, penv, version); #endif /* USDT2 */ jint ret = JNI_ERR; DT_RETURN_MARK(GetEnv, jint, (const jint&)ret); @@ -5573,15 +5465,13 @@ #ifndef USDT2 DTRACE_PROBE3(hotspot_jni, AttachCurrentThreadAsDaemon__entry, vm, penv, _args); #else /* USDT2 */ - HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY( - vm, penv, _args); + HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY(vm, penv, _args); #endif /* USDT2 */ if (!vm_created) { #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, AttachCurrentThreadAsDaemon__return, JNI_ERR); #else /* USDT2 */ - HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN( - (uint32_t) JNI_ERR); + HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN((uint32_t) JNI_ERR); #endif /* USDT2 */ return JNI_ERR; } @@ -5591,8 +5481,7 @@ #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, AttachCurrentThreadAsDaemon__return, ret); #else /* USDT2 */ - HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN( - ret); + HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN(ret); #endif /* USDT2 */ return ret; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/prims/jvmtiEnvThreadState.cpp --- a/src/share/vm/prims/jvmtiEnvThreadState.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/prims/jvmtiEnvThreadState.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -272,7 +272,7 @@ // There can be a race condition between a VM_Operation reaching a safepoint // and the target thread exiting from Java execution. // We must recheck the last Java frame still exists. - if (_thread->has_last_Java_frame()) { + if (!_thread->is_exiting() && _thread->has_last_Java_frame()) { javaVFrame* vf = _thread->last_java_vframe(&rm); assert(vf != NULL, "must have last java frame"); Method* method = vf->method(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/prims/jvmtiRedefineClasses.cpp --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -147,6 +147,9 @@ _scratch_classes[i] = NULL; } + // Disable any dependent concurrent compilations + SystemDictionary::notice_modification(); + // Set flag indicating that some invariants are no longer true. // See jvmtiExport.hpp for detailed explanation. 
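For context on the CreateJavaVM error-handling hunk above, here is a minimal embedder-side sketch using the standard JNI invocation API; the -Xmx option value is illustrative only. With the change above, an initialization failure that leaves a pending Java exception now exits inside the VM via vm_exit_during_initialization() rather than returning JNI_ERR with a partially torn-down VM, so the embedder's error branch is reached only for failures the VM can report cleanly.

// Minimal JNI_CreateJavaVM sketch; option string is illustrative.
#include <jni.h>
#include <stdio.h>

int main() {
  JavaVMOption options[1];
  options[0].optionString = (char*)"-Xmx64m";   // illustrative option

  JavaVMInitArgs vm_args;
  vm_args.version = JNI_VERSION_1_6;
  vm_args.nOptions = 1;
  vm_args.options = options;
  vm_args.ignoreUnrecognized = JNI_FALSE;

  JavaVM* jvm = NULL;
  JNIEnv* env = NULL;
  jint rc = JNI_CreateJavaVM(&jvm, (void**)&env, &vm_args);
  if (rc != JNI_OK) {
    // Reached for failures the VM reports cleanly; with the hunk above, an
    // init failure carrying a pending Java exception aborts inside the VM.
    fprintf(stderr, "JNI_CreateJavaVM failed: %d\n", (int)rc);
    return 1;
  }
  jvm->DestroyJavaVM();
  return 0;
}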
JvmtiExport::set_has_redefined_a_class(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/advancedThresholdPolicy.cpp --- a/src/share/vm/runtime/advancedThresholdPolicy.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -306,7 +306,7 @@ * profiling can start at level 0 and finish at level 3. * * b. 0 -> 2 -> 3 -> 4. - * This case occures when the load on C2 is deemed too high. So, instead of transitioning + * This case occurs when the load on C2 is deemed too high. So, instead of transitioning * into state 3 directly and over-profiling while a method is in the C2 queue we transition to * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs. * diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/arguments.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -178,7 +178,7 @@ PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(), false)); PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(), true)); - // following are JVMTI agent writeable properties. + // Following are JVMTI agent writable properties. // Properties values are set to NULL and they are // os specific they are initialized in os::init_system_properties_values(). _java_ext_dirs = new SystemProperty("java.ext.dirs", NULL, true); @@ -878,7 +878,7 @@ arg_len = equal_sign - argname; } - Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true); + Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true, true); if (found_flag != NULL) { char locked_message_buf[BUFLEN]; found_flag->get_locked_message(locked_message_buf, BUFLEN); @@ -1306,7 +1306,7 @@ if (!FLAG_IS_DEFAULT(OldPLABSize)) { if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) { // OldPLABSize is not the default value but CMSParPromoteBlocksToClaim - // is. In this situtation let CMSParPromoteBlocksToClaim follow + // is. In this situation let CMSParPromoteBlocksToClaim follow // the value (either from the command line or ergonomics) of // OldPLABSize. Following OldPLABSize is an ergonomics decision. FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize); @@ -1569,6 +1569,16 @@ vm_exit(1); } + if (UseAdaptiveSizePolicy) { + // We don't want to limit adaptive heap sizing's freedom to adjust the heap + // unless the user actually sets these flags. 
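The UseAdaptiveSizePolicy hunk above relies on the FLAG_IS_DEFAULT/FLAG_SET_DEFAULT idiom: ergonomics may only overwrite values the user did not set explicitly. A simplified standalone sketch of that idiom follows; the Flag struct here is a stand-in, not HotSpot's flag machinery.

#include <cstdint>

struct Flag {
  uint64_t value;
  bool     set_by_user;   // true if the value came from the command line
};

// Mirrors FLAG_IS_DEFAULT(...) / FLAG_SET_DEFAULT(...): ergonomics may only
// touch a flag the user left alone.
static void set_ergo_default(Flag& f, uint64_t v) {
  if (!f.set_by_user) {
    f.value = v;
  }
}

static void apply_adaptive_size_policy(bool use_adaptive_size_policy,
                                       Flag& min_heap_free_ratio,
                                       Flag& max_heap_free_ratio) {
  if (use_adaptive_size_policy) {
    // Give adaptive heap sizing full freedom unless the user constrained it.
    set_ergo_default(min_heap_free_ratio, 0);
    set_ergo_default(max_heap_free_ratio, 100);
  }
}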
+ if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) { + FLAG_SET_DEFAULT(MinHeapFreeRatio, 0); + } + if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) { + FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100); + } + } // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the // SurvivorRatio has been set, reset their default values to SurvivorRatio + @@ -1844,7 +1854,7 @@ } bool Arguments::verify_percentage(uintx value, const char* name) { - if (value <= 100) { + if (is_percentage(value)) { return true; } jio_fprintf(defaultStream::error_stream(), @@ -1932,6 +1942,34 @@ return count_p < 2 && count_t < 2; } +bool Arguments::verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio) { + if (!is_percentage(min_heap_free_ratio)) { + err_msg.print("MinHeapFreeRatio must have a value between 0 and 100"); + return false; + } + if (min_heap_free_ratio > MaxHeapFreeRatio) { + err_msg.print("MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or " + "equal to MaxHeapFreeRatio (" UINTX_FORMAT ")", min_heap_free_ratio, + MaxHeapFreeRatio); + return false; + } + return true; +} + +bool Arguments::verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio) { + if (!is_percentage(max_heap_free_ratio)) { + err_msg.print("MaxHeapFreeRatio must have a value between 0 and 100"); + return false; + } + if (max_heap_free_ratio < MinHeapFreeRatio) { + err_msg.print("MaxHeapFreeRatio (" UINTX_FORMAT ") must be greater than or " + "equal to MinHeapFreeRatio (" UINTX_FORMAT ")", max_heap_free_ratio, + MinHeapFreeRatio); + return false; + } + return true; +} + // Check consistency of GC selection bool Arguments::check_gc_consistency() { check_gclog_consistency(); @@ -2037,8 +2075,6 @@ status = status && verify_interval(AdaptiveSizePolicyWeight, 0, 100, "AdaptiveSizePolicyWeight"); status = status && verify_percentage(ThresholdTolerance, "ThresholdTolerance"); - status = status && verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio"); - status = status && verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio"); // Divide by bucket size to prevent a large size from causing rollover when // calculating amount of memory needed to be allocated for the String table. @@ -2048,15 +2084,19 @@ status = status && verify_interval(SymbolTableSize, minimumSymbolTableSize, (max_uintx / SymbolTable::bucket_size()), "SymbolTable size"); - if (MinHeapFreeRatio > MaxHeapFreeRatio) { - jio_fprintf(defaultStream::error_stream(), - "MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or " - "equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n", - MinHeapFreeRatio, MaxHeapFreeRatio); - status = false; + { + // Using "else if" below to avoid printing two error messages if min > max. + // This will also prevent us from reporting both min>100 and max>100 at the + // same time, but that is less annoying than printing two identical errors IMHO. + FormatBuffer<80> err_msg(""); + if (!verify_MinHeapFreeRatio(err_msg, MinHeapFreeRatio)) { + jio_fprintf(defaultStream::error_stream(), "%s\n", err_msg.buffer()); + status = false; + } else if (!verify_MaxHeapFreeRatio(err_msg, MaxHeapFreeRatio)) { + jio_fprintf(defaultStream::error_stream(), "%s\n", err_msg.buffer()); + status = false; + } } - // Keeping the heap 100% free is hard ;-) so limit it to 99%. 
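The new verify_MinHeapFreeRatio/verify_MaxHeapFreeRatio pair above cross-checks the two percentages and reports at most one error, hence the "else if" in the caller. A standalone sketch of the same checks, with snprintf standing in for HotSpot's FormatBuffer<80>:

#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool is_percentage(uint64_t v) { return v <= 100; }

// Same shape as verify_MinHeapFreeRatio() above; err stands in for FormatBuffer<80>.
static bool verify_min_heap_free_ratio(char* err, size_t len,
                                       uint64_t min_ratio, uint64_t max_ratio) {
  if (!is_percentage(min_ratio)) {
    snprintf(err, len, "MinHeapFreeRatio must have a value between 0 and 100");
    return false;
  }
  if (min_ratio > max_ratio) {
    snprintf(err, len,
             "MinHeapFreeRatio (%llu) must be less than or equal to "
             "MaxHeapFreeRatio (%llu)",
             (unsigned long long)min_ratio, (unsigned long long)max_ratio);
    return false;
  }
  return true;
}

Because the caller checks the minimum first and the maximum only in an else branch, a single bad pair never prints two messages.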
- MinHeapFreeRatio = MIN2(MinHeapFreeRatio, (uintx) 99); // Min/MaxMetaspaceFreeRatio status = status && verify_percentage(MinMetaspaceFreeRatio, "MinMetaspaceFreeRatio"); @@ -2689,7 +2729,7 @@ } else if (match_option(option, "-Xmaxf", &tail)) { char* err; int maxf = (int)(strtod(tail, &err) * 100); - if (*err != '\0' || maxf < 0 || maxf > 100) { + if (*err != '\0' || *tail == '\0' || maxf < 0 || maxf > 100) { jio_fprintf(defaultStream::error_stream(), "Bad max heap free percentage size: %s\n", option->optionString); @@ -2701,7 +2741,7 @@ } else if (match_option(option, "-Xminf", &tail)) { char* err; int minf = (int)(strtod(tail, &err) * 100); - if (*err != '\0' || minf < 0 || minf > 100) { + if (*err != '\0' || *tail == '\0' || minf < 0 || minf > 100) { jio_fprintf(defaultStream::error_stream(), "Bad min heap free percentage size: %s\n", option->optionString); @@ -3646,9 +3686,9 @@ // Set per-collector flags if (UseParallelGC || UseParallelOldGC) { set_parallel_gc_flags(); - } else if (UseConcMarkSweepGC) { // should be done before ParNew check below + } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below set_cms_and_parnew_gc_flags(); - } else if (UseParNewGC) { // skipped if CMS is set above + } else if (UseParNewGC) { // Skipped if CMS is set above set_parnew_gc_flags(); } else if (UseG1GC) { set_g1_gc_flags(); @@ -3662,22 +3702,26 @@ " using -XX:ParallelGCThreads=N"); } } + if (MinHeapFreeRatio == 100) { + // Keeping the heap 100% free is hard ;-) so limit it to 99%. + FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99); + } #else // INCLUDE_ALL_GCS assert(verify_serial_gc_flags(), "SerialGC unset"); #endif // INCLUDE_ALL_GCS - // Initialize Metaspace flags and alignments. + // Initialize Metaspace flags and alignments Metaspace::ergo_initialize(); // Set bytecode rewriting flags set_bytecode_flags(); - // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled. + // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled set_aggressive_opts_flags(); // Turn off biased locking for locking debug mode flags, - // which are subtlely different from each other but neither works with - // biased locking. 
+ // which are subtly different from each other but neither works with + // biased locking if (UseHeavyMonitors #ifdef COMPILER1 || !UseFastLocking @@ -3727,10 +3771,6 @@ // Doing the replace in parent maps helps speculation FLAG_SET_DEFAULT(ReplaceInParentMaps, true); } -#ifndef X86 - // Only on x86 for now - FLAG_SET_DEFAULT(TypeProfileLevel, 0); -#endif #endif if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/arguments.hpp --- a/src/share/vm/runtime/arguments.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/arguments.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -27,6 +27,7 @@ #include "runtime/java.hpp" #include "runtime/perfData.hpp" +#include "utilities/debug.hpp" #include "utilities/top.hpp" // Arguments parses the command line and recognizes options @@ -370,11 +371,16 @@ static jint parse_vm_init_args(const JavaVMInitArgs* args); static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, Flag::Flags origin); static jint finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required); - static bool is_bad_option(const JavaVMOption* option, jboolean ignore, - const char* option_type); + static bool is_bad_option(const JavaVMOption* option, jboolean ignore, const char* option_type); + static bool is_bad_option(const JavaVMOption* option, jboolean ignore) { return is_bad_option(option, ignore, NULL); } + + static bool is_percentage(uintx val) { + return val <= 100; + } + static bool verify_interval(uintx val, uintx min, uintx max, const char* name); static bool verify_min_value(intx val, intx min, const char* name); @@ -440,11 +446,20 @@ static jint apply_ergo(); // Adjusts the arguments after the OS have adjusted the arguments static jint adjust_after_os(); + + // Verifies that the given value will fit as a MinHeapFreeRatio. If not, an error + // message is returned in the provided buffer. + static bool verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio); + + // Verifies that the given value will fit as a MaxHeapFreeRatio. If not, an error + // message is returned in the provided buffer. + static bool verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio); + // Check for consistency in the selection of the garbage collector. static bool check_gc_consistency(); static void check_deprecated_gcs(); static void check_deprecated_gc_flags(); - // Check consistecy or otherwise of VM argument settings + // Check consistency or otherwise of VM argument settings static bool check_vm_args_consistency(); // Check stack pages settings static bool check_stack_pages(); @@ -494,7 +509,7 @@ // -Xprof static bool has_profile() { return _has_profile; } - // -Xms, -Xmx + // -Xms static uintx min_heap_size() { return _min_heap_size; } static void set_min_heap_size(uintx v) { _min_heap_size = v; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/compilationPolicy.cpp --- a/src/share/vm/runtime/compilationPolicy.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/compilationPolicy.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -233,7 +233,7 @@ } void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) { - // Delay next back-branch event but pump up invocation counter to triger + // Delay next back-branch event but pump up invocation counter to trigger // whole method compilation. 
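The extra *tail == '\0' test added to the -Xmaxf/-Xminf parsing in arguments.cpp above closes a corner case: strtod("") returns 0.0 and leaves the end pointer at the terminator, so the old *err != '\0' check silently accepted a bare -Xmaxf as 0 percent. A minimal sketch of the corrected parse:

#include <cstdio>
#include <cstdlib>

// Returns true and stores the percentage on success; rejects trailing junk,
// empty input (the new *tail == '\0' case), and out-of-range values.
static bool parse_heap_free_percentage(const char* tail, int* out) {
  char* err;
  int pct = (int)(strtod(tail, &err) * 100);
  if (*err != '\0' || *tail == '\0' || pct < 0 || pct > 100) {
    return false;
  }
  *out = pct;
  return true;
}

int main() {
  int pct = -1;
  printf("\"0.5\" -> %s (%d)\n",
         parse_heap_free_percentage("0.5", &pct) ? "ok" : "rejected", pct); // ok (50)
  printf("\"\" -> %s\n",
         parse_heap_free_percentage("", &pct) ? "ok" : "rejected");         // rejected
  return 0;
}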
MethodCounters* mcs = m->method_counters(); assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); @@ -251,7 +251,7 @@ // // CounterDecay // -// Interates through invocation counters and decrements them. This +// Iterates through invocation counters and decrements them. This // is done at each safepoint. // class CounterDecay : public AllStatic { @@ -321,7 +321,7 @@ } // This method can be called by any component of the runtime to notify the policy -// that it's recommended to delay the complation of this method. +// that it's recommended to delay the compilation of this method. void NonTieredCompPolicy::delay_compilation(Method* method) { MethodCounters* mcs = method->method_counters(); if (mcs != NULL) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/compilationPolicy.hpp --- a/src/share/vm/runtime/compilationPolicy.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/compilationPolicy.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -72,7 +72,7 @@ // reprofile request virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0; // delay_compilation(method) can be called by any component of the runtime to notify the policy - // that it's recommended to delay the complation of this method. + // that it's recommended to delay the compilation of this method. virtual void delay_compilation(Method* method) = 0; // disable_compilation() is called whenever the runtime decides to disable compilation of the // specified method. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/deoptimization.cpp --- a/src/share/vm/runtime/deoptimization.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/deoptimization.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -380,7 +380,7 @@ frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller - // It's possible that the number of paramters at the call site is + // It's possible that the number of parameters at the call site is // different than number of arguments in the callee when method // handles are used. If the caller is interpreted get the real // value so that the proper amount of space can be added to it's @@ -540,7 +540,7 @@ // popframe condition bit set, we should always clear it now thread->clear_popframe_condition(); #else - // C++ interpeter will clear has_pending_popframe when it enters + // C++ interpreter will clear has_pending_popframe when it enters // with method_resume. For deopt_resume2 we clear it now. if (thread->popframe_forcing_deopt_reexecution()) thread->clear_popframe_condition(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/deoptimization.hpp --- a/src/share/vm/runtime/deoptimization.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/deoptimization.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -206,7 +206,7 @@ // Called by assembly stub after execution has returned to // deoptimized frame and after the stack unrolling. // @argument thread. Thread where stub_frame resides. - // @argument exec_mode. Determines how execution should be continuted in top frame. + // @argument exec_mode. Determines how execution should be continued in top frame. // 0 means continue after current byte code // 1 means exception has happened, handle exception // 2 means reexecute current bytecode (for uncommon traps). 
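The exec_mode contract documented in the deoptimization.hpp hunk above distinguishes three ways to continue in the top frame. An illustrative mapping follows; the enum names are hypothetical, and only the 0/1/2 meanings come from the comment.

// Hypothetical names for the documented exec_mode values.
enum DeoptExecMode {
  kContinueAfterBytecode = 0,  // resume after the current bytecode
  kHandleException       = 1,  // an exception happened, dispatch to handler
  kReexecuteBytecode     = 2   // rerun the current bytecode (uncommon traps)
};

static const char* describe(int exec_mode) {
  switch (exec_mode) {
    case kContinueAfterBytecode: return "continue after current bytecode";
    case kHandleException:       return "handle pending exception";
    case kReexecuteBytecode:     return "reexecute current bytecode";
    default:                     return "unknown mode";
  }
}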
diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/frame.cpp --- a/src/share/vm/runtime/frame.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/frame.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -649,7 +649,7 @@ #endif } -// Return whether the frame is in the VM or os indicating a Hotspot problem. +// Print whether the frame is in the VM or OS indicating a HotSpot problem. // Otherwise, it's likely a bug in the native library that the Java code calls, // hopefully indicating where to submit bugs. void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) { @@ -928,7 +928,7 @@ // klass, and the klass needs to be kept alive while executing. The GCs // don't trace through method pointers, so typically in similar situations // the mirror or the class loader of the klass are installed as a GC root. - // To minimze the overhead of doing that here, we ask the GC to pass down a + // To minimize the overhead of doing that here, we ask the GC to pass down a // closure that knows how to keep klasses alive given a ClassLoaderData. cld_f->do_cld(m->method_holder()->class_loader_data()); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/globals.cpp --- a/src/share/vm/runtime/globals.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/globals.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -31,6 +31,7 @@ #include "utilities/ostream.hpp" #include "utilities/macros.hpp" #include "utilities/top.hpp" +#include "trace/tracing.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/g1_globals.hpp" #endif // INCLUDE_ALL_GCS @@ -62,6 +63,14 @@ MATERIALIZE_FLAGS_EXT +static bool is_product_build() { +#ifdef PRODUCT + return true; +#else + return false; +#endif +} + void Flag::check_writable() { if (is_constant_in_binary()) { fatal(err_msg("flag is constant: %s", _name)); @@ -235,6 +244,27 @@ // Get custom message for this locked flag, or return NULL if // none is available. void Flag::get_locked_message(char* buf, int buflen) const { + buf[0] = '\0'; + if (is_diagnostic() && !is_unlocked()) { + jio_snprintf(buf, buflen, "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n", + _name); + return; + } + if (is_experimental() && !is_unlocked()) { + jio_snprintf(buf, buflen, "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n", + _name); + return; + } + if (is_develop() && is_product_build()) { + jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n", + _name); + return; + } + if (is_notproduct() && is_product_build()) { + jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n", + _name); + return; + } get_locked_message_ext(buf, buflen); } @@ -464,13 +494,13 @@ } // Search the flag table for a named flag -Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) { +Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) { for (Flag* current = &flagTable[0]; current->_name != NULL; current++) { if (str_equal(current->_name, name, length)) { // Found a matching entry. // Don't report notproduct and develop flags in product builds. if (current->is_constant_in_binary()) { - return NULL; + return (return_flag == true ? current : NULL); } // Report locked flags only if allowed. 
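The get_locked_message() hunk above classifies a flag and emits a category-specific unlock hint. A standalone model of that logic, using a simplified FlagKind enum in place of HotSpot's Flag attribute bits:

#include <cstddef>
#include <cstdio>

enum FlagKind { kProduct, kDiagnostic, kExperimental, kDevelop, kNotProduct };

// Same compile-time gate as the is_product_build() added above.
static bool is_product_build() {
#ifdef PRODUCT
  return true;
#else
  return false;
#endif
}

static void locked_message(char* buf, size_t buflen, FlagKind kind,
                           bool unlocked, const char* name) {
  buf[0] = '\0';
  if (kind == kDiagnostic && !unlocked) {
    snprintf(buf, buflen,
             "Error: VM option '%s' is diagnostic and must be enabled via "
             "-XX:+UnlockDiagnosticVMOptions.\n", name);
  } else if (kind == kExperimental && !unlocked) {
    snprintf(buf, buflen,
             "Error: VM option '%s' is experimental and must be enabled via "
             "-XX:+UnlockExperimentalVMOptions.\n", name);
  } else if ((kind == kDevelop || kind == kNotProduct) && is_product_build()) {
    snprintf(buf, buflen,
             "Error: VM option '%s' is available only in a debug build of "
             "the VM.\n", name);
  }
}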
if (!(current->is_unlocked() || current->is_unlocker())) { @@ -564,6 +594,17 @@ return true; } +template <class E, class T> +static void trace_flag_changed(const char* name, const T old_value, const T new_value, const Flag::Flags origin) +{ + E e; + e.set_name(name); + e.set_old_value(old_value); + e.set_new_value(new_value); + e.set_origin(origin); + e.commit(); +} + bool CommandLineFlags::boolAt(char* name, size_t len, bool* value) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; @@ -577,6 +618,7 @@ if (result == NULL) return false; if (!result->is_bool()) return false; bool old_value = result->get_bool(); + trace_flag_changed(name, old_value, *value, origin); result->set_bool(*value); *value = old_value; result->set_origin(origin); @@ -586,6 +628,7 @@ void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type"); + trace_flag_changed(faddr->_name, faddr->get_bool(), value, origin); faddr->set_bool(value); faddr->set_origin(origin); } @@ -603,6 +646,7 @@ if (result == NULL) return false; if (!result->is_intx()) return false; intx old_value = result->get_intx(); + trace_flag_changed(name, old_value, *value, origin); result->set_intx(*value); *value = old_value; result->set_origin(origin); @@ -612,6 +656,7 @@ void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type"); + trace_flag_changed(faddr->_name, faddr->get_intx(), value, origin); faddr->set_intx(value); faddr->set_origin(origin); } @@ -629,6 +674,7 @@ if (result == NULL) return false; if (!result->is_uintx()) return false; uintx old_value = result->get_uintx(); + trace_flag_changed(name, old_value, *value, origin); result->set_uintx(*value); *value = old_value; result->set_origin(origin); @@ -638,6 +684,7 @@ void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_uintx(), "wrong flag type"); + trace_flag_changed(faddr->_name, faddr->get_uintx(), value, origin); faddr->set_uintx(value); faddr->set_origin(origin); } @@ -655,6 +702,7 @@ if (result == NULL) return false; if (!result->is_uint64_t()) return false; uint64_t old_value = result->get_uint64_t(); + trace_flag_changed(name, old_value, *value, origin); result->set_uint64_t(*value); *value = old_value; result->set_origin(origin); @@ -664,6 +712,7 @@ void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type"); + trace_flag_changed(faddr->_name, faddr->get_uint64_t(), value, origin); faddr->set_uint64_t(value); faddr->set_origin(origin); } @@ -681,6 +730,7 @@ if (result == NULL) return false; if (!result->is_double()) return false; double old_value = result->get_double(); + trace_flag_changed(name, old_value, *value, origin); result->set_double(*value); *value = old_value; result->set_origin(origin); @@ -690,6 +740,7 @@ void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_double(), "wrong flag type"); + trace_flag_changed(faddr->_name, faddr->get_double(), value, origin); faddr->set_double(value);
faddr->set_origin(origin); } @@ -707,6 +758,7 @@ if (result == NULL) return false; if (!result->is_ccstr()) return false; ccstr old_value = result->get_ccstr(); + trace_flag_changed(name, old_value, *value, origin); char* new_value = NULL; if (*value != NULL) { new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1, mtInternal); @@ -728,6 +780,7 @@ Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type"); ccstr old_value = faddr->get_ccstr(); + trace_flag_changed(faddr->_name, old_value, value, origin); char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal); strcpy(new_value, value); faddr->set_ccstr(new_value); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/globals.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -241,7 +241,7 @@ // number of flags static size_t numFlags; - static Flag* find_flag(const char* name, size_t length, bool allow_locked = false); + static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false); static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false); void check_writable(); @@ -3135,15 +3135,15 @@ "Maximum size of class area in Metaspace when compressed " \ "class pointers are used") \ \ - product(uintx, MinHeapFreeRatio, 40, \ + manageable(uintx, MinHeapFreeRatio, 40, \ "The minimum percentage of heap free after GC to avoid expansion."\ - " For most GCs this applies to the old generation. In G1 it" \ - " applies to the whole heap. Not supported by ParallelGC.") \ - \ - product(uintx, MaxHeapFreeRatio, 70, \ + " For most GCs this applies to the old generation. In G1 and" \ + " ParallelGC it applies to the whole heap.") \ + \ + manageable(uintx, MaxHeapFreeRatio, 70, \ "The maximum percentage of heap free after GC to avoid shrinking."\ - " For most GCs this applies to the old generation. In G1 it" \ - " applies to the whole heap. Not supported by ParallelGC.") \ + " For most GCs this applies to the old generation. In G1 and" \ + " ParallelGC it applies to the whole heap.") \ \ product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ "Number of milliseconds per MB of free space in the heap") \ @@ -3639,7 +3639,7 @@ product(uintx, MaxDirectMemorySize, 0, \ "Maximum total size of NIO direct-buffer allocations") \ \ - /* temporary developer defined flags */ \ + /* Flags used for temporary code during development */ \ \ diagnostic(bool, UseNewCode, false, \ "Testing Only: Use the new version while testing") \ diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/globals_extension.hpp --- a/src/share/vm/runtime/globals_extension.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/globals_extension.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -31,7 +31,7 @@ // Construct enum of Flag_ constants. -// Parens left off in the following for the enum decl below. +// Parenthesis left off in the following for the enum decl below. #define FLAG_MEMBER(flag) Flag_##flag #define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/handles.hpp --- a/src/share/vm/runtime/handles.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/handles.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -267,7 +267,7 @@ // HandleMarks manually. 
// // A HandleMark constructor will record the current handle area top, and the -// desctructor will reset the top, destroying all handles allocated in between. +// destructor will reset the top, destroying all handles allocated in between. // The following code will therefore NOT work: // // Handle h; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/java.cpp --- a/src/share/vm/runtime/java.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/java.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -608,6 +608,7 @@ HS_DTRACE_WORKAROUND_TAIL_CALL_BUG(); #else /* USDT2 */ HOTSPOT_VM_SHUTDOWN(); + HS_DTRACE_WORKAROUND_TAIL_CALL_BUG(); #endif /* USDT2 */ } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/javaCalls.cpp --- a/src/share/vm/runtime/javaCalls.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/javaCalls.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -302,7 +302,7 @@ // Check if we need to wrap a potential OS exception handler around thread // This is used for e.g. Win32 structured exception handlers assert(THREAD->is_Java_thread(), "only JavaThreads can make JavaCalls"); - // Need to wrap each and everytime, since there might be native code down the + // Need to wrap each and every time, since there might be native code down the // stack that has installed its own exception handlers os::os_exception_wrapper(call_helper, result, &method, args, THREAD); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/jniHandles.cpp --- a/src/share/vm/runtime/jniHandles.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/jniHandles.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -195,8 +195,10 @@ int _count; public: CountHandleClosure(): _count(0) {} - virtual void do_oop(oop* unused) { - _count++; + virtual void do_oop(oop* ooph) { + if (*ooph != JNIHandles::deleted_handle()) { + _count++; + } } virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); } int count() { return _count; } @@ -461,7 +463,7 @@ // Append new block Thread* thread = Thread::current(); Handle obj_handle(thread, obj); - // This can block, so we need to preserve obj accross call. + // This can block, so we need to preserve obj across call. _last->_next = JNIHandleBlock::allocate_block(thread); _last = _last->_next; _allocate_before_rebuild--; @@ -528,7 +530,7 @@ return result; } -// This method is not thread-safe, i.e., must be called whule holding a lock on the +// This method is not thread-safe, i.e., must be called while holding a lock on the // structure. long JNIHandleBlock::memory_usage() const { return length() * sizeof(JNIHandleBlock); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/jniHandles.hpp --- a/src/share/vm/runtime/jniHandles.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/jniHandles.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -106,7 +106,7 @@ JNIHandleBlock* _next; // Link to next block // The following instance variables are only used by the first block in a chain. - // Having two types of blocks complicates the code and the space overhead in negligble. + // Having two types of blocks complicates the code and the space overhead in negligible. 
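The CountHandleClosure fix in the jniHandles.cpp hunk above stops counting free-list slots, which point at a sentinel rather than at NULL, as live handles. A standalone sketch follows; Object and deleted_sentinel are placeholders for oop and JNIHandles::deleted_handle().

#include <cstddef>

struct Object {};                  // stand-in for oop
static Object deleted_sentinel;    // stand-in for JNIHandles::deleted_handle()

static int count_live_handles(Object* const* slots, size_t n) {
  int count = 0;
  for (size_t i = 0; i < n; i++) {
    // The old closure counted every slot; the fix skips freed entries,
    // which point at the sentinel rather than at a live object.
    if (slots[i] != NULL && slots[i] != &deleted_sentinel) {
      count++;
    }
  }
  return count;
}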
JNIHandleBlock* _last; // Last block in use JNIHandleBlock* _pop_frame_link; // Block to restore on PopLocalFrame call oop* _free_list; // Handle free list diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/mutex.cpp --- a/src/share/vm/runtime/mutex.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/mutex.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -507,7 +507,7 @@ _OnDeck = NULL ; // Note that we current drop the inner lock (clear OnDeck) in the slow-path - // epilog immediately after having acquired the outer lock. + // epilogue immediately after having acquired the outer lock. // But instead we could consider the following optimizations: // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation. // This might avoid potential reacquisition of the inner lock in IUlock(). @@ -931,7 +931,7 @@ check_block_state(Self); if (Self->is_Java_thread()) { - // Horribile dictu - we suffer through a state transition + // Horrible dictu - we suffer through a state transition assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex"); ThreadBlockInVM tbivm ((JavaThread *) Self) ; ILock (Self) ; @@ -963,7 +963,7 @@ } -// Returns true if thread succeceed [sic] in grabbing the lock, otherwise false. +// Returns true if thread succeeds in grabbing the lock, otherwise false. bool Monitor::try_lock() { Thread * const Self = Thread::current(); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/mutex.hpp --- a/src/share/vm/runtime/mutex.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/mutex.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -90,7 +90,7 @@ // A special lock: Is a lock where you are guaranteed not to block while you are // holding it, i.e., no vm operation can happen, taking other locks, etc. // NOTE: It is critical that the rank 'special' be the lowest (earliest) - // (except for "event"?) for the deadlock dection to work correctly. + // (except for "event"?) for the deadlock detection to work correctly. // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate, // which being external to the VM are not subject to deadlock detection. // The rank safepoint is used only for synchronization in reaching a @@ -241,7 +241,7 @@ // // Currently, however, the base object is a monitor. Monitor contains all the // logic for wait(), notify(), etc. Mutex extends monitor and restricts the -// visiblity of wait(), notify(), and notify_all(). +// visibility of wait(), notify(), and notify_all(). // // Another viable alternative would have been to have Monitor extend Mutex and // implement all the normal mutex and wait()-notify() logic in Mutex base class. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/mutexLocker.hpp --- a/src/share/vm/runtime/mutexLocker.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/mutexLocker.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -43,7 +43,7 @@ // Mutexes used in the VM. 
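The rank scheme described in the mutex.hpp hunk above enables deadlock detection: a thread may only acquire a lock of strictly lower rank than every lock it already holds, which is why 'special' must be the lowest rank. A simplified sketch of such an assertion, not HotSpot's actual checker:

#include <cassert>
#include <vector>

struct RankedLock { int rank; };

struct ThreadLocks {
  std::vector<const RankedLock*> held;

  void acquire(const RankedLock* l) {
    for (const RankedLock* h : held) {
      // Out-of-order acquisitions are the deadlock-prone ones.
      assert(l->rank < h->rank && "lock rank order violated");
    }
    held.push_back(l);
  }
};

Under this rule, nothing can be acquired while a lowest-rank ('special') lock is held, which is what guarantees the no-blocking property the comment describes.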
extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code -extern Monitor* SystemDictionary_lock; // a lock on the system dictonary +extern Monitor* SystemDictionary_lock; // a lock on the system dictionary extern Mutex* PackageTable_lock; // a lock on the class loader package table extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access extern Mutex* InlineCacheBuffer_lock; // a lock used to guard the InlineCacheBuffer @@ -345,8 +345,8 @@ // - reentrant locking // - locking out of order // -// Only too be used for verify code, where we can relaxe out dead-lock -// dection code a bit (unsafe, but probably ok). This code is NEVER to +// Only to be used for verify code, where we can relax out dead-lock +// detection code a bit (unsafe, but probably ok). This code is NEVER to // be included in a product version. // class VerifyMutexLocker: StackObj { @@ -358,7 +358,7 @@ _mutex = mutex; _reentrant = mutex->owned_by_self(); if (!_reentrant) { - // We temp. diable strict safepoint checking, while we require the lock + // We temp. disable strict safepoint checking, while we require the lock FlagSetting fs(StrictSafepointChecks, false); _mutex->lock(); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/objectMonitor.cpp --- a/src/share/vm/runtime/objectMonitor.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/objectMonitor.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -234,7 +234,7 @@ // * Taken together, the cxq and the EntryList constitute or form a // single logical queue of threads stalled trying to acquire the lock. // We use two distinct lists to improve the odds of a constant-time -// dequeue operation after acquisition (in the ::enter() epilog) and +// dequeue operation after acquisition (in the ::enter() epilogue) and // to reduce heat on the list ends. (c.f. Michael Scott's "2Q" algorithm). // A key desideratum is to minimize queue & monitor metadata manipulation // that occurs while holding the monitor lock -- that is, we want to @@ -677,7 +677,7 @@ // non-null and elect a new "Responsible" timer thread. // // This thread executes: - // ST Responsible=null; MEMBAR (in enter epilog - here) + // ST Responsible=null; MEMBAR (in enter epilogue - here) // LD cxq|EntryList (in subsequent exit) // // Entering threads in the slow/contended path execute: @@ -2031,7 +2031,7 @@ TEVENT (Spin abort -- too many spinners) ; return 0 ; } - // Slighty racy, but benign ... + // Slightly racy, but benign ... Adjust (&_Spinner, 1) ; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/objectMonitor.hpp --- a/src/share/vm/runtime/objectMonitor.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/objectMonitor.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -101,7 +101,7 @@ static int Spinner_offset_in_bytes() { return offset_of(ObjectMonitor, _Spinner); } public: - // Eventaully we'll make provisions for multiple callbacks, but + // Eventually we'll make provisions for multiple callbacks, but // now one will suffice. static int (*SpinCallbackFunction)(intptr_t, int) ; static intptr_t SpinCallbackArgument ; @@ -272,7 +272,7 @@ // type int, or int32_t but not intptr_t. There's no reason // to use 64-bit fields for these variables on a 64-bit JVM. - volatile intptr_t _count; // reference count to prevent reclaimation/deflation + volatile intptr_t _count; // reference count to prevent reclamation/deflation // at stop-the-world time. See deflate_idle_monitors(). 
// _count is approximately |_WaitSet| + |_EntryList| protected: diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/orderAccess.hpp --- a/src/share/vm/runtime/orderAccess.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/orderAccess.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -61,13 +61,13 @@ // // Ensures that Load1 completes before Store2 and any subsequent store // operations. Loads before Load1 may *not* float below Store2 and any -// subseqeuent store operations. +// subsequent store operations. // // StoreLoad: Store1(s); StoreLoad; Load2 // // Ensures that Store1 completes before Load2 and any subsequent load // operations. Stores before Store1 may *not* float below Load2 and any -// subseqeuent load operations. +// subsequent load operations. // // // We define two further operations, 'release' and 'acquire'. They are @@ -176,7 +176,7 @@ // compilers that we currently use (SunStudio, gcc and VC++) respect the // semantics of volatile here. If you build HotSpot using other // compilers, you may need to verify that no compiler reordering occurs -// across the sequence point respresented by the volatile access. +// across the sequence point represented by the volatile access. // // // os::is_MP Considered Redundant @@ -311,7 +311,7 @@ private: // This is a helper that invokes the StubRoutines::fence_entry() // routine if it exists, It should only be used by platforms that - // don't another way to do the inline eassembly. + // don't have another way to do the inline assembly. static void StubRoutines_fence(); }; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/os.cpp --- a/src/share/vm/runtime/os.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/os.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -236,7 +236,7 @@ while (true) { int sig; { - // FIXME : Currently we have not decieded what should be the status + // FIXME : Currently we have not decided what should be the status // for this java thread blocked here. Once we decide about // that we should fix this. sig = os::signal_wait(); @@ -583,7 +583,7 @@ ptrdiff_t size = *size_addr_from_base(start_of_prev_block); u_char* obj = start_of_prev_block + space_before; if (size <= 0 ) { - // start is bad; mayhave been confused by OS data inbetween objects + // start is bad; may have been confused by OS data in between objects // search one more backwards start_of_prev_block = find_cushion_backwards(start_of_prev_block); size = *size_addr_from_base(start_of_prev_block); @@ -1011,7 +1011,7 @@ if (Universe::heap()->is_in(addr)) { HeapWord* p = Universe::heap()->block_start(addr); bool print = false; - // If we couldn't find it it just may mean that heap wasn't parseable + // If we couldn't find it, it just may mean that heap wasn't parsable // See if we were just given an oop directly if (p != NULL && Universe::heap()->block_is_obj(p)) { print = true; @@ -1443,7 +1443,7 @@ // >= 2 physical CPU's and >=2GB of memory, with some fuzz // because the graphics memory (?) sometimes masks physical memory. // If you want to change the definition of a server class machine -// on some OS or platform, e.g., >=4GB on Windohs platforms, +// on some OS or platform, e.g., >=4GB on Windows platforms, // then you'll have to parameterize this method based on that state, // as was done for logical processors here, or replicate and // specialize this method for each platform.
(Or fix os to have diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/os.hpp --- a/src/share/vm/runtime/os.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/os.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -395,7 +395,7 @@ // was equal. However, some platforms mask off faulting addresses // to the page size, so now we just check that the address is // within the page. This makes the thread argument unnecessary, - // but we retain the NULL check to preserve existing behaviour. + // but we retain the NULL check to preserve existing behavior. if (thread == NULL) return false; address page = (address) _mem_serialize_page; return addr >= page && addr < (page + os::vm_page_size()); @@ -430,7 +430,10 @@ static intx current_thread_id(); static int current_process_id(); static int sleep(Thread* thread, jlong ms, bool interruptable); - static int naked_sleep(); + // Short standalone OS sleep suitable for slow path spin loop. + // Ignores Thread.interrupt() (so keep it short). + // ms = 0, will sleep for the least amount of time allowed by the OS. + static void naked_short_sleep(jlong ms); static void infinite_sleep(); // never returns, use with CAUTION static void yield(); // Yields to all threads with same priority enum YieldResult { @@ -540,7 +543,7 @@ // Loads .dll/.so and // in case of error it checks if .dll/.so was built for the - // same architecture as Hotspot is running on + // same architecture as HotSpot is running on static void* dll_load(const char *name, char *ebuf, int ebuflen); // lookup symbol in a shared library diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/park.cpp --- a/src/share/vm/runtime/park.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/park.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -59,58 +59,22 @@ // Start by trying to recycle an existing but unassociated // ParkEvent from the global free list. - for (;;) { - ev = FreeList ; - if (ev == NULL) break ; - // 1: Detach - sequester or privatize the list - // Tantamount to ev = Swap (&FreeList, NULL) - if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) { - continue ; + // Using a spin lock since we are part of the mutex impl. + // 8028280: using concurrent free list without memory management can leak + // pretty badly it turns out. + Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate"); + { + ev = FreeList; + if (ev != NULL) { + FreeList = ev->FreeNext; } - - // We've detached the list. The list in-hand is now - // local to this thread. This thread can operate on the - // list without risk of interference from other threads. - // 2: Extract -- pop the 1st element from the list. - ParkEvent * List = ev->FreeNext ; - if (List == NULL) break ; - for (;;) { - // 3: Try to reattach the residual list - guarantee (List != NULL, "invariant") ; - ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ; - if (Arv == NULL) break ; - - // New nodes arrived. Try to detach the recent arrivals. - if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) { - continue ; - } - guarantee (Arv != NULL, "invariant") ; - // 4: Merge Arv into List - ParkEvent * Tail = List ; - while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ; - Tail->FreeNext = Arv ; - } - break ; } + Thread::SpinRelease(&ListLock); if (ev != NULL) { guarantee (ev->AssociatedWith == NULL, "invariant") ; } else { // Do this the hard way -- materialize a new ParkEvent. - // In rare cases an allocating thread might detach a long list -- - // installing null into FreeList -- and then stall or be obstructed. 
- // A 2nd thread calling Allocate() would see FreeList == null. - // The list held privately by the 1st thread is unavailable to the 2nd thread. - // In that case the 2nd thread would have to materialize a new ParkEvent, - // even though free ParkEvents existed in the system. In this case we end up - // with more ParkEvents in circulation than we need, but the race is - // rare and the outcome is benign. Ideally, the # of extant ParkEvents - // is equal to the maximum # of threads that existed at any one time. - // Because of the race mentioned above, segments of the freelist - // can be transiently inaccessible. At worst we may end up with the - // # of ParkEvents in circulation slightly above the ideal. - // Note that if we didn't have the TSM/immortal constraint, then - // when reattaching, above, we could trim the list. ev = new ParkEvent () ; guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ; } @@ -124,13 +88,14 @@ if (ev == NULL) return ; guarantee (ev->FreeNext == NULL , "invariant") ; ev->AssociatedWith = NULL ; - for (;;) { - // Push ev onto FreeList - // The mechanism is "half" lock-free. - ParkEvent * List = FreeList ; - ev->FreeNext = List ; - if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ; + // Note that if we didn't have the TSM/immortal constraint, then + // when reattaching we could trim the list. + Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease"); + { + ev->FreeNext = FreeList; + FreeList = ev; } + Thread::SpinRelease(&ListLock); } // Override operator new and delete so we can ensure that the @@ -152,7 +117,7 @@ // 6399321 As a temporary measure we copied & modified the ParkEvent:: // allocate() and release() code for use by Parkers. The Parker:: forms -// will eventually be removed as we consolide and shift over to ParkEvents +// will eventually be removed as we consolidate and shift over to ParkEvents // for both builtin synchronization and JSR166 operations. volatile int Parker::ListLock = 0 ; @@ -164,56 +129,21 @@ // Start by trying to recycle an existing but unassociated // Parker from the global free list. - for (;;) { - p = FreeList ; - if (p == NULL) break ; - // 1: Detach - // Tantamount to p = Swap (&FreeList, NULL) - if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) { - continue ; + // 8028280: using concurrent free list without memory management can leak + // pretty badly it turns out. + Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate"); + { + p = FreeList; + if (p != NULL) { + FreeList = p->FreeNext; } - - // We've detached the list. The list in-hand is now - // local to this thread. This thread can operate on the - // list without risk of interference from other threads. - // 2: Extract -- pop the 1st element from the list. - Parker * List = p->FreeNext ; - if (List == NULL) break ; - for (;;) { - // 3: Try to reattach the residual list - guarantee (List != NULL, "invariant") ; - Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ; - if (Arv == NULL) break ; - - // New nodes arrived. Try to detach the recent arrivals. - if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) { - continue ; - } - guarantee (Arv != NULL, "invariant") ; - // 4: Merge Arv into List - Parker * Tail = List ; - while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ; - Tail->FreeNext = Arv ; - } - break ; } + Thread::SpinRelease(&ListLock); if (p != NULL) { guarantee (p->AssociatedWith == NULL, "invariant") ; } else { // Do this the hard way -- materialize a new Parker.. 
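The rewrite above (tracked as 8028280) drops the lock-free detach/privatize/reattach scheme in favor of a spin lock around a plain intrusive free list: only the head node is pushed or popped while the lock is held, so no segment of the list can become transiently unreachable or leak. A self-contained sketch of the pattern, with std::atomic_flag standing in for Thread::SpinAcquire/SpinRelease (illustrative only; the HotSpot versions also pass a diagnostic name to the spin lock and never delete nodes, per the TSM/immortal constraint noted above, and Node/allocate_node/release_node are made-up names):

    #include <atomic>

    struct Node { Node* free_next = nullptr; };

    static std::atomic_flag list_lock = ATOMIC_FLAG_INIT;
    static Node* free_list = nullptr;            // guarded by list_lock

    static Node* allocate_node() {
      while (list_lock.test_and_set(std::memory_order_acquire)) {}  // spin
      Node* n = free_list;
      if (n != nullptr) free_list = n->free_next;    // pop the head only
      list_lock.clear(std::memory_order_release);
      return (n != nullptr) ? n : new Node();        // slow path: materialize
    }

    static void release_node(Node* n) {
      while (list_lock.test_and_set(std::memory_order_acquire)) {}
      n->free_next = free_list;                      // push at the head
      free_list = n;
      list_lock.clear(std::memory_order_release);
    }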
- // In rare cases an allocating thread might detach - // a long list -- installing null into FreeList --and - // then stall. Another thread calling Allocate() would see - // FreeList == null and then invoke the ctor. In this case we - // end up with more Parkers in circulation than we need, but - // the race is rare and the outcome is benign. - // Ideally, the # of extant Parkers is equal to the - // maximum # of threads that existed at any one time. - // Because of the race mentioned above, segments of the - // freelist can be transiently inaccessible. At worst - // we may end up with the # of Parkers in circulation - // slightly above the ideal. p = new Parker() ; } p->AssociatedWith = t ; // Associate p with t @@ -227,11 +157,12 @@ guarantee (p->AssociatedWith != NULL, "invariant") ; guarantee (p->FreeNext == NULL , "invariant") ; p->AssociatedWith = NULL ; - for (;;) { - // Push p onto FreeList - Parker * List = FreeList ; - p->FreeNext = List ; - if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ; + + Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease"); + { + p->FreeNext = FreeList; + FreeList = p; } + Thread::SpinRelease(&ListLock); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/perfData.cpp --- a/src/share/vm/runtime/perfData.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/perfData.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -39,7 +39,7 @@ PerfDataList* PerfDataManager::_constants = NULL; /* - * The jvmstat global and subsysem jvmstat counter name spaces. The top + * The jvmstat global and subsystem jvmstat counter name spaces. The top * level name spaces imply the interface stability level of the counter, * which generally follows the Java package, class, and property naming * conventions. The CounterNS enumeration values should be used to index diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/perfData.hpp --- a/src/share/vm/runtime/perfData.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/perfData.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -116,7 +116,7 @@ * * A PerfData subtype is not required to provide an implementation for * each variability classification. For example, the String type provides - * Variable and Constant variablility classifications in the PerfStringVariable + * Variable and Constant variability classifications in the PerfStringVariable * and PerfStringConstant classes, but does not provide a counter type. * * Performance data are also described by a unit of measure. Units allow @@ -172,10 +172,10 @@ * foo_counter->inc(); * * Creating a performance counter that holds a variably change long - * data value with untis specified in U_Bytes in the "com.sun.ci + * data value with units specified in U_Bytes in the "com.sun.ci * name space. * - * PerfLongVariable* bar_varible; + * PerfLongVariable* bar_variable; * bar_variable = PerfDataManager::create_long_variable(COM_CI, "bar", .* PerfData::U_Bytes, * optionalInitialValue, @@ -203,7 +203,7 @@ * In this example, the PerfData pointer can be ignored as the caller * is relying on the StatSampler PeriodicTask to sample the given * address at a regular interval. The interval is defined by the - * PerfDataSamplingInterval global variable, and is applyied on + * PerfDataSamplingInterval global variable, and is applied on * a system wide basis, not on an per-counter basis. * * Creating a performance counter in an arbitrary name space that utilizes @@ -234,7 +234,7 @@ * the UsePerfData flag. Counters will be created on the c-heap * if UsePerfData is false. 
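Pulling this perfData.hpp comment block's examples together: creation goes through PerfDataManager under the UsePerfData guard, using the TRAPS/CHECK convention shown in the examples above. A condensed sketch (HotSpot-internal API, so it only compiles inside the VM; the wrapper name create_bar_variable, the counter name "bar", and the zero initial value are illustrative):

    void create_bar_variable(TRAPS) {
      if (UsePerfData) {
        PerfLongVariable* bar_variable =
            PerfDataManager::create_long_variable(COM_CI, "bar",
                                                  PerfData::U_Bytes,
                                                  0 /* initial value */,
                                                  CHECK);
        bar_variable->inc();
      }
    }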
* - * Until further noice, all PerfData objects should be created and + * Until further notice, all PerfData objects should be created and * manipulated within a guarded block. The guard variable is * UsePerfData, a product flag set to true by default. This flag may * be removed from the product in the future. @@ -586,7 +586,7 @@ * * The abstraction is not complete. A more general container class * would provide an Iterator abstraction that could be used to - * traverse the lists. This implementation still relys upon integer + * traverse the lists. This implementation still relies upon integer * iterators and the at(int index) method. However, the GrowableArray * is not directly visible outside this class and can be replaced by * some other implementation, as long as that implementation provides diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/perfMemory.hpp --- a/src/share/vm/runtime/perfMemory.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/perfMemory.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -55,7 +55,7 @@ * of the fields must be changed along with their counterparts in the * PerfDataBuffer Java class. The first four bytes of this structure * should never change, or compatibility problems between the monitoring - * applications and Hotspot VMs will result. The reserved fields are + * applications and HotSpot VMs will result. The reserved fields are * available for future enhancements. */ typedef struct { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/reflection.cpp --- a/src/share/vm/runtime/reflection.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/reflection.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -482,7 +482,7 @@ ik = InstanceKlass::cast(hc); // There's no way to make a host class loop short of patching memory. - // Therefore there cannot be a loop here unles there's another bug. + // Therefore there cannot be a loop here unless there's another bug. // Still, let's check for it. assert(--inf_loop_check > 0, "no host_klass loop"); } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/reflection.hpp --- a/src/share/vm/runtime/reflection.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/reflection.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -138,9 +138,9 @@ static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS); public: - // Method invokation through java.lang.reflect.Method + // Method invocation through java.lang.reflect.Method static oop invoke_method(oop method_mirror, Handle receiver, objArrayHandle args, TRAPS); - // Method invokation through java.lang.reflect.Constructor + // Method invocation through java.lang.reflect.Constructor static oop invoke_constructor(oop method_mirror, objArrayHandle args, TRAPS); }; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/registerMap.hpp --- a/src/share/vm/runtime/registerMap.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/registerMap.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -70,7 +70,7 @@ // 3) The RegisterMap keeps track of the values of callee-saved registers // from frame to frame (hence, the name). For some stack traversal the // values of the callee-saved registers does not matter, e.g., if you -// only need the static properies such as frame type, pc, and such. +// only need the static properties such as frame type, pc, and such. 
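The registerMap.hpp comment resumes just below with the instantiation that turns updating off, RegisterMap map(thread, false). A sketch of the stack walk that form is meant for, when only static frame properties are needed (HotSpot-internal API, shape only; the loop body is a placeholder):

    RegisterMap map(thread, false);  // false: don't track callee-saved registers
    for (frame fr = thread->last_frame();
         !fr.is_first_frame();
         fr = fr.sender(&map)) {
      // Only static properties such as fr.pc() and the frame type are
      // consulted here, so the untracked register values are never needed.
    }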
// Updating of the RegisterMap can be turned off by instantiating the // register map as: RegisterMap map(thread, false); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/relocator.cpp --- a/src/share/vm/runtime/relocator.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/relocator.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -141,7 +141,7 @@ } // size is the new size of the instruction at bci. Hence, if size is less than the current -// instruction sice, we will shrink the code. +// instruction size, we will shrink the code. methodHandle Relocator::insert_space_at(int bci, int size, u_char inst_buffer[], TRAPS) { _changes = new GrowableArray<ChangeItem*> (10); _changes->push(new ChangeWiden(bci, size, inst_buffer)); @@ -192,7 +192,7 @@ // Execute operation if (!ci->handle_code_change(this)) return false; - // Shuffel items up + // Shuffle items up for (int index = 1; index < _changes->length(); index++) { _changes->at_put(index-1, _changes->at(index)); } @@ -214,7 +214,7 @@ } // We need a special instruction size method, since lookupswitches and tableswitches might not be -// properly alligned during relocation +// properly aligned during relocation int Relocator::rc_instr_len(int bci) { Bytecodes::Code bc= code_at(bci); switch (bc) { @@ -611,7 +611,7 @@ // In case we have shrunken a tableswitch/lookupswitch statement, we store the last // bytes that get overwritten. We have to copy the bytes after the change_jumps method - // has been called, since it is likly to update last offset in a tableswitch/lookupswitch + // has been called, since it is likely to update last offset in a tableswitch/lookupswitch if (delta < 0) { assert(delta>=-3, "we cannot overwrite more than 3 bytes"); memcpy(_overwrite, addr_at(bci + ilen + delta), -delta); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/safepoint.cpp --- a/src/share/vm/runtime/safepoint.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/safepoint.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -156,7 +156,7 @@ // stopped by different mechanisms: // // 1. Running interpreted - // The interpeter dispatch table is changed to force it to + // The interpreter dispatch table is changed to force it to // check for a safepoint condition between bytecodes. // 2. Running in native code // When returning from the native code, a Java thread must check @@ -282,7 +282,7 @@ // See the comments in synchronizer.cpp for additional remarks on spinning. // // In the future we might: - // 1. Modify the safepoint scheme to avoid potentally unbounded spinning. + // 1. Modify the safepoint scheme to avoid potentially unbounded spinning. // This is tricky as the path used by a thread exiting the JVM (say on // on JNI call-out) simply stores into its state field. The burden // is placed on the VM thread, which must poll (spin). @@ -489,7 +489,7 @@ ConcurrentGCThread::safepoint_desynchronize(); } #endif // INCLUDE_ALL_GCS - // record this time so VMThread can keep track how much time has elasped + // record this time so VMThread can keep track of how much time has elapsed // since last safepoint. _end_of_last_safepoint = os::javaTimeMillis(); } @@ -826,7 +826,7 @@ void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason) { if (!timeout_error_printed) { timeout_error_printed = true; - // Print out the thread infor which didn't reach the safepoint for debugging + // Print out the thread info which didn't reach the safepoint for debugging // purposes (useful when there are lots of threads in the debugger).
tty->print_cr(""); tty->print_cr("# SafepointSynchronize::begin: Timeout detected:"); @@ -1093,7 +1093,7 @@ if (caller_fr.is_deoptimized_frame()) { // The exception patch will destroy registers that are still // live and will be needed during deoptimization. Defer the - // Async exception should have defered the exception until the + // Async exception should have deferred the exception until the // next safepoint which will be detected when we get into // the interpreter so if we have an exception now things // are messed up. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/safepoint.hpp --- a/src/share/vm/runtime/safepoint.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/safepoint.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -59,7 +59,7 @@ public: enum SynchronizeState { _not_synchronized = 0, // Threads not synchronized at a safepoint - // Keep this value 0. See the coment in do_call_back() + // Keep this value 0. See the comment in do_call_back() _synchronizing = 1, // Synchronizing in progress _synchronized = 2 // All Java threads are stopped at a safepoint. Only VM thread is running }; @@ -91,7 +91,7 @@ } SafepointStats; private: - static volatile SynchronizeState _state; // Threads might read this flag directly, without acquireing the Threads_lock + static volatile SynchronizeState _state; // Threads might read this flag directly, without acquiring the Threads_lock static volatile int _waiting_to_block; // number of threads we are waiting for to block static int _current_jni_active_count; // Counts the number of active critical natives during the safepoint @@ -106,7 +106,7 @@ private: static long _end_of_last_safepoint; // Time of last safepoint in milliseconds - // statistics + // Statistics static jlong _safepoint_begin_time; // time when safepoint begins static SafepointStats* _safepoint_stats; // array of SafepointStats struct static int _cur_stat_index; // current index to the above array @@ -155,7 +155,7 @@ _current_jni_active_count++; } - // Called when a thread volantary blocks + // Called when a thread voluntarily blocks static void block(JavaThread *thread); static void signal_thread_at_safepoint() { _waiting_to_block--; } @@ -172,7 +172,7 @@ static bool is_cleanup_needed(); static void do_cleanup_tasks(); - // debugging + // Debugging static void print_state() PRODUCT_RETURN; static void safepoint_msg(const char* format, ...) PRODUCT_RETURN; @@ -183,7 +183,7 @@ static void set_is_at_safepoint() { _state = _synchronized; } static void set_is_not_at_safepoint() { _state = _not_synchronized; } - // assembly support + // Assembly support static address address_of_state() { return (address)&_state; } static address safepoint_counter_addr() { return (address)&_safepoint_counter; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/sharedRuntime.cpp --- a/src/share/vm/runtime/sharedRuntime.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/sharedRuntime.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -472,7 +472,7 @@ return (jdouble)x; JRT_END -// Exception handling accross interpreter/compiler boundaries +// Exception handling across interpreter/compiler boundaries // // exception_handler_for_return_address(...) returns the continuation address. // The continuation address is the entry point of the exception handler of the @@ -694,8 +694,8 @@ // Allow abbreviated catch tables. The idea is to allow a method // to materialize its exceptions without committing to the exact // routing of exceptions. 
In particular this is needed for adding - a synthethic handler to unlock monitors when inlining - synchonized methods since the unlock path isn't represented in + a synthetic handler to unlock monitors when inlining + synchronized methods since the unlock path isn't represented in // the bytecodes. t = table.entry_for(catch_pco, -1, 0); } @@ -819,7 +819,7 @@ // Exception happened in CodeCache. Must be either: // 1. Inline-cache check in C2I handler blob, // 2. Inline-cache check in nmethod, or - // 3. Implict null exception in nmethod + // 3. Implicit null exception in nmethod if (!cb->is_nmethod()) { bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(); @@ -2850,7 +2850,7 @@ // called from very start of a compiled OSR nmethod. A temp array is // allocated to hold the interesting bits of the interpreter frame. All // active locks are inflated to allow them to move. The displaced headers and -// active interpeter locals are copied into the temp buffer. Then we return +// active interpreter locals are copied into the temp buffer. Then we return // back to the compiled code. The compiled code then pops the current // interpreter frame off the stack and pushes a new compiled frame. Then it // copies the interpreter locals and displaced headers where it wants. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/sharedRuntime.hpp --- a/src/share/vm/runtime/sharedRuntime.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/sharedRuntime.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -382,7 +382,7 @@ // present if we see that compiled code is present the compiled call site // will be patched/re-resolved so that later calls will run compiled. - // Aditionally a c2i blob need to have a unverified entry because it can be reached + // Additionally a c2i blob needs to have an unverified entry because it can be reached // in situations where the call site is an inlined cache site and may go megamorphic. // A i2c adapter is simpler than the c2i adapter. This is because it is assumed @@ -576,7 +576,7 @@ // arguments for a Java-compiled call, and jumps to Rmethod-> code()-> // code_begin(). It is broken to call it without an nmethod assigned. // The usual behavior is to lift any register arguments up out of the -// stack and possibly re-pack the extra arguments to be contigious. +// stack and possibly re-pack the extra arguments to be contiguous. // I2C adapters will save what the interpreter's stack pointer will be // after arguments are popped, then adjust the interpreter's frame // size to force alignment and possibly to repack the arguments. @@ -593,7 +593,7 @@ // outgoing stack args will be dead after the copy. // // Native wrappers, like adapters, marshal arguments. Unlike adapters they -// also perform an offical frame push & pop. They have a call to the native +// also perform an official frame push & pop. They have a call to the native // routine in their middles and end in a return (instead of ending in a jump). // The native wrappers are stored in real nmethods instead of the BufferBlobs // used by the adapters. The code generation happens here because it's very @@ -610,7 +610,7 @@ #ifdef ASSERT // Captures code and signature used to generate this adapter when - // verifing adapter equivalence. + // verifying adapter equivalence.
unsigned char* _saved_code; int _saved_code_length; #endif diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/sharedRuntimeTrans.cpp --- a/src/share/vm/runtime/sharedRuntimeTrans.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/sharedRuntimeTrans.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -113,7 +113,7 @@ } /* __ieee754_log(x) - * Return the logrithm of x + * Return the logarithm of x * * Method : * 1. Argument Reduction: find k and f such that diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/sharedRuntimeTrig.cpp --- a/src/share/vm/runtime/sharedRuntimeTrig.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/sharedRuntimeTrig.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -223,7 +223,7 @@ * * fq[] final product of x*(2/pi) in fq[0],..,fq[jk] * - * ih integer. If >0 it indicats q[] is >= 0.5, hence + * ih integer. If >0 it indicates q[] is >= 0.5, hence * it also indicates the *sign* of the result. * */ @@ -347,7 +347,7 @@ if(z==0.0) { jz -= 1; q0 -= 24; while(iq[jz]==0) { jz--; q0-=24;} - } else { /* break z into 24-bit if neccessary */ + } else { /* break z into 24-bit if necessary */ z = scalbnA(z,-q0); if(z>=two24B) { fw = (double)((int)(twon24*z)); @@ -409,7 +409,7 @@ /* * ==================================================== - * Copyright (c) 1993 Oracle and/or its affilates. All rights reserved. + * Copyright (c) 1993 Oracle and/or its affiliates. All rights reserved. * * Developed at SunPro, a Sun Microsystems, Inc. business. * Permission to use, copy, modify, and distribute this diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/signature.cpp --- a/src/share/vm/runtime/signature.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/signature.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -152,7 +152,7 @@ _parameter_index = 0; } -// Optimized version of iterat_parameters when fingerprint is known +// Optimized version of iterate_parameters when fingerprint is known void SignatureIterator::iterate_parameters( uint64_t fingerprint ) { uint64_t saved_fingerprint = fingerprint; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/simpleThresholdPolicy.cpp --- a/src/share/vm/runtime/simpleThresholdPolicy.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -387,7 +387,7 @@ int bci, CompLevel level, nmethod* nm, JavaThread* thread) { // If the method is already compiling, quickly bail out. if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) { - // Use loop event as an opportinity to also check there's been + // Use loop event as an opportunity to also check there's been // enough calls. CompLevel cur_level = comp_level(mh()); CompLevel next_level = call_event(mh(), cur_level); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/statSampler.cpp --- a/src/share/vm/runtime/statSampler.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/statSampler.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -222,8 +222,8 @@ * The list of System Properties that have corresponding PerfData * string instrumentation created by retrieving the named property's * value from System.getProperty() and unconditionally creating a - * PerfStringConstant object initialized to the retreived value. This - * is not an exhustive list of Java properties with corresponding string + * PerfStringConstant object initialized to the retrieved value. 
This + * is not an exhaustive list of Java properties with corresponding string * instrumentation as the create_system_property_instrumentation() method * creates other property based instrumentation conditionally. */ @@ -325,7 +325,7 @@ // create string instrumentation for various Java properties. create_system_property_instrumentation(CHECK); - // hotspot flags (from .hotspotrc) and args (from command line) + // HotSpot flags (from .hotspotrc) and args (from command line) // PerfDataManager::create_string_constant(JAVA_RT, "vmFlags", Arguments::jvm_flags(), CHECK); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/stubCodeGenerator.hpp --- a/src/share/vm/runtime/stubCodeGenerator.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/stubCodeGenerator.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -111,7 +111,7 @@ }; -// Stack-allocated helper class used to assciate a stub code with a name. +// Stack-allocated helper class used to associate a stub code with a name. // All stub code generating functions that use a StubCodeMark will be registered // in the global StubCodeDesc list and the generated stub code can be identified // later via an address pointing into it. diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/synchronizer.cpp --- a/src/share/vm/runtime/synchronizer.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/synchronizer.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -737,10 +737,10 @@ } // Be aware of this method could revoke bias of the lock object. -// This method querys the ownership of the lock handle specified by 'h_obj'. +// This method queries the ownership of the lock handle specified by 'h_obj'. // If the current thread owns the lock, it returns owner_self. If no // thread owns the lock, it returns owner_none. Otherwise, it will return -// ower_other. +// owner_other. ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership (JavaThread *self, Handle h_obj) { // The caller must beware this method can revoke bias, and diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/synchronizer.hpp --- a/src/share/vm/runtime/synchronizer.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/synchronizer.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -49,7 +49,7 @@ // to use enter() and exit() in order to make sure user be ware // of the performance and semantics difference. They are normally // used by ObjectLocker etc. The interpreter and compiler use - // assembly copies of these routines. Please keep them synchornized. + // assembly copies of these routines. Please keep them synchronized. // // attempt_rebias flag is used by UseBiasedLocking implementation static void fast_enter (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/thread.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -767,7 +767,7 @@ void JavaThread::record_jump(address target, address instr, const char* file, int line) { // This should not need to be atomic as the only way for simultaneous - // updates is via interrupts. Even then this should be rare or non-existant + // updates is via interrupts. 
Even then this should be rare or non-existent // and we don't care that much anyway. int index = _jmp_ring_index; @@ -925,10 +925,10 @@ // Threads_lock is special, since the safepoint synchronization will not start before this is // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock, // since it is used to transfer control between JavaThreads and the VMThread - // Do not *exclude* any locks unless you are absolutly sure it is correct. Ask someone else first! + // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first! if ( (cur->allow_vm_block() && cur != Threads_lock && - cur != Compile_lock && // Temporary: should not be necessary when we get spearate compilation + cur != Compile_lock && // Temporary: should not be necessary when we get separate compilation cur != VMOperationRequest_lock && cur != VMOperationQueue_lock) || cur->rank() == Mutex::special) { @@ -1271,7 +1271,7 @@ time_slept = 0; time_before_loop = now; } else { - // need to recalulate since we might have new tasks in _tasks + // need to recalculate since we might have new tasks in _tasks time_slept = (int) ((now - time_before_loop) / 1000000); } @@ -1638,7 +1638,7 @@ // initialize thread-local alloc buffer related fields this->initialize_tlab(); - // used to test validitity of stack trace backs + // used to test validity of stack trace backs this->record_base_of_stack_pointer(); // Record real stack base and size. @@ -3301,6 +3301,58 @@ // If CompilerThreads ever become non-JavaThreads, add them here } + +void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) { + TraceTime timer("Initialize java.lang classes", TraceStartupTime); + + if (EagerXrunInit && Arguments::init_libraries_at_startup()) { + create_vm_init_libraries(); + } + + initialize_class(vmSymbols::java_lang_String(), CHECK); + + // Initialize java_lang.System (needed before creating the thread) + initialize_class(vmSymbols::java_lang_System(), CHECK); + initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK); + Handle thread_group = create_initial_thread_group(CHECK); + Universe::set_main_thread_group(thread_group()); + initialize_class(vmSymbols::java_lang_Thread(), CHECK); + oop thread_object = create_initial_thread(thread_group, main_thread, CHECK); + main_thread->set_threadObj(thread_object); + // Set thread status to running since main thread has + // been started and running. + java_lang_Thread::set_thread_status(thread_object, + java_lang_Thread::RUNNABLE); + + // The VM creates & returns objects of this class. Make sure it's initialized. + initialize_class(vmSymbols::java_lang_Class(), CHECK); + + // The VM preresolves methods to these classes. 
Make sure that they get initialized + initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK); + initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK); + call_initializeSystemClass(CHECK); + + // get the Java runtime name after java.lang.System is initialized + JDK_Version::set_runtime_name(get_java_runtime_name(THREAD)); + JDK_Version::set_runtime_version(get_java_runtime_version(THREAD)); + + // an instance of OutOfMemory exception has been allocated earlier + initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK); + initialize_class(vmSymbols::java_lang_NullPointerException(), CHECK); + initialize_class(vmSymbols::java_lang_ClassCastException(), CHECK); + initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK); + initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK); + initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK); + initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK); + initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK); +} + +void Threads::initialize_jsr292_core_classes(TRAPS) { + initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK); + initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK); + initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK); +} + jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { extern void JDK_Version_init(); @@ -3320,7 +3372,7 @@ // Initialize system properties. Arguments::init_system_properties(); - // So that JDK version can be used as a discrimintor when parsing arguments + // So that JDK version can be used as a discriminator when parsing arguments JDK_Version_init(); // Update/Initialize System properties after JDK version number is known @@ -3359,7 +3411,7 @@ jint adjust_after_os_result = Arguments::adjust_after_os(); if (adjust_after_os_result != JNI_OK) return adjust_after_os_result; - // intialize TLS + // initialize TLS ThreadLocalStorage::init(); // Bootstrap native memory tracking, so it can start recording memory @@ -3470,13 +3522,13 @@ VMThread::execute(&verify_op); } - EXCEPTION_MARK; + Thread* THREAD = Thread::current(); // At this point, the Universe is initialized, but we have not executed // any byte code. Now is a good time (the only time) to dump out the // internal state of the JVM for sharing. if (DumpSharedSpaces) { - MetaspaceShared::preload_and_dump(CHECK_0); + MetaspaceShared::preload_and_dump(CHECK_JNI_ERR); ShouldNotReachHere(); } @@ -3487,74 +3539,12 @@ // Notify JVMTI agents that VM has started (JNI is up) - nop if no agents. JvmtiExport::post_vm_start(); - { - TraceTime timer("Initialize java.lang classes", TraceStartupTime); - - if (EagerXrunInit && Arguments::init_libraries_at_startup()) { - create_vm_init_libraries(); - } - - initialize_class(vmSymbols::java_lang_String(), CHECK_0); - - // Initialize java_lang.System (needed before creating the thread) - initialize_class(vmSymbols::java_lang_System(), CHECK_0); - initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK_0); - Handle thread_group = create_initial_thread_group(CHECK_0); - Universe::set_main_thread_group(thread_group()); - initialize_class(vmSymbols::java_lang_Thread(), CHECK_0); - oop thread_object = create_initial_thread(thread_group, main_thread, CHECK_0); - main_thread->set_threadObj(thread_object); - // Set thread status to running since main thread has - // been started and running. 
- java_lang_Thread::set_thread_status(thread_object, - java_lang_Thread::RUNNABLE); - - // The VM creates & returns objects of this class. Make sure it's initialized. - initialize_class(vmSymbols::java_lang_Class(), CHECK_0); - - // The VM preresolves methods to these classes. Make sure that they get initialized - initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK_0); - initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK_0); - call_initializeSystemClass(CHECK_0); - - // get the Java runtime name after java.lang.System is initialized - JDK_Version::set_runtime_name(get_java_runtime_name(THREAD)); - JDK_Version::set_runtime_version(get_java_runtime_version(THREAD)); - - // an instance of OutOfMemory exception has been allocated earlier - initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK_0); - initialize_class(vmSymbols::java_lang_NullPointerException(), CHECK_0); - initialize_class(vmSymbols::java_lang_ClassCastException(), CHECK_0); - initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK_0); - initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK_0); - initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK_0); - initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK_0); - initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK_0); - } - - // See : bugid 4211085. - // Background : the static initializer of java.lang.Compiler tries to read - // property"java.compiler" and read & write property "java.vm.info". - // When a security manager is installed through the command line - // option "-Djava.security.manager", the above properties are not - // readable and the static initializer for java.lang.Compiler fails - // resulting in a NoClassDefFoundError. This can happen in any - // user code which calls methods in java.lang.Compiler. - // Hack : the hack is to pre-load and initialize this class, so that only - // system domains are on the stack when the properties are read. - // Currently even the AWT code has calls to methods in java.lang.Compiler. - // On the classic VM, java.lang.Compiler is loaded very early to load the JIT. - // Future Fix : the best fix is to grant everyone permissions to read "java.compiler" and - // read and write"java.vm.info" in the default policy file. See bugid 4211383 - // Once that is done, we should remove this hack. - initialize_class(vmSymbols::java_lang_Compiler(), CHECK_0); - - // More hackery - the static initializer of java.lang.Compiler adds the string "nojit" to - // the java.vm.info property if no jit gets loaded through java.lang.Compiler (the hotspot - // compiler does not get loaded through java.lang.Compiler). "java -version" with the - // hotspot vm says "nojit" all the time which is confusing. So, we reset it here. - // This should also be taken out as soon as 4211383 gets fixed. - reset_vm_info_property(CHECK_0); + initialize_java_lang_classes(main_thread, CHECK_JNI_ERR); + + // We need this for ClassDataSharing - the initial vm.info property is set + // with the default value of CDS "sharing" which may be reset through + // command line options. + reset_vm_info_property(CHECK_JNI_ERR); quicken_jni_functions(); @@ -3583,10 +3573,7 @@ // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and // set_init_completed has just been called, causing exceptions not to be shortcut // anymore. We call vm_exit_during_initialization directly instead. 
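The lines that follow replace this hand-written pending-exception block with the new CHECK_JNI_ERR macro, defined in the exceptions.hpp hunk near the end of this patch as CHECK_(JNI_ERR). Schematically, mirroring the CHECK_AND_CLEAR definitions visible there, the rewritten call

    SystemDictionary::compute_java_system_loader(CHECK_JNI_ERR);

expands to roughly

    SystemDictionary::compute_java_system_loader(THREAD);
    if (HAS_PENDING_EXCEPTION) { return JNI_ERR; }

so create_vm() now returns JNI_ERR when initialization leaves an exception pending, instead of each call site spelling out the HAS_PENDING_EXCEPTION check and exiting by hand.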
- SystemDictionary::compute_java_system_loader(THREAD); - if (HAS_PENDING_EXCEPTION) { - vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); - } + SystemDictionary::compute_java_system_loader(CHECK_JNI_ERR); #if INCLUDE_ALL_GCS // Support for ConcurrentMarkSweep. This should be cleaned up @@ -3594,12 +3581,9 @@ // once things are properly refactored. XXX YSR if (UseConcMarkSweepGC || UseG1GC) { if (UseConcMarkSweepGC) { - ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD); + ConcurrentMarkSweepThread::makeSurrogateLockerThread(CHECK_JNI_ERR); } else { - ConcurrentMarkThread::makeSurrogateLockerThread(THREAD); - } - if (HAS_PENDING_EXCEPTION) { - vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); + ConcurrentMarkThread::makeSurrogateLockerThread(CHECK_JNI_ERR); } } #endif // INCLUDE_ALL_GCS @@ -3642,19 +3626,16 @@ CompileBroker::compilation_init(); #endif + // Pre-initialize some JSR292 core classes to avoid deadlock during class loading. + // It is done after compilers are initialized, because otherwise compilations of + // signature polymorphic MH intrinsics can be missed + // (see SystemDictionary::find_method_handle_intrinsic). if (EnableInvokeDynamic) { - // Pre-initialize some JSR292 core classes to avoid deadlock during class loading. - // It is done after compilers are initialized, because otherwise compilations of - // signature polymorphic MH intrinsics can be missed - // (see SystemDictionary::find_method_handle_intrinsic). - initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0); - initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0); - initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0); + initialize_jsr292_core_classes(CHECK_JNI_ERR); } #if INCLUDE_MANAGEMENT Management::initialize(THREAD); -#endif // INCLUDE_MANAGEMENT if (HAS_PENDING_EXCEPTION) { // management agent fails to start possibly due to @@ -3662,6 +3643,7 @@ // stack trace if appropriate. Simply exit VM. vm_exit(1); } +#endif // INCLUDE_MANAGEMENT if (Arguments::has_profile()) FlatProfiler::engage(main_thread, true); if (MemProfiling) MemProfiler::engage(); @@ -4156,7 +4138,7 @@ // but the garbage collector must provide a safe context for them to run. // In particular, these things should never be called when the Threads_lock // is held by some other thread. (Note: the Safepoint abstraction also -// uses the Threads_lock to gurantee this property. It also makes sure that +// uses the Threads_lock to guarantee this property. It also makes sure that // all threads gets blocked when exiting or starting). void Threads::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { @@ -4446,9 +4428,7 @@ ++ctr ; if ((ctr & 0xFFF) == 0 || !os::is_MP()) { if (Yields > 5) { - // Consider using a simple NakedSleep() instead. - // Then SpinAcquire could be called by non-JVM threads - Thread::current()->_ParkEvent->park(1) ; + os::naked_short_sleep(1); } else { os::NakedYield() ; ++Yields ; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/thread.hpp --- a/src/share/vm/runtime/thread.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/thread.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1231,7 +1231,7 @@ void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; } vframeArray* vframe_array_head() const { return _vframe_array_head; } - // Side structure for defering update of java frame locals until deopt occurs + // Side structure for deferring update of java frame locals until deopt occurs GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; } void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet*>* vf) { _deferred_locals_updates = vf; } @@ -1891,6 +1891,8 @@ static bool _vm_complete; #endif + static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS); + static void initialize_jsr292_core_classes(TRAPS); public: // Thread management // force_daemon is a concession to JNI, where we may need to add a diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/unhandledOops.hpp --- a/src/share/vm/runtime/unhandledOops.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/unhandledOops.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -34,7 +34,7 @@ // destructor. The constructor adds the oop address on a list // off each thread and the destructor removes the oop. At a potential // safepoint, the stack addresses of the local variable oops are trashed -// with a recognizeable value. If the local variable is used again, it +// with a recognizable value. If the local variable is used again, it // will segfault, indicating an unsafe use of that oop. // eg: // oop o; //register &o on list diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/vframeArray.hpp --- a/src/share/vm/runtime/vframeArray.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/vframeArray.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -53,7 +53,7 @@ frame _frame; // the interpreter frame we will unpack into int _bci; // raw bci for this vframe - bool _reexecute; // whether sould we reexecute this bytecode + bool _reexecute; // whether we should reexecute this bytecode Method* _method; // the method for this vframe MonitorChunk* _monitors; // active monitors for this vframe StackValueCollection* _locals; @@ -158,7 +158,7 @@ // Tells whether index is within bounds. bool is_within_bounds(int index) const { return 0 <= index && index < frames(); } - // Accessores for instance variable + // Accessors for instance variable int frames() const { return _frames; } static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/virtualspace.cpp --- a/src/share/vm/runtime/virtualspace.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/virtualspace.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -551,10 +551,10 @@ // Determine which regions need to grow in this expand_by call. // If you are growing in the lower region, high() must be in that - // region so calcuate the size based on high(). For the middle and + // region so calculate the size based on high(). For the middle and // upper regions, determine the starting point of growth based on the // location of high(). By getting the MAX of the region's low address - // (or the prevoius region's high address) and high(), we can tell if it + // (or the previous region's high address) and high(), we can tell if it // is an intra or inter region growth.
size_t lower_needs = 0; if (aligned_lower_new_high > lower_high()) { diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/runtime/vm_operations.hpp --- a/src/share/vm/runtime/vm_operations.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/runtime/vm_operations.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -154,7 +154,7 @@ void set_next(VM_Operation *next) { _next = next; } void set_prev(VM_Operation *prev) { _prev = prev; } - // Configuration. Override these appropriatly in subclasses. + // Configuration. Override these appropriately in subclasses. virtual VMOp_Type type() const = 0; virtual Mode evaluation_mode() const { return _safepoint; } virtual bool allow_nested_vm_operations() const { return false; } diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/services/attachListener.cpp --- a/src/share/vm/services/attachListener.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/services/attachListener.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -282,6 +282,20 @@ return JNI_ERR; } } + + if (strncmp(name, "MaxHeapFreeRatio", 17) == 0) { + FormatBuffer<80> err_msg(""); + if (!Arguments::verify_MaxHeapFreeRatio(err_msg, value)) { + out->print_cr(err_msg.buffer()); + return JNI_ERR; + } + } else if (strncmp(name, "MinHeapFreeRatio", 17) == 0) { + FormatBuffer<80> err_msg(""); + if (!Arguments::verify_MinHeapFreeRatio(err_msg, value)) { + out->print_cr(err_msg.buffer()); + return JNI_ERR; + } + } bool res = CommandLineFlags::uintxAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND); if (! res) { out->print_cr("setting flag %s failed", name); diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/services/diagnosticCommand.cpp --- a/src/share/vm/services/diagnosticCommand.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/services/diagnosticCommand.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/os.hpp" #include "services/diagnosticArgument.hpp" #include "services/diagnosticCommand.hpp" #include "services/diagnosticFramework.hpp" @@ -44,6 +45,7 @@ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); @@ -610,8 +612,7 @@ } JMXStartLocalDCmd::JMXStartLocalDCmd(outputStream *output, bool heap_allocated) : - DCmd(output, heap_allocated) -{ + DCmd(output, heap_allocated) { // do nothing } @@ -632,7 +633,6 @@ JavaCalls::call_static(&result, ik, vmSymbols::startLocalAgent_name(), vmSymbols::void_method_signature(), CHECK); } - void JMXStopRemoteDCmd::execute(DCmdSource source, TRAPS) { ResourceMark rm(THREAD); HandleMark hm(THREAD); @@ -650,3 +650,12 @@ JavaCalls::call_static(&result, ik, vmSymbols::stopRemoteAgent_name(), vmSymbols::void_method_signature(), CHECK); } +VMDynamicLibrariesDCmd::VMDynamicLibrariesDCmd(outputStream *output, bool heap_allocated) : + DCmd(output, heap_allocated) { + // do nothing +} + +void VMDynamicLibrariesDCmd::execute(DCmdSource source, TRAPS) { + os::print_dll_info(output()); + output()->cr(); +} diff -r 4c8bda53850f -r b2fee789d23f 
src/share/vm/services/diagnosticCommand.hpp --- a/src/share/vm/services/diagnosticCommand.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/services/diagnosticCommand.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -132,6 +132,29 @@ virtual void execute(DCmdSource source, TRAPS); }; +class VMDynamicLibrariesDCmd : public DCmd { +public: + VMDynamicLibrariesDCmd(outputStream* output, bool heap); + static const char* name() { + return "VM.dynlibs"; + } + static const char* description() { + return "Print loaded dynamic libraries."; + } + static const char* impact() { + return "Low"; + } + static const JavaPermission permission() { + JavaPermission p = {"java.lang.management.ManagementPermission", + "monitor", NULL}; + return p; + } + static int num_arguments() { + return 0; + }; + virtual void execute(DCmdSource source, TRAPS); +}; + class VMUptimeDCmd : public DCmdWithParser { protected: DCmdArgument _date; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/services/management.cpp --- a/src/share/vm/services/management.cpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/services/management.cpp Tue Feb 11 11:26:05 2014 -0800 @@ -1833,6 +1833,18 @@ succeed = CommandLineFlags::intxAtPut(name, &ivalue, Flag::MANAGEMENT); } else if (flag->is_uintx()) { uintx uvalue = (uintx)new_value.j; + + if (strncmp(name, "MaxHeapFreeRatio", 17) == 0) { + FormatBuffer<80> err_msg(""); + if (!Arguments::verify_MaxHeapFreeRatio(err_msg, uvalue)) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), err_msg.buffer()); + } + } else if (strncmp(name, "MinHeapFreeRatio", 17) == 0) { + FormatBuffer<80> err_msg(""); + if (!Arguments::verify_MinHeapFreeRatio(err_msg, uvalue)) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), err_msg.buffer()); + } + } succeed = CommandLineFlags::uintxAtPut(name, &uvalue, Flag::MANAGEMENT); } else if (flag->is_uint64_t()) { uint64_t uvalue = (uint64_t)new_value.j; diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/trace/trace.xml --- a/src/share/vm/trace/trace.xml Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/trace/trace.xml Tue Feb 11 11:26:05 2014 -0800 @@ -122,6 +122,46 @@ [added XML lines; element markup lost in extraction] diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/trace/tracetypes.xml --- a/src/share/vm/trace/tracetypes.xml Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/trace/tracetypes.xml Tue Feb 11 11:26:05 2014 -0800 @@ -85,12 +85,6 @@ [removed XML lines; element markup lost in extraction] @@ -116,17 +110,6 @@ [removed XML lines; element markup lost in extraction] @@ -167,6 +150,11 @@ [added XML lines; element markup lost in extraction] @@ -351,6 +339,10 @@ [added XML lines; element markup lost in extraction] diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/utilities/dtrace.hpp --- a/src/share/vm/utilities/dtrace.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/utilities/dtrace.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -38,7 +38,10 @@ #define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG() \ do { volatile size_t dtrace_workaround_tail_call_bug = 1; } while (0) -#define USDT1 1 +#define USDT2 1 +#include "dtracefiles/hotspot.h" +#include "dtracefiles/hotspot_jni.h" +#include "dtracefiles/hs_private.h" #elif defined(LINUX) #define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG() #define USDT1 1 diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/utilities/exceptions.hpp --- a/src/share/vm/utilities/exceptions.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/utilities/exceptions.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -200,6 +200,7 @@ #define CHECK_NH CHECK_(Handle()) #define CHECK_NULL CHECK_(NULL) #define CHECK_false CHECK_(false) +#define CHECK_JNI_ERR CHECK_(JNI_ERR) #define CHECK_AND_CLEAR THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return; } (void)(0 #define CHECK_AND_CLEAR_(result) THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (void)(0 diff -r 4c8bda53850f -r b2fee789d23f src/share/vm/utilities/globalDefinitions.hpp --- a/src/share/vm/utilities/globalDefinitions.hpp Thu Feb 06 13:08:44 2014 -0800 +++ b/src/share/vm/utilities/globalDefinitions.hpp Tue Feb 11 11:26:05 2014 -0800 @@ -149,7 +149,7 @@ // The larger HeapWordSize for 64bit requires larger heaps // for the same application running in 64bit. See bug 4967770. // The minimum alignment to a heap word size is done. Other -// parts of the memory system may required additional alignment +// parts of the memory system may require additional alignment // and are responsible for those alignments. #ifdef _LP64 #define ScaleForWordSize(x) align_size_down_((x) * 13 / 10, HeapWordSize) diff -r 4c8bda53850f -r b2fee789d23f test/TEST.groups --- a/test/TEST.groups Thu Feb 06 13:08:44 2014 -0800 +++ b/test/TEST.groups Tue Feb 11 11:26:05 2014 -0800 @@ -86,7 +86,8 @@ runtime/RedefineObject/TestRedefineObject.java \ runtime/XCheckJniJsig/XCheckJSig.java \ serviceability/attach/AttachWithStalePidFile.java \ - serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java + serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java \ + serviceability/dcmd/DynLibDcmdTest.java # JRE adds further tests to compact3 diff -r 4c8bda53850f -r b2fee789d23f test/compiler/7184394/TestAESMain.java --- a/test/compiler/7184394/TestAESMain.java Thu Feb 06 13:08:44 2014 -0800 +++ b/test/compiler/7184394/TestAESMain.java Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,20 +39,32 @@ System.out.println(iters + " iterations"); TestAESEncode etest = new TestAESEncode(); etest.prepare(); + // warm-up for 20K iterations + System.out.println("Starting encryption warm-up"); + for (int i=0; i<20000; i++) { + etest.run(); + } + System.out.println("Finished encryption warm-up"); long start = System.nanoTime(); for (int i=0; i 0) { + System.out.println("FAILED"); + System.exit(97); + } + System.out.println("PASSED"); + } +} diff -r 4c8bda53850f -r b2fee789d23f test/compiler/inlining/DefaultAndConcreteMethodsCHA.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/inlining/DefaultAndConcreteMethodsCHA.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8031695 + * @summary CHA ignores default methods during analysis leading to incorrect code generation + * + * @run main/othervm -Xbatch DefaultAndConcreteMethodsCHA + */ +interface I { + default int m() { return 0; } +} + +class A implements I {} + +class C extends A { } +class D extends A { public int m() { return 1; } } + +public class DefaultAndConcreteMethodsCHA { + public static int test(A obj) { + return obj.m(); + } + public static void main(String[] args) { + for (int i = 0; i < 10000; i++) { + int idC = test(new C()); + if (idC != 0) { + throw new Error("C.m didn't invoke I.m: id "+idC); + } + + int idD = test(new D()); + if (idD != 1) { + throw new Error("D.m didn't invoke D.m: id "+idD); + } + } + + } +} diff -r 4c8bda53850f -r b2fee789d23f test/gc/g1/Test2GbHeap.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/g1/Test2GbHeap.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test Test2GbHeap + * @bug 8031686 + * @summary Regression test to ensure we can start G1 with 2gb heap. + * @key gc + * @key regression + * @library /testlibrary + */ + +import java.util.ArrayList; + +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.ProcessTools; + +public class Test2GbHeap { + public static void main(String[] args) throws Exception { + ArrayList<String> testArguments = new ArrayList<String>(); + + testArguments.add("-XX:+UseG1GC"); + testArguments.add("-Xmx2g"); + testArguments.add("-version"); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(testArguments.toArray(new String[0])); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + // Avoid failing test for setups not supported. + if (output.getOutput().contains("Could not reserve enough space for 2097152KB object heap")) { + // Will fail on machines with too little memory (and Windows 32-bit VM), ignore such failures.
+ output.shouldHaveExitValue(1); + } else if (output.getOutput().contains("G1 GC is disabled in this release")) { + // G1 is not supported on embedded, ignore such failures. + output.shouldHaveExitValue(1); + } else { + // Normally everything should be fine. + output.shouldHaveExitValue(0); + } + } +} diff -r 4c8bda53850f -r b2fee789d23f test/gc/g1/TestStringSymbolTableStats.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/g1/TestStringSymbolTableStats.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestStringSymbolTableStats.java + * @bug 8027476 8027455 + * @summary Ensure that the G1TraceStringSymbolTableScrubbing prints the expected message. + * @key gc + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.ProcessTools; +import com.oracle.java.testlibrary.OutputAnalyzer; + +public class TestStringSymbolTableStats { + public static void main(String[] args) throws Exception { + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+G1TraceStringSymbolTableScrubbing", + SystemGCTest.class.getName()); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + System.out.println("Output:\n" + output.getOutput()); + + output.shouldContain("Cleaned string and symbol table"); + output.shouldHaveExitValue(0); + } + + static class SystemGCTest { + public static void main(String [] args) { + System.out.println("Calling System.gc()"); + System.gc(); + } + } +} diff -r 4c8bda53850f -r b2fee789d23f test/runtime/6626217/Test6626217.sh --- a/test/runtime/6626217/Test6626217.sh Thu Feb 06 13:08:44 2014 -0800 +++ b/test/runtime/6626217/Test6626217.sh Tue Feb 11 11:26:05 2014 -0800 @@ -48,6 +48,11 @@ # A Clean Compile: this line will probably fail within jtreg as have a clean dir: ${RM} -f *.class *.impl many_loader.java +# Make sure that the compilation step occurs in the future, so as not to allow fast systems +# to copy and compile bug_21227.java so quickly that the class file and the java file end up +# with the same time stamp, which would later make the compilation step of many_loader.java fail +sleep 2 + # Compile all the usual suspects, including the default 'many_loader' ${CP} many_loader1.java.foo many_loader.java ${JAVAC} ${TESTJAVACOPTS} -source 1.4 -target 1.4 -Xlint *.java diff -r 4c8bda53850f -r b2fee789d23f test/runtime/6929067/Test6929067.sh --- a/test/runtime/6929067/Test6929067.sh Thu Feb 06 13:08:44 2014 -0800 +++
b/test/runtime/6929067/Test6929067.sh Tue Feb 11 11:26:05 2014 -0800 @@ -1,15 +1,13 @@ #!/bin/sh ## -## @ignore 8028740 ## @test Test6929067.sh ## @bug 6929067 ## @bug 8021296 ## @summary Stack guard pages should be removed when thread is detached -## @compile T.java ## @run shell Test6929067.sh ## -set -x + if [ "${TESTSRC}" = "" ] then TESTSRC=${PWD} @@ -114,10 +112,8 @@ LD_LIBRARY_PATH=.:${COMPILEJAVA}/jre/lib/${ARCH}/${VMTYPE}:/usr/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH -cp ${TESTSRC}${FS}invoke.c . - -# Copy the result of our @compile action: -cp ${TESTCLASSES}${FS}T.class . +cp ${TESTSRC}${FS}*.java ${THIS_DIR} +${TESTJAVA}${FS}bin${FS}javac *.java echo "Architecture: ${ARCH}" echo "Compilation flag: ${COMP_FLAG}" @@ -127,9 +123,9 @@ # for /usr/lib/`uname -m`-linux-gnu version ensure to add that path to below compilation. $gcc_cmd -DLINUX ${COMP_FLAG} -o invoke \ - -I${COMPILEJAVA}/include -I${COMPILEJAVA}/include/linux \ - -L${COMPILEJAVA}/jre/lib/${ARCH}/${VMTYPE} \ - -ljvm -lpthread invoke.c + -I${TESTJAVA}/include -I${TESTJAVA}/include/linux \ + -L${TESTJAVA}/jre/lib/${ARCH}/${VMTYPE} \ + ${TESTSRC}${FS}invoke.c -ljvm -lpthread ./invoke exit $? diff -r 4c8bda53850f -r b2fee789d23f test/runtime/CommandLine/CompilerConfigFileWarning.java --- a/test/runtime/CommandLine/CompilerConfigFileWarning.java Thu Feb 06 13:08:44 2014 -0800 +++ b/test/runtime/CommandLine/CompilerConfigFileWarning.java Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,8 +33,7 @@ public class CompilerConfigFileWarning { public static void main(String[] args) throws Exception { - String vmVersion = System.getProperty("java.vm.version"); - if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) { + if (Platform.isDebugBuild()) { System.out.println("Skip on debug builds since we'll always read the file there"); return; } diff -r 4c8bda53850f -r b2fee789d23f test/runtime/CommandLine/ConfigFileWarning.java --- a/test/runtime/CommandLine/ConfigFileWarning.java Thu Feb 06 13:08:44 2014 -0800 +++ b/test/runtime/CommandLine/ConfigFileWarning.java Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,8 +33,7 @@ public class ConfigFileWarning { public static void main(String[] args) throws Exception { - String vmVersion = System.getProperty("java.vm.version"); - if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) { + if (Platform.isDebugBuild()) { System.out.println("Skip on debug builds since we'll always read the file there"); return; } diff -r 4c8bda53850f -r b2fee789d23f test/runtime/CommandLine/VMOptionWarning.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/CommandLine/VMOptionWarning.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027314 + * @summary Warn if diagnostic or experimental vm option is used and -XX:+UnlockDiagnosticVMOptions or -XX:+UnlockExperimentalVMOptions, respectively, isn't specified. Warn if develop or notproduct vm option is used with product version of VM. + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.*; + +public class VMOptionWarning { + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PredictedLoadedClassCount", "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Error: VM option 'PredictedLoadedClassCount' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions."); + + if (Platform.isDebugBuild()) { + System.out.println("Skip the rest of the tests on debug builds since diagnostic, develop, and notproduct options are available on debug builds."); + return; + } + + pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintInlining", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Error: VM option 'PrintInlining' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions."); + + pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJNICalls", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Error: VM option 'TraceJNICalls' is develop and is available only in debug version of VM."); + + pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJVMCalls", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Error: VM option 'TraceJVMCalls' is notproduct and is available only in debug version of VM."); + } +} diff -r 4c8bda53850f -r b2fee789d23f test/runtime/LoadClass/LoadClassNegative.java --- a/test/runtime/LoadClass/LoadClassNegative.java Thu Feb 06 13:08:44 2014 -0800 +++ b/test/runtime/LoadClass/LoadClassNegative.java Tue Feb 11 11:26:05 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,7 +22,6 @@ */ /* - * @ignore 8028095 * @test * @key regression * @bug 8020675 diff -r 4c8bda53850f -r b2fee789d23f test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test ArchiveDoesNotExist + * @summary Test how VM handles "file does not exist" situation while + * attempting to use CDS archive. JVM should exit gracefully + * when sharing mode is ON, and continue w/o sharing if sharing + * mode is AUTO. + * @library /testlibrary + * @run main ArchiveDoesNotExist + */ + +import com.oracle.java.testlibrary.*; +import java.io.File; + +public class ArchiveDoesNotExist { + public static void main(String[] args) throws Exception { + String fileName = "test.jsa"; + + File cdsFile = new File(fileName); + if (cdsFile.exists()) + throw new RuntimeException("Test error: cds file already exists"); + + // Sharing: on + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-Xshare:on", + "-version"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Specified shared archive not found"); + output.shouldHaveExitValue(1); + + // Sharing: auto + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-Xshare:auto", + "-version"); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("java version"); + output.shouldNotContain("sharing"); + output.shouldHaveExitValue(0); + } +} diff -r 4c8bda53850f -r b2fee789d23f test/runtime/SharedArchiveFile/CdsWriteError.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/SharedArchiveFile/CdsWriteError.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test CdsWriteError + * @summary Test how VM handles situation when it is impossible to write the + * CDS archive. VM is expected to exit gracefully and display the + * correct reason for the error. + * @library /testlibrary + * @run main CdsWriteError + */ + +import com.oracle.java.testlibrary.*; +import java.io.File; + +public class CdsWriteError { + public static void main(String[] args) throws Exception { + + if (Platform.isWindows()) { + System.out.println("This test is ignored on Windows. This test " + + "manipulates folder writable attribute, which is known to be " + + "often ignored by Windows"); + + return; + } + + String folderName = "tmp"; + String fileName = folderName + File.separator + "empty.jsa"; + + // create an empty archive file and make it read only + File folder = new File(folderName); + if (!folder.mkdir()) + throw new RuntimeException("Error when creating a tmp folder"); + + File cdsFile = new File(fileName); + if (!cdsFile.createNewFile()) + throw new RuntimeException("Error when creating an empty CDS file"); + if (!cdsFile.setWritable(false)) + throw new RuntimeException("Error: could not set writable attribute on cds file"); + if (!folder.setWritable(false)) + throw new RuntimeException("Error: could not set writable attribute on the cds folder"); + + try { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./" + fileName, "-Xshare:dump"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Unable to create shared archive file"); + output.shouldHaveExitValue(1); + } finally { + // doing this, just in case, to make sure that files can be deleted by the harness + // on any subsequent run + folder.setWritable(true); + cdsFile.setWritable(true); + } + } +} + diff -r 4c8bda53850f -r b2fee789d23f test/runtime/SharedArchiveFile/DefaultUseWithClient.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/SharedArchiveFile/DefaultUseWithClient.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test DefaultUseWithClient + * @summary Test default behavior of sharing with -client + * @library /testlibrary + * @run main DefaultUseWithClient + */ + +import com.oracle.java.testlibrary.*; +import java.io.File; + +public class DefaultUseWithClient { + public static void main(String[] args) throws Exception { + String fileName = "test.jsa"; + + // On 32-bit windows CDS should be on by default in "-client" config + // Skip this test on any other platform + boolean is32BitWindows = (Platform.isWindows() && Platform.is32bit()); + if (!is32BitWindows) { + System.out.println("Test only applicable on 32-bit Windows. Skipping"); + return; + } + + // create the archive + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-Xshare:dump"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-client", + "-version"); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("sharing"); + output.shouldHaveExitValue(0); + } +} diff -r 4c8bda53850f -r b2fee789d23f test/runtime/finalStatic/FinalStatic.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/finalStatic/FinalStatic.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8028553 + * @summary Test that VerifyError is not thrown when 'overriding' a static method. 
+ * @run main FinalStatic + */ + +import java.lang.reflect.*; +import jdk.internal.org.objectweb.asm.ClassWriter; +import jdk.internal.org.objectweb.asm.MethodVisitor; +import jdk.internal.org.objectweb.asm.Opcodes; + +/* + * class A { static final int m() {return FAIL; } } + * class B extends A { int m() { return PASS; } } + * class FinalStatic { + * public static void main () { + * Object b = new B(); + * b.m(); + * } + * } + */ +public class FinalStatic { + + static final String CLASS_NAME_A = "A"; + static final String CLASS_NAME_B = "B"; + static final int FAILED = 0; + static final int EXPECTED = 1234; + + static class TestClassLoader extends ClassLoader implements Opcodes { + + @Override + public Class findClass(String name) throws ClassNotFoundException { + byte[] b; + try { + b = loadClassData(name); + } catch (Throwable th) { + // th.printStackTrace(); + throw new ClassNotFoundException("Loading error", th); + } + return defineClass(name, b, 0, b.length); + } + + private byte[] loadClassData(String name) throws Exception { + ClassWriter cw = new ClassWriter(0); + MethodVisitor mv; + switch (name) { + case CLASS_NAME_A: + cw.visit(52, ACC_SUPER | ACC_PUBLIC, CLASS_NAME_A, null, "java/lang/Object", null); + { + mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null); + mv.visitCode(); + mv.visitVarInsn(ALOAD, 0); + mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V"); + mv.visitInsn(RETURN); + mv.visitMaxs(1, 1); + mv.visitEnd(); + + mv = cw.visitMethod(ACC_FINAL | ACC_STATIC, "m", "()I", null, null); + mv.visitCode(); + mv.visitLdcInsn(FAILED); + mv.visitInsn(IRETURN); + mv.visitMaxs(1, 1); + mv.visitEnd(); + } + break; + case CLASS_NAME_B: + cw.visit(52, ACC_SUPER | ACC_PUBLIC, CLASS_NAME_B, null, CLASS_NAME_A, null); + { + mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null); + mv.visitCode(); + mv.visitVarInsn(ALOAD, 0); + mv.visitMethodInsn(INVOKESPECIAL, CLASS_NAME_A, "<init>", "()V"); + mv.visitInsn(RETURN); + mv.visitMaxs(1, 1); + mv.visitEnd(); + + mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null); + mv.visitCode(); + mv.visitLdcInsn(EXPECTED); + mv.visitInsn(IRETURN); + mv.visitMaxs(1, 1); + mv.visitEnd(); + + } + break; + default: + break; + } + cw.visitEnd(); + + return cw.toByteArray(); + } + } + + public static void main(String[] args) throws Exception { + TestClassLoader tcl = new TestClassLoader(); + Class a = tcl.loadClass(CLASS_NAME_A); + Class b = tcl.loadClass(CLASS_NAME_B); + Object inst = b.newInstance(); + Method[] meths = b.getDeclaredMethods(); + + Method m = meths[0]; + int mod = m.getModifiers(); + if ((mod & Modifier.FINAL) != 0) { + throw new Exception("FAILED: " + m + " is FINAL"); + } + if ((mod & Modifier.STATIC) != 0) { + throw new Exception("FAILED: " + m + " is STATIC"); + } + + m.setAccessible(true); + if (!m.invoke(inst).equals(EXPECTED)) { + throw new Exception("FAILED: " + EXPECTED + " from " + m); + } + + System.out.println("Passed."); + } +} diff -r 4c8bda53850f -r b2fee789d23f test/runtime/jsig/Test8017498.sh --- a/test/runtime/jsig/Test8017498.sh Thu Feb 06 13:08:44 2014 -0800 +++ b/test/runtime/jsig/Test8017498.sh Tue Feb 11 11:26:05 2014 -0800 @@ -1,7 +1,7 @@ #!/bin/sh # -# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# # This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ ## @bug 8017498 ## @bug 8020791 ## @bug 8021296 +## @bug 8022301 ## @summary sigaction(sig) results in process hang/timed-out if sig is much greater than SIGRTMAX ## @run shell/timeout=30 Test8017498.sh ## @@ -42,6 +43,8 @@ ## Adding common setup Variables for running shell tests. . ${TESTSRC}/../../test_env.sh +EXTRA_CFLAG= + # set platform-dependent variables OS=`uname -s` case "$OS" in @@ -57,6 +60,7 @@ MY_LD_PRELOAD=${TESTJAVA}${FS}jre${FS}lib${FS}amd64${FS}libjsig.so else MY_LD_PRELOAD=${TESTJAVA}${FS}jre${FS}lib${FS}i386${FS}libjsig.so + EXTRA_CFLAG=-m32 fi echo MY_LD_PRELOAD = ${MY_LD_PRELOAD} ;; @@ -72,6 +76,7 @@ ${TESTJAVA}${FS}bin${FS}javac *.java $gcc_cmd -DLINUX -fPIC -shared \ + ${EXTRA_CFLAG} -z noexecstack \ -o ${TESTSRC}${FS}libTestJNI.so \ -I${TESTJAVA}${FS}include \ -I${TESTJAVA}${FS}include${FS}linux \ diff -r 4c8bda53850f -r b2fee789d23f test/runtime/lambda-features/InvokespecialInterface.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/lambda-features/InvokespecialInterface.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + * + */ + +/* + * @test + * @bug 8032024 + * @bug 8025937 + * @summary [JDK 8] Test invokespecial and invokeinterface with the same JVM_CONSTANT_InterfaceMethodref + * @run main InvokespecialInterface + */ +import java.util.function.*; +import java.util.*; + +interface I { + default void imethod() { System.out.println("I::imethod"); } +} + +class C implements I { + public void foo() { I.super.imethod(); } // invokespecial InterfaceMethod + public void bar() { I i = this; i.imethod(); } // invokeinterface same + public void doSomeInvokedynamic() { + String str = "world"; + Supplier<String> foo = ()->"hello, "+str; + String res = foo.get(); + System.out.println(res); + } +} + +public class InvokespecialInterface { + public static void main(java.lang.String[] unused) { + // need to create C and call I::foo() + C c = new C(); + c.foo(); + c.bar(); + c.doSomeInvokedynamic(); + } +}; + + diff -r 4c8bda53850f -r b2fee789d23f test/runtime/memory/ReadFromNoaccessArea.java --- a/test/runtime/memory/ReadFromNoaccessArea.java Thu Feb 06 13:08:44 2014 -0800 +++ b/test/runtime/memory/ReadFromNoaccessArea.java Tue Feb 11 11:26:05 2014 -0800 @@ -22,7 +22,6 @@ */ /* - * @ignore 8028398 * @test * @summary Test that touching noaccess area in class ReservedHeapSpace results in SIGSEGV/ACCESS_VIOLATION * @library /testlibrary /testlibrary/whitebox diff -r 4c8bda53850f -r b2fee789d23f test/serviceability/dcmd/DcmdUtil.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/serviceability/dcmd/DcmdUtil.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import sun.management.ManagementFactoryHelper; + +import com.sun.management.DiagnosticCommandMBean; + +public class DcmdUtil +{ + public static String executeDcmd(String cmd, String ... args) { + DiagnosticCommandMBean dcmd = ManagementFactoryHelper.getDiagnosticCommandMBean(); + Object[] dcmdArgs = {args}; + String[] signature = {String[].class.getName()}; + + try { + System.out.print("> " + cmd + " "); + for (String s : args) { + System.out.print(s + " "); + } + System.out.println(":"); + String result = (String) dcmd.invoke(transform(cmd), dcmdArgs, signature); + System.out.println(result); + return result; + } catch(Exception ex) { + ex.printStackTrace(); + } + return null; + } + + private static String transform(String name) { + StringBuilder sb = new StringBuilder(); + boolean toLower = true; + boolean toUpper = false; + for (int i = 0; i < name.length(); i++) { + char c = name.charAt(i); + if (c == '.'
|| c == '_') { + toLower = false; + toUpper = true; + } else { + if (toUpper) { + toUpper = false; + sb.append(Character.toUpperCase(c)); + } else if(toLower) { + sb.append(Character.toLowerCase(c)); + } else { + sb.append(c); + } + } + } + return sb.toString(); + } + +} diff -r 4c8bda53850f -r b2fee789d23f test/serviceability/dcmd/DynLibDcmdTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/serviceability/dcmd/DynLibDcmdTest.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,67 @@ +import java.util.HashSet; +import java.util.Set; +import com.oracle.java.testlibrary.Platform; + +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test of VM.dynlibs diagnostic command via MBean + * @library /testlibrary + * @compile DcmdUtil.java + * @run main DynLibDcmdTest + */ + +public class DynLibDcmdTest { + + public static void main(String[] args) throws Exception { + String result = DcmdUtil.executeDcmd("VM.dynlibs"); + + String osDependentBaseString = null; + if (Platform.isSolaris()) { + osDependentBaseString = "lib%s.so"; + } else if (Platform.isWindows()) { + osDependentBaseString = "%s.dll"; + } else if (Platform.isOSX()) { + osDependentBaseString = "lib%s.dylib"; + } else if (Platform.isLinux()) { + osDependentBaseString = "lib%s.so"; + } + + if (osDependentBaseString == null) { + throw new Exception("Unsupported OS"); + } + + Set<String> expectedContent = new HashSet<>(); + expectedContent.add(String.format(osDependentBaseString, "jvm")); + expectedContent.add(String.format(osDependentBaseString, "java")); + expectedContent.add(String.format(osDependentBaseString, "management")); + + for(String expected : expectedContent) { + if (!result.contains(expected)) { + throw new Exception("Dynamic library list output did not contain the expected string: '" + expected + "'"); + } + } + } +} diff -r 4c8bda53850f -r b2fee789d23f test/serviceability/sa/jmap-hashcode/Test8028623.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/serviceability/sa/jmap-hashcode/Test8028623.java Tue Feb 11 11:26:05 2014 -0800 @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation.
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8028623 + * @summary Test hashing of extended characters in Serviceability Agent. + * @library /testlibrary + * @compile -encoding utf8 Test8028623.java + * @run main Test8028623 + */ + +import com.oracle.java.testlibrary.JDKToolLauncher; +import com.oracle.java.testlibrary.OutputBuffer; +import com.oracle.java.testlibrary.ProcessTools; + +import java.io.File; + +public class Test8028623 { + + public static int à = 1; + public static String dumpFile = "heap.out"; + + public static void main (String[] args) { + + System.out.println(à); + + try { + int pid = ProcessTools.getProcessId(); + JDKToolLauncher jmap = JDKToolLauncher.create("jmap") + .addToolArg("-F") + .addToolArg("-dump:live,format=b,file=" + dumpFile) + .addToolArg(Integer.toString(pid)); + ProcessBuilder pb = new ProcessBuilder(jmap.getCommand()); + OutputBuffer output = ProcessTools.getOutput(pb); + Process p = pb.start(); + int e = p.waitFor(); + System.out.println("stdout:"); + System.out.println(output.getStdout()); + System.out.println("stderr:"); + System.out.println(output.getStderr()); + + if (e != 0) { + throw new RuntimeException("jmap returns: " + e); + } + if (! new File(dumpFile).exists()) { + throw new RuntimeException("dump file NOT created: '" + dumpFile + "'"); + } + } catch (Throwable t) { + t.printStackTrace(); + throw new RuntimeException("Test failed with: " + t); + } + } +}
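For context on the MinHeapFreeRatio/MaxHeapFreeRatio verification added in attachListener.cpp and management.cpp above: on the Java side those checks surface through the HotSpotDiagnosticMXBean writeable-flag path (jmm_SetVMGlobal). The sketch below is a minimal, hypothetical illustration of that behavior, not part of this changeset; the class name HeapFreeRatioSketch and the chosen values are assumptions, and the exact error text comes from Arguments::verify_MinHeapFreeRatio/verify_MaxHeapFreeRatio and is deliberately not asserted.

    import java.lang.management.ManagementFactory;
    import com.sun.management.HotSpotDiagnosticMXBean;

    // Hypothetical sketch, not part of this changeset: exercises the
    // MANAGEMENT-origin uintx flag path patched in management.cpp above.
    public class HeapFreeRatioSketch {
        public static void main(String[] args) {
            HotSpotDiagnosticMXBean diag =
                ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);

            // A consistent pair (MinHeapFreeRatio <= MaxHeapFreeRatio) is accepted.
            diag.setVMOption("MinHeapFreeRatio", "10");
            diag.setVMOption("MaxHeapFreeRatio", "70");

            // With verify_MaxHeapFreeRatio() in place, an update that would make
            // MaxHeapFreeRatio smaller than MinHeapFreeRatio should be rejected
            // with an IllegalArgumentException instead of being applied.
            try {
                diag.setVMOption("MaxHeapFreeRatio", "5");
                throw new RuntimeException("inconsistent MaxHeapFreeRatio was accepted");
            } catch (IllegalArgumentException expected) {
                System.out.println("Rejected as expected: " + expected.getMessage());
            }
        }
    }

The same validation guards the attach-on-demand path (attachListener.cpp), so a jcmd/attach client setting these flags gets the error message printed on its output stream instead of a silent bad update.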