changeset 14460:8a9bb7821e28

Merge
author kvn
date Wed, 19 Feb 2014 12:08:49 -0800
parents e8ef156f0bc9 (diff) 194e8b7fe9ca (current diff)
children a13badbb8b8e
files make/Makefile make/linux/makefiles/buildtree.make src/os/bsd/vm/os_bsd.cpp src/os/linux/vm/os_linux.cpp src/os/posix/vm/os_posix.cpp src/os/solaris/vm/os_solaris.cpp src/share/vm/compiler/compileBroker.cpp src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp src/share/vm/memory/allocation.hpp src/share/vm/memory/barrierSet.hpp src/share/vm/memory/cardTableModRefBS.cpp src/share/vm/memory/modRefBarrierSet.hpp src/share/vm/memory/space.hpp src/share/vm/memory/universe.cpp src/share/vm/oops/method.hpp src/share/vm/opto/runtime.cpp src/share/vm/prims/jvm.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/deoptimization.cpp src/share/vm/runtime/frame.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/mutexLocker.hpp src/share/vm/runtime/objectMonitor.cpp src/share/vm/runtime/os.cpp src/share/vm/runtime/os.hpp src/share/vm/runtime/sharedRuntime.cpp src/share/vm/runtime/sharedRuntime.hpp src/share/vm/runtime/sharedRuntimeTrans.cpp src/share/vm/runtime/sharedRuntimeTrig.cpp src/share/vm/runtime/synchronizer.cpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/virtualspace.cpp src/share/vm/utilities/bitMap.cpp src/share/vm/utilities/globalDefinitions.hpp
diffstat 286 files changed, 4489 insertions(+), 3317 deletions(-)
--- a/agent/make/mkinstall	Thu Jan 30 14:30:01 2014 +0100
+++ b/agent/make/mkinstall	Wed Feb 19 12:08:49 2014 -0800
@@ -27,7 +27,9 @@
 
 cp ../src/os/solaris/proc/amd64/libsaproc.so $SA_NAME/solaris/amd64
 cp ../src/os/solaris/proc/sparc/libsaproc.so $SA_NAME/solaris/sparc
+cp ../src/os/solaris/proc/sparc/libsaproc_audit.so $SA_NAME/solaris/sparc
 cp ../src/os/solaris/proc/sparcv9/libsaproc.so $SA_NAME/solaris/sparcv9
+cp ../src/os/solaris/proc/sparcv9/libsaproc_audit.so $SA_NAME/solaris/sparcv9
 cp ../src/os/solaris/proc/i386/libsaproc.so $SA_NAME/solaris/i386
 cp ../src/os/linux/i386/libsaproc.so $SA_NAME/linux/i386
 cp ../src/os/linux/ia64/libsaproc.so $SA_NAME/linux/ia64
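
The two added cp lines ship the Solaris audit helper next to libsaproc.so, so an installed SA image carries the library that saenv.sh and saenv64.sh (below) now look for unconditionally. A sketch of the resulting layout, with a hypothetical $SA_NAME of sa-image:

    sa-image/solaris/sparc/libsaproc.so
    sa-image/solaris/sparc/libsaproc_audit.so
    sa-image/solaris/sparcv9/libsaproc.so
    sa-image/solaris/sparcv9/libsaproc_audit.so
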
--- a/agent/make/saenv.sh	Thu Jan 30 14:30:01 2014 +0100
+++ b/agent/make/saenv.sh	Wed Feb 19 12:08:49 2014 -0800
@@ -48,16 +48,17 @@
      CPU=i386
    fi
 else
-   # configure audit helper library if SA_ALTROOT is set
-   if [ -n "$SA_ALTROOT" ]; then
-     LD_AUDIT_32=$STARTDIR/../src/os/solaris/proc/`uname -p`/libsaproc_audit.so
-     export LD_AUDIT_32
-     if [ ! -f $LD_AUDIT_32 ]; then
-       echo "SA_ALTROOT is set and can't find libsaproc_audit.so."
-       echo "Make sure to build it with 'make natives'."
-       exit 1
-     fi
+   # configure audit helper library for solaris
+   LD_AUDIT_32=$STARTDIR/../src/os/solaris/proc/`uname -p`/libsaproc_audit.so
+   if [ ! -f $LD_AUDIT_32 ]; then
+     LD_AUDIT_32=$STARTDIR/solaris/`uname -p`/libsaproc_audit.so
+   fi  
+   if [ ! -f $LD_AUDIT_32 ]; then
+      echo "Can't find libsaproc_audit.so."
+      echo "Make sure to build it with 'make natives'."
+      exit 1
    fi
+   export LD_AUDIT_32
    SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/`uname -p`:$STARTDIR/solaris/`uname -p`
    OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
    CPU=sparc
--- a/agent/make/saenv64.sh	Thu Jan 30 14:30:01 2014 +0100
+++ b/agent/make/saenv64.sh	Wed Feb 19 12:08:49 2014 -0800
@@ -43,16 +43,19 @@
   fi
 fi
 
-# configure audit helper library if SA_ALTROOT is set
-if [ -n "$SA_ALTROOT" ]; then
-  LD_AUDIT_64=$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so
-  export LD_AUDIT_64
-  if [ ! -f $LD_AUDIT_64 ]; then
-      echo "SA_ALTROOT is set and can't find libsaproc_audit.so."
-      echo "Make sure to build it with 'make natives'."
-      exit 1
-  fi
+# configure audit helper library
+LD_AUDIT_64=$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so
+if [ ! -f $LD_AUDIT_64 ]; then
+  LD_AUDIT_64=$STARTDIR/solaris/$CPU/libsaproc_audit.so
 fi
+
+if [ ! -f $LD_AUDIT_64 ]; then
+   echo "Can't find libsaproc_audit.so."
+   echo "Make sure to build it with 'make natives'."
+   exit 1
+fi
+
+export LD_AUDIT_64
 SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/$CPU:$STARTDIR/solaris/$CPU
 
 OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
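
Both launcher scripts drop the SA_ALTROOT guard: on Solaris the LD_AUDIT helper is now always configured, with a second lookup location covering installed trees (populated by the mkinstall change above). The lookup reduces to a small shell pattern; a minimal sketch of the equivalent logic, not the scripts' literal text:

    # try the build-tree copy first, then the installed copy
    for candidate in \
        "$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so" \
        "$STARTDIR/solaris/$CPU/libsaproc_audit.so"; do
      [ -f "$candidate" ] && LD_AUDIT_64="$candidate" && break
    done
    if [ ! -f "$LD_AUDIT_64" ]; then
      echo "Can't find libsaproc_audit.so."
      echo "Make sure to build it with 'make natives'."
      exit 1
    fi
    export LD_AUDIT_64
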
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -152,7 +152,7 @@
 
   private long indexOffset(long index) {
     if (Assert.ASSERTS_ENABLED) {
-      Assert.that(index > 0 && index < getLength(),  "invalid cp index " + index + " " + getLength());
+      Assert.that(index >= 0 && index < getLength(),  "invalid cp index " + index + " " + getLength());
     }
     return (index * getElementSize()) + headerSize;
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java	Wed Feb 19 12:08:49 2014 -0800
@@ -103,11 +103,12 @@
       }
 
       SystemDictionary dict = VM.getVM().getSystemDictionary();
-      dict.classesDo(new SystemDictionary.ClassAndLoaderVisitor() {
-                        public void visit(Klass k, Oop loader) {
+      dict.classesDo(new SystemDictionary.ClassVisitor() {
+                        public void visit(Klass k) {
                            if (! (k instanceof InstanceKlass)) {
                               return;
                            }
+                           Oop loader = ((InstanceKlass) k).getClassLoader();
                            LoaderData ld = (loader != null) ? (LoaderData)loaderMap.get(loader)
                                                             : bootstrapLoaderData;
                            if (ld != null) {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,11 +98,14 @@
            break;
        default: throw new IllegalArgumentException();
        }
+
        if (cpCache == null) {
           return (short) cpCacheIndex;
        } else if (fmt.indexOf("JJJJ") >= 0) {
-          // change byte-ordering and go via secondary cache entry
-           throw new InternalError("unimplemented");
+          // Invokedynamic requires special handling
+          cpCacheIndex = ~cpCacheIndex;
+          cpCacheIndex = bytes.swapInt(cpCacheIndex);
+          return (short) cpCache.getEntryAt(cpCacheIndex).getConstantPoolIndex();
        } else if (fmt.indexOf("JJ") >= 0) {
           // change byte-ordering and go via cache
           return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex();
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,8 +61,9 @@
     long h = 0;
     int s = 0;
     int len = buf.length;
+    // Emulate the unsigned int in java_lang_String::hash_code
     while (len-- > 0) {
-      h = 31*h + (0xFFL & buf[s]);
+      h = 31*h + (0xFFFFFFFFL & buf[s]);
       s++;
     }
     return h & 0xFFFFFFFFL;
--- a/make/Makefile	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/Makefile	Wed Feb 19 12:08:49 2014 -0800
@@ -607,11 +607,11 @@
 
 
 # Testing the built JVM
-RUN_JVM=JAVA_HOME=$(JDK_IMPORT_PATH) $(JDK_IMPORT_PATH)/bin/java -d$(ARCH_DATA_MODEL) -Dsun.java.launcher=gamma
+RUN_JVM=JAVA_HOME=$(JDK_IMPORT_PATH) $(JDK_IMPORT_PATH)/bin/java -d$(ARCH_DATA_MODEL) -XXaltjvm=$(ALTJVM_DIR) -Dsun.java.launcher.is_altjvm=true
 generic_test:
 	@$(ECHO) "Running with: $(ALTJVM_DIR)"
-	@$(RUN_JVM) -XXaltjvm=$(ALTJVM_DIR) -Xinternalversion
-	@$(RUN_JVM) -XXaltjvm=$(ALTJVM_DIR) -showversion -help
+	@$(RUN_JVM) -Xinternalversion
+	@$(RUN_JVM) -showversion -help
 
 # C2 test targets
 test_product test_optimized test_fastdebug test_debug:
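
With the standalone gamma launcher gone, the built VM is exercised through the stock java launcher: -XXaltjvm points it at the freshly built libjvm, and -Dsun.java.launcher.is_altjvm=true replaces the old -Dsun.java.launcher=gamma marker. Expanded by hand (assuming ARCH_DATA_MODEL=64 for illustration), the first test invocation becomes roughly:

    JAVA_HOME=$JDK_IMPORT_PATH $JDK_IMPORT_PATH/bin/java -d64 \
        -XXaltjvm=$ALTJVM_DIR -Dsun.java.launcher.is_altjvm=true -Xinternalversion
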
--- a/make/bsd/makefiles/dtrace.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/bsd/makefiles/dtrace.make	Wed Feb 19 12:08:49 2014 -0800
@@ -53,6 +53,7 @@
 GENOFFS = generate$(JVMOFFS)
 
 DTRACE_SRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/dtrace
+DTRACE_COMMON_SRCDIR = $(GAMMADIR)/src/os/posix/dtrace
 DTRACE = dtrace
 DTRACE.o = $(DTRACE).o
 
@@ -68,11 +69,9 @@
 
 # Use mapfile with libjvm_db.so
 LIBJVM_DB_MAPFILE = # no mapfile for usdt2 # $(MAKEFILES_DIR)/mapfile-vers-jvm_db
-#LFLAGS_JVM_DB += $(MAPFLAG:FILENAME=$(LIBJVM_DB_MAPFILE))
 
 # Use mapfile with libjvm_dtrace.so
 LIBJVM_DTRACE_MAPFILE = # no mapfile for usdt2 # $(MAKEFILES_DIR)/mapfile-vers-jvm_dtrace
-#LFLAGS_JVM_DTRACE += $(MAPFLAG:FILENAME=$(LIBJVM_DTRACE_MAPFILE))
 
 LFLAGS_JVM_DB += $(PICFLAG) # -D_REENTRANT
 LFLAGS_JVM_DTRACE += $(PICFLAG) # -D_REENTRANT
@@ -260,116 +259,38 @@
   endif
 endif
 
-#$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
-#             $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
-#	$(QUIETLY) cat $^ > $@
 
 $(DtraceOutDir):
 	mkdir $(DtraceOutDir)
 
-$(DtraceOutDir)/hotspot.h: $(DTRACE_SRCDIR)/hotspot.d | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hotspot.d
-
-$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hotspot_jni.d
+$(DtraceOutDir)/hotspot.h: $(DTRACE_COMMON_SRCDIR)/hotspot.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot.d
 
-$(DtraceOutDir)/hs_private.h: $(DTRACE_SRCDIR)/hs_private.d | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hs_private.d
+$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d
 
-$(DtraceOutDir)/jhelper.h: $(DTRACE_SRCDIR)/jhelper.d $(JVMOFFS).o | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/jhelper.d
+$(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hs_private.d
 
-# jhelper currently disabled
 dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h 
 
-DTraced_Files = ciEnv.o \
-                classLoadingService.o \
-                compileBroker.o \
-                hashtable.o \
-                instanceKlass.o \
-                java.o \
-                jni.o \
-                jvm.o \
-                memoryManager.o \
-                nmethod.o \
-                objectMonitor.o \
-                runtimeService.o \
-                sharedRuntime.o \
-                synchronizer.o \
-                thread.o \
-                unsafe.o \
-                vmThread.o \
-                vmCMSOperations.o \
-                vmPSOperations.o \
-                vmGCOperations.o \
-
-# Dtrace is available, so we build $(DTRACE.o)  
-#$(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files)
-#	@echo Compiling $(DTRACE).d
-
-#	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \
-#     $(DTraced_Files) ||\
-#  STATUS=$$?;\
-#	if [ x"$$STATUS" = x"1" -a \
-#       x`uname -r` = x"5.10" -a \
-#       x`uname -p` = x"sparc" ]; then\
-#    echo "*****************************************************************";\
-#    echo "* If you are building server compiler, and the error message is ";\
-#    echo "* \"incorrect ELF machine type...\", you have run into solaris bug ";\
-#    echo "* 6213962, \"dtrace -G doesn't work on sparcv8+ object files\".";\
-#    echo "* Either patch/upgrade your system (>= S10u1_15), or set the ";\
-#    echo "* environment variable HOTSPOT_DISABLE_DTRACE_PROBES to disable ";\
-#    echo "* dtrace probes for this build.";\
-#    echo "*****************************************************************";\
-#  fi;\
-#  exit $$STATUS
-  # Since some DTraced_Files are in LIBJVM.o and they are touched by this
-  # command, and libgenerateJvmOffsets.so depends on LIBJVM.o, 'make' will
-  # think it needs to rebuild libgenerateJvmOffsets.so and thus JvmOffsets*
-  # files, but it doesn't, so we touch the necessary files to prevent later
-  # recompilation. Note: we only touch the necessary files if they already
-  # exist in order to close a race where an empty file can be created
-  # before the real build rule is executed.
-  # But, we can't touch the *.h files:  This rule depends
-  # on them, and that would cause an infinite cycle of rebuilding.
-  # Neither the *.h or *.ccp files need to be touched, since they have
-  # rules which do not update them when the generator file has not
-  # changed their contents.
-#	$(QUIETLY) if [ -f lib$(GENOFFS).so ]; then touch lib$(GENOFFS).so; fi
-#	$(QUIETLY) if [ -f $(GENOFFS) ]; then touch $(GENOFFS); fi
-#	$(QUIETLY) if [ -f $(JVMOFFS.o) ]; then touch $(JVMOFFS.o); fi
 
 .PHONY: dtraceCheck
 
-#SYSTEM_DTRACE_H = /usr/include/dtrace.h
 SYSTEM_DTRACE_PROG = /usr/sbin/dtrace
-#PATCH_DTRACE_PROG = /opt/SUNWdtrd/sbin/dtrace
 systemDtraceFound := $(wildcard ${SYSTEM_DTRACE_PROG})
-#patchDtraceFound := $(wildcard ${PATCH_DTRACE_PROG})
-#systemDtraceHdrFound := $(wildcard $(SYSTEM_DTRACE_H))
 
-#ifneq ("$(systemDtraceHdrFound)", "") 
-#CFLAGS += -DHAVE_DTRACE_H
-#endif
-
-#ifneq ("$(patchDtraceFound)", "")
-#DTRACE_PROG=$(PATCH_DTRACE_PROG)
-#DTRACE_INCL=-I/opt/SUNWdtrd/include
-#else
 ifneq ("$(systemDtraceFound)", "")
 DTRACE_PROG=$(SYSTEM_DTRACE_PROG)
 else
 
-endif # ifneq ("$(systemDtraceFound)", "")
-#endif # ifneq ("$(patchDtraceFound)", "")
+endif
 
 ifneq ("${DTRACE_PROG}", "")
 ifeq ("${HOTSPOT_DISABLE_DTRACE_PROBES}", "")
 
 DTRACE_OBJS = $(DTRACE.o) #$(JVMOFFS.o)
 CFLAGS += -DDTRACE_ENABLED #$(DTRACE_INCL)
-#clangCFLAGS += -DDTRACE_ENABLED -fno-optimize-sibling-calls
-#MAPFILE_DTRACE_OPT = $(MAPFILE_DTRACE)
 
 
 dtraceCheck:
--- a/make/bsd/makefiles/jsig.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/bsd/makefiles/jsig.make	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -91,13 +91,13 @@
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
 ifeq ($(OS_VENDOR), Darwin)
-	$(QUIETLY) test -d $(LIBJSIG_DEBUGINFO) && \
+	$(QUIETLY) test ! -d $(LIBJSIG_DEBUGINFO) || \
 	    cp -f -r $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
 else
-	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBJSIG_DEBUGINFO) || \
 	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
 endif
-	$(QUIETLY) test -f $(LIBJSIG_DIZ) && \
+	$(QUIETLY) test ! -f $(LIBJSIG_DIZ) || \
 	    cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
 	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
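
The test -f X && cp ... to test ! -f X || cp ... rewrite here, and in the saproc.make and vm.make install targets below, fixes recipes that abort when the optional debug-info files are absent: test -f exits 1 for a missing file, and make treats any non-zero recipe line as a failure. The inverted form succeeds either way; a quick check in a shell, with a hypothetical missing file:

    $ test -f /no/such.debuginfo && cp /no/such.debuginfo /tmp/; echo $?
    1    # make would stop the build here
    $ test ! -f /no/such.debuginfo || cp /no/such.debuginfo /tmp/; echo $?
    0    # file absent: nothing copied, recipe continues
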
 
--- a/make/bsd/makefiles/optimized.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/bsd/makefiles/optimized.make	Wed Feb 19 12:08:49 2014 -0800
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- a/make/bsd/makefiles/product.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/bsd/makefiles/product.make	Wed Feb 19 12:08:49 2014 -0800
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- a/make/bsd/makefiles/saproc.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/bsd/makefiles/saproc.make	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -153,13 +153,13 @@
 install_saproc: $(BUILDLIBSAPROC)
 	@echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"
 ifeq ($(OS_VENDOR), Darwin)
-	$(QUIETLY) test -d $(LIBSAPROC_DEBUGINFO) && \
+	$(QUIETLY) test ! -d $(LIBSAPROC_DEBUGINFO) || \
 	    cp -f -r $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
 else
-	$(QUIETLY) test -f $(LIBSAPROC_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBSAPROC_DEBUGINFO) || \
 	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
 endif
-	$(QUIETLY) test -f $(LIBSAPROC_DIZ) && \
+	$(QUIETLY) test ! -f $(LIBSAPROC_DIZ) || \
 	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ)
 	$(QUIETLY) cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"
 
--- a/make/bsd/makefiles/top.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/bsd/makefiles/top.make	Wed Feb 19 12:08:49 2014 -0800
@@ -128,7 +128,7 @@
 	@$(UpdatePCH)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install gamma: the_vm
+install : the_vm
 	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[ois]"
--- a/make/bsd/makefiles/vm.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/bsd/makefiles/vm.make	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -367,13 +367,13 @@
 install_jvm: $(LIBJVM)
 	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
 ifeq ($(OS_VENDOR), Darwin)
-	$(QUIETLY) test -d $(LIBJVM_DEBUGINFO) && \
+	$(QUIETLY) test ! -d $(LIBJVM_DEBUGINFO) || \
 	    cp -f -r $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
 else
-	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBJVM_DEBUGINFO) || \
 	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
 endif
-	$(QUIETLY) test -f $(LIBJVM_DIZ) && \
+	$(QUIETLY) test ! -f $(LIBJVM_DIZ) || \
 	    cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
 	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
 
--- a/make/hotspot.script	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/hotspot.script	Wed Feb 19 12:08:49 2014 -0800
@@ -49,7 +49,7 @@
     GDB=gdb
 fi
 
-# This is the name of the gdb binary to use
+# This is the name of the dbx binary to use
 if [ ! "$DBX" ]
 then
     DBX=dbx
@@ -68,9 +68,16 @@
 # End of user changeable parameters -----------------------------------------
 #
 
+OS=`uname -s`
+
 # Make sure the paths are fully specified, i.e. they must begin with /.
 REL_MYDIR=`dirname $0`
 MYDIR=`cd $REL_MYDIR && pwd`
+case "$OS" in
+CYGWIN*)
+    MYDIR=`cygpath -m "$MYDIR"`
+    ;;
+esac
 
 #
 # Look whether the user wants to run inside gdb
@@ -102,8 +109,17 @@
     JDK=@@JDK_IMPORT_PATH@@
 fi
 
-if [ "${JDK}" = "" ]; then
-    echo "Failed to find JDK.  Either ALT_JAVA_HOME is not set or JDK_IMPORT_PATH is empty."
+if [ "${JDK}" != "" ]; then
+    case "$OS" in
+    CYGWIN*)
+        JDK=`cygpath -m "$JDK"`
+        ;;
+	esac
+
+else
+    echo "Failed to find JDK." \
+        "Either ALT_JAVA_HOME is not set or JDK_IMPORT_PATH is empty."
+    exit 1
 fi
 
 # We will set the LD_LIBRARY_PATH as follows:
@@ -120,7 +136,6 @@
 
 
 # Set up a suitable LD_LIBRARY_PATH or DYLD_LIBRARY_PATH
-OS=`uname -s`
 if [ "${OS}" = "Darwin" ]
 then
     if [ -z "$DYLD_LIBRARY_PATH" ]
@@ -141,7 +156,7 @@
     export LD_LIBRARY_PATH
 fi
 
-JPARMS="-Dsun.java.launcher=gamma -XXaltjvm=$MYDIR $@ $JAVA_ARGS";
+JPARMS="-XXaltjvm=$MYDIR -Dsun.java.launcher.is_altjvm=true $@ $JAVA_ARGS";
 
 # Locate the java launcher
 LAUNCHER=$JDK/bin/java
@@ -152,6 +167,11 @@
 
 GDBSRCDIR=$MYDIR
 BASEDIR=`cd $MYDIR/../../.. && pwd`
+case "$OS" in
+CYGWIN*)
+    BASEDIR=`cygpath -m "$BASEDIR"`
+    ;;
+esac
 
 init_gdb() {
 # Create a gdb script in case we should run inside gdb
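
The new CYGWIN branches convert POSIX-style paths to mixed Windows form before they reach a native java.exe, which cannot resolve /cygdrive mounts. cygpath -m emits forward-slash Windows paths, for example (hypothetical path):

    $ cygpath -m /cygdrive/c/hotspot/build
    C:/hotspot/build
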
--- a/make/linux/makefiles/buildtree.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/linux/makefiles/buildtree.make	Wed Feb 19 12:08:49 2014 -0800
@@ -124,7 +124,7 @@
 # For dependencies and recursive makes.
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -362,6 +362,16 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
+dtrace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
 FORCE:
 
 .PHONY:  all FORCE
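
The new dtrace.make rule writes a small wrapper makefile into the build tree, matching the pattern of the other BUILDTREE_TARGETS. Going by the echo statements, the generated file amounts to the following (the BUILDTREE_COMMENT banner elided):

    include flags.make

    include $(GAMMADIR)/make/linux/makefiles/dtrace.make
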
--- a/make/linux/makefiles/dtrace.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/linux/makefiles/dtrace.make	Wed Feb 19 12:08:49 2014 -0800
@@ -42,18 +42,39 @@
 else
   SDT_H_FILE = /usr/include/sys/sdt.h
 endif
+
 DTRACE_ENABLED = $(shell test -f $(SDT_H_FILE) && echo $(SDT_H_FILE))
 REASON = "$(SDT_H_FILE) not found"
 
+endif # GCC version
+endif # OPENJDK
+
+
+DTRACE_COMMON_SRCDIR = $(GAMMADIR)/src/os/posix/dtrace
+DTRACE_PROG = dtrace
+DtraceOutDir = $(GENERATED)/dtracefiles
+
+$(DtraceOutDir):
+	mkdir $(DtraceOutDir)
+
+$(DtraceOutDir)/hotspot.h: $(DTRACE_COMMON_SRCDIR)/hotspot.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot.d
+
+$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d
+
+$(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hs_private.d
+
 ifneq ($(DTRACE_ENABLED),)
-  CFLAGS += -DDTRACE_ENABLED
-endif
-
-endif
+CFLAGS += -DDTRACE_ENABLED
+dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h
+else
+dtrace_gen_headers:
+	$(QUIETLY) echo "**NOTICE** Dtrace support disabled: $(REASON)"
 endif
 
 # Phony target used in vm.make build target to check whether enabled.
-.PHONY: dtraceCheck
 ifeq ($(DTRACE_ENABLED),)
 dtraceCheck:
 	$(QUIETLY) echo "**NOTICE** Dtrace support disabled: $(REASON)"
@@ -61,5 +82,7 @@
 dtraceCheck:
 endif
 
+.PHONY: dtrace_gen_headers dtraceCheck
+
 # It doesn't support HAVE_DTRACE_H though.
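
On Linux the probe headers are now generated from the shared posix .d sources with the same dtrace -h rules used on BSD and Solaris; on a Linux box the dtrace binary is typically SystemTap's dtrace(1) compatibility script. Expanding one rule by hand, with the output path shortened for illustration:

    dtrace $DTRACE_OPTS -C -I. -h -o dtracefiles/hotspot.h \
        -s $GAMMADIR/src/os/posix/dtrace/hotspot.d
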
 
--- a/make/linux/makefiles/jsig.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/linux/makefiles/jsig.make	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -74,9 +74,9 @@
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
-	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBJSIG_DEBUGINFO) || \
 	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
-	$(QUIETLY) test -f $(LIBJSIG_DIZ) && \
+	$(QUIETLY) test ! -f $(LIBJSIG_DIZ) || \
 	    cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
 	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
 
--- a/make/linux/makefiles/optimized.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/linux/makefiles/optimized.make	Wed Feb 19 12:08:49 2014 -0800
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- a/make/linux/makefiles/product.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/linux/makefiles/product.make	Wed Feb 19 12:08:49 2014 -0800
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- a/make/linux/makefiles/saproc.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/linux/makefiles/saproc.make	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -113,13 +113,13 @@
 endif
 
 install_saproc: $(BUILDLIBSAPROC)
-	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
-	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
-	  test -f $(LIBSAPROC_DEBUGINFO) &&                  \
+	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then                   \
+	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";           \
+	  test ! -f $(LIBSAPROC_DEBUGINFO) ||                      \
 	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO); \
-	  test -f $(LIBSAPROC_DIZ) &&                  \
-	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ); \
-	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
+	  test ! -f $(LIBSAPROC_DIZ) ||                            \
+	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ);             \
+	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";        \
 	fi
 
 .PHONY: install_saproc
--- a/make/linux/makefiles/top.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/linux/makefiles/top.make	Wed Feb 19 12:08:49 2014 -0800
@@ -80,7 +80,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff dtrace_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -102,6 +102,9 @@
 sa_stuff:
 	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
 
+dtrace_stuff: $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f dtrace.make dtrace_gen_headers $(MFLAGS-adjusted) GENERATED=$(GENERATED)
+
 # and the VM: must use other makefile with dependencies included
 
 # We have to go to great lengths to get control over the -jN argument
@@ -119,7 +122,7 @@
 	@$(UpdatePCH)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install gamma: the_vm
+install: the_vm
 	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[ois]"
--- a/make/linux/makefiles/vm.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/linux/makefiles/vm.make	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -366,9 +366,9 @@
 
 install_jvm: $(LIBJVM)
 	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
-	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBJVM_DEBUGINFO) || \
 	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
-	$(QUIETLY) test -f $(LIBJVM_DIZ) && \
+	$(QUIETLY) test ! -f $(LIBJVM_DIZ) || \
 	    cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
 	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
 
--- a/make/solaris/makefiles/buildtree.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/solaris/makefiles/buildtree.make	Wed Feb 19 12:08:49 2014 -0800
@@ -117,7 +117,7 @@
 # For dependencies and recursive makes.
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -349,6 +349,16 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
+dtrace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
 FORCE:
 
 .PHONY:  all FORCE
--- a/make/solaris/makefiles/dtrace.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/solaris/makefiles/dtrace.make	Wed Feb 19 12:08:49 2014 -0800
@@ -36,6 +36,8 @@
 
 else
 
+DtraceOutDir = $(GENERATED)/dtracefiles
+
 JVM_DB = libjvm_db
 LIBJVM_DB = libjvm_db.so
 
@@ -53,6 +55,7 @@
 GENOFFS = generate$(JVMOFFS)
 
 DTRACE_SRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/dtrace
+DTRACE_COMMON_SRCDIR = $(GAMMADIR)/src/os/posix/dtrace
 DTRACE = dtrace
 DTRACE.o = $(DTRACE).o
 
@@ -251,8 +254,8 @@
   endif
 endif
 
-$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
-             $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
+$(DTRACE).d: $(DTRACE_COMMON_SRCDIR)/hotspot.d $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d \
+             $(DTRACE_COMMON_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
 	$(QUIETLY) cat $^ > $@
 
 DTraced_Files = ciEnv.o \
@@ -326,6 +329,22 @@
 	$(QUIETLY) if [ -f $(GENOFFS) ]; then touch $(GENOFFS); fi
 	$(QUIETLY) if [ -f $(JVMOFFS.o) ]; then touch $(JVMOFFS.o); fi
 
+
+$(DtraceOutDir):
+	mkdir $(DtraceOutDir)
+
+$(DtraceOutDir)/hotspot.h: $(DTRACE_COMMON_SRCDIR)/hotspot.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot.d
+
+$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d
+
+$(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hs_private.d
+
+dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h
+
+
 .PHONY: dtraceCheck
 
 SYSTEM_DTRACE_H = /usr/include/dtrace.h
--- a/make/solaris/makefiles/jsig.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/solaris/makefiles/jsig.make	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -79,9 +79,9 @@
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
-	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBJSIG_DEBUGINFO) || \
 	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
-	$(QUIETLY) test -f $(LIBJSIG_DIZ) && \
+	$(QUIETLY) test ! -f $(LIBJSIG_DIZ) || \
 	    cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
 	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
 
--- a/make/solaris/makefiles/optimized.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/solaris/makefiles/optimized.make	Wed Feb 19 12:08:49 2014 -0800
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- a/make/solaris/makefiles/product.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/solaris/makefiles/product.make	Wed Feb 19 12:08:49 2014 -0800
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- a/make/solaris/makefiles/saproc.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/solaris/makefiles/saproc.make	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -147,13 +147,13 @@
 endif
 
 install_saproc: $(BULDLIBSAPROC)
-	$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then             \
-	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
-	  test -f $(LIBSAPROC_DEBUGINFO) &&             \
+	$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then                   \
+	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";           \
+	  test ! -f $(LIBSAPROC_DEBUGINFO) ||                      \
 	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO); \
-	  test -f $(LIBSAPROC_DIZ) &&             \
-	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ); \
-	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
+	  test ! -f $(LIBSAPROC_DIZ) ||                            \
+	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ);             \
+	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";        \
 	fi
 
 .PHONY: install_saproc
--- a/make/solaris/makefiles/top.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/solaris/makefiles/top.make	Wed Feb 19 12:08:49 2014 -0800
@@ -73,7 +73,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -95,6 +95,9 @@
 sa_stuff:
 	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
 
+dtrace_stuff: $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f dtrace.make dtrace_gen_headers $(MFLAGS-adjusted) GENERATED=$(GENERATED)
+
 # and the VM: must use other makefile with dependencies included
 
 # We have to go to great lengths to get control over the -jN argument
@@ -111,7 +114,7 @@
 the_vm: vm_build_preliminaries $(adjust-mflags)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install gamma: the_vm
+install: the_vm
 	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[oi]"
--- a/make/solaris/makefiles/vm.make	Thu Jan 30 14:30:01 2014 +0100
+++ b/make/solaris/makefiles/vm.make	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -333,9 +333,9 @@
 
 install_jvm: $(LIBJVM)
 	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
-	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBJVM_DEBUGINFO) || \
 	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
-	$(QUIETLY) test -f $(LIBJVM_DIZ) && \
+	$(QUIETLY) test ! -f $(LIBJVM_DIZ) || \
 	    cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
 	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
 
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -49,7 +49,8 @@
     M_family             = 15,
     T_family             = 16,
     T1_model             = 17,
-    aes_instructions     = 18
+    sparc5_instructions  = 18,
+    aes_instructions     = 19
   };
 
   enum Feature_Flag_Set {
@@ -74,6 +75,7 @@
     M_family_m              = 1 << M_family,
     T_family_m              = 1 << T_family,
     T1_model_m              = 1 << T1_model,
+    sparc5_instructions_m   = 1 << sparc5_instructions,
     aes_instructions_m      = 1 << aes_instructions,
 
     generic_v8_m        = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
@@ -125,6 +127,7 @@
   static bool has_vis3()                { return (_features & vis3_instructions_m) != 0; }
   static bool has_blk_init()            { return (_features & blk_init_instructions_m) != 0; }
   static bool has_cbcond()              { return (_features & cbcond_instructions_m) != 0; }
+  static bool has_sparc5_instr()        { return (_features & sparc5_instructions_m) != 0; }
   static bool has_aes()                 { return (_features & aes_instructions_m) != 0; }
 
   static bool supports_compare_and_exchange()
@@ -136,6 +139,7 @@
 
   static bool is_M_series()             { return is_M_family(_features); }
   static bool is_T4()                   { return is_T_family(_features) && has_cbcond(); }
+  static bool is_T7()                   { return is_T_family(_features) && has_sparc5_instr(); }
 
   // Fujitsu SPARC64
   static bool is_sparc64()              { return (_features & sparc64_family_m) != 0; }
@@ -155,7 +159,7 @@
   static const char* cpu_features()     { return _features_str; }
 
   static intx prefetch_data_size()  {
-    return is_T4() ? 32 : 64;  // default prefetch block size on sparc
+    return is_T4() && !is_T7() ? 32 : 64;  // default prefetch block size on sparc
   }
 
   // Prefetch
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -107,10 +107,6 @@
   return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
 }
 
-static inline Address at_tos_p3() {
-  return Address(rsp,  Interpreter::expr_offset_in_bytes(3));
-}
-
 // Condition conversion
 static Assembler::Condition j_not(TemplateTable::Condition cc) {
   switch (cc) {
--- a/src/os/bsd/dtrace/hotspot.d	Thu Jan 30 14:30:01 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hotspot {
-  probe class__loaded(char*, uintptr_t, void*, uintptr_t);
-  probe class__unloaded(char*, uintptr_t, void*, uintptr_t);
-  probe class__initialization__required(char*, uintptr_t, void*, intptr_t);
-  probe class__initialization__recursive(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__concurrent(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__erroneous(char*, uintptr_t, void*, intptr_t, int);
-  probe class__initialization__super__failed(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__clinit(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__error(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__end(char*, uintptr_t, void*, intptr_t,int);
-  probe vm__init__begin();
-  probe vm__init__end();
-  probe vm__shutdown();
-  probe vmops__request(char*, uintptr_t, int);
-  probe vmops__begin(char*, uintptr_t, int);
-  probe vmops__end(char*, uintptr_t, int);
-  probe gc__begin(uintptr_t);
-  probe gc__end();
-  probe mem__pool__gc__begin(
-    char*, uintptr_t, char*, uintptr_t, 
-    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe mem__pool__gc__end(
-    char*, uintptr_t, char*, uintptr_t, 
-    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__sleep__begin(long long);
-  probe thread__sleep__end(int);
-  probe thread__yield();
-  probe thread__park__begin(uintptr_t, int, long long);
-  probe thread__park__end(uintptr_t);
-  probe thread__unpark(uintptr_t);
-  probe method__compile__begin(
-    const char*, uintptr_t, const char*, uintptr_t, const char*, uintptr_t, const char*, uintptr_t); 
-  probe method__compile__end(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, 
-    char*, uintptr_t, uintptr_t); 
-  probe compiled__method__load(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, void*, uintptr_t);
-  probe compiled__method__unload(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
-  probe monitor__contended__enter(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__contended__entered(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__contended__exit(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__wait(uintptr_t, uintptr_t, char*, uintptr_t, uintptr_t);
-  probe monitor__waited(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__notify(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__notifyAll(uintptr_t, uintptr_t, char*, uintptr_t);
-
-  probe object__alloc(int, char*, uintptr_t, uintptr_t);
-  probe method__entry(
-    int, char*, int, char*, int, char*, int);
-  probe method__return(
-    int, char*, int, char*, int, char*, int);
-};
-
-#pragma D attributes Evolving/Evolving/Common provider hotspot provider
-#pragma D attributes Private/Private/Unknown provider hotspot module
-#pragma D attributes Private/Private/Unknown provider hotspot function
-#pragma D attributes Evolving/Evolving/Common provider hotspot name
-#pragma D attributes Evolving/Evolving/Common provider hotspot args
--- a/src/os/bsd/dtrace/hotspot_jni.d	Thu Jan 30 14:30:01 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,506 +0,0 @@
-/*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hotspot_jni {
-  probe AllocObject__entry(void*, void*);
-  probe AllocObject__return(void*);
-  probe AttachCurrentThreadAsDaemon__entry(void*, void**, void*);
-  probe AttachCurrentThreadAsDaemon__return(uint32_t);
-  probe AttachCurrentThread__entry(void*, void**, void*);
-  probe AttachCurrentThread__return(uint32_t);
-  probe CallBooleanMethodA__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethodA__return(uintptr_t);
-  probe CallBooleanMethod__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethod__return(uintptr_t);
-  probe CallBooleanMethodV__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethodV__return(uintptr_t);
-  probe CallByteMethodA__entry(void*, void*, uintptr_t);
-  probe CallByteMethodA__return(char);
-  probe CallByteMethod__entry(void*, void*, uintptr_t);
-  probe CallByteMethod__return(char);
-  probe CallByteMethodV__entry(void*, void*, uintptr_t);
-  probe CallByteMethodV__return(char);
-  probe CallCharMethodA__entry(void*, void*, uintptr_t);
-  probe CallCharMethodA__return(uint16_t);
-  probe CallCharMethod__entry(void*, void*, uintptr_t);
-  probe CallCharMethod__return(uint16_t);
-  probe CallCharMethodV__entry(void*, void*, uintptr_t);
-  probe CallCharMethodV__return(uint16_t);
-  probe CallDoubleMethodA__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethodA__return();
-  probe CallDoubleMethod__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethod__return();
-  probe CallDoubleMethodV__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethodV__return();
-  probe CallFloatMethodA__entry(void*, void*, uintptr_t);
-  probe CallFloatMethodA__return();
-  probe CallFloatMethod__entry(void*, void*, uintptr_t);
-  probe CallFloatMethod__return();
-  probe CallFloatMethodV__entry(void*, void*, uintptr_t);
-  probe CallFloatMethodV__return();
-  probe CallIntMethodA__entry(void*, void*, uintptr_t);
-  probe CallIntMethodA__return(uint32_t);
-  probe CallIntMethod__entry(void*, void*, uintptr_t);
-  probe CallIntMethod__return(uint32_t);
-  probe CallIntMethodV__entry(void*, void*, uintptr_t);
-  probe CallIntMethodV__return(uint32_t);
-  probe CallLongMethodA__entry(void*, void*, uintptr_t);
-  probe CallLongMethodA__return(uintptr_t);
-  probe CallLongMethod__entry(void*, void*, uintptr_t);
-  probe CallLongMethod__return(uintptr_t);
-  probe CallLongMethodV__entry(void*, void*, uintptr_t);
-  probe CallLongMethodV__return(uintptr_t);
-  probe CallNonvirtualBooleanMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethodA__return(uintptr_t);
-  probe CallNonvirtualBooleanMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethod__return(uintptr_t);
-  probe CallNonvirtualBooleanMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethodV__return(uintptr_t);
-  probe CallNonvirtualByteMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethodA__return(char);
-  probe CallNonvirtualByteMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethod__return(char);
-  probe CallNonvirtualByteMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethodV__return(char);
-  probe CallNonvirtualCharMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethodA__return(uint16_t);
-  probe CallNonvirtualCharMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethod__return(uint16_t);
-  probe CallNonvirtualCharMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethodV__return(uint16_t);
-  probe CallNonvirtualDoubleMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethodA__return();
-  probe CallNonvirtualDoubleMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethod__return();
-  probe CallNonvirtualDoubleMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethodV__return();
-  probe CallNonvirtualFloatMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethodA__return();
-  probe CallNonvirtualFloatMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethod__return();
-  probe CallNonvirtualFloatMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethodV__return();
-  probe CallNonvirtualIntMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethodA__return(uint32_t);
-  probe CallNonvirtualIntMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethod__return(uint32_t);
-  probe CallNonvirtualIntMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethodV__return(uint32_t);
-  probe CallNonvirtualLongMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethodA__return(uintptr_t);
-  probe CallNonvirtualLongMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethod__return(uintptr_t);
-  probe CallNonvirtualLongMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethodV__return(uintptr_t);
-  probe CallNonvirtualObjectMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethodA__return(void*);
-  probe CallNonvirtualObjectMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethod__return(void*);
-  probe CallNonvirtualObjectMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethodV__return(void*);
-  probe CallNonvirtualShortMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethodA__return(uint16_t);
-  probe CallNonvirtualShortMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethod__return(uint16_t);
-  probe CallNonvirtualShortMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethodV__return(uint16_t);
-  probe CallNonvirtualVoidMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualVoidMethodA__return();
-  probe CallNonvirtualVoidMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualVoidMethod__return();
-  probe CallNonvirtualVoidMethodV__entry(void*, void*, void*, uintptr_t);  
-  probe CallNonvirtualVoidMethodV__return();
-  probe CallObjectMethodA__entry(void*, void*, uintptr_t);
-  probe CallObjectMethodA__return(void*);
-  probe CallObjectMethod__entry(void*, void*, uintptr_t);
-  probe CallObjectMethod__return(void*);
-  probe CallObjectMethodV__entry(void*, void*, uintptr_t);
-  probe CallObjectMethodV__return(void*);
-  probe CallShortMethodA__entry(void*, void*, uintptr_t);
-  probe CallShortMethodA__return(uint16_t);
-  probe CallShortMethod__entry(void*, void*, uintptr_t);
-  probe CallShortMethod__return(uint16_t);
-  probe CallShortMethodV__entry(void*, void*, uintptr_t);
-  probe CallShortMethodV__return(uint16_t);
-  probe CallStaticBooleanMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethodA__return(uintptr_t);
-  probe CallStaticBooleanMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethod__return(uintptr_t);
-  probe CallStaticBooleanMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethodV__return(uintptr_t);
-  probe CallStaticByteMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethodA__return(char);
-  probe CallStaticByteMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethod__return(char);
-  probe CallStaticByteMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethodV__return(char);
-  probe CallStaticCharMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethodA__return(uint16_t);
-  probe CallStaticCharMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethod__return(uint16_t);
-  probe CallStaticCharMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethodV__return(uint16_t);
-  probe CallStaticDoubleMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethodA__return();
-  probe CallStaticDoubleMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethod__return();
-  probe CallStaticDoubleMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethodV__return();
-  probe CallStaticFloatMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethodA__return();
-  probe CallStaticFloatMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethod__return();
-  probe CallStaticFloatMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethodV__return();
-  probe CallStaticIntMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethodA__return(uint32_t);
-  probe CallStaticIntMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethod__return(uint32_t);
-  probe CallStaticIntMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethodV__return(uint32_t);
-  probe CallStaticLongMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethodA__return(uintptr_t);
-  probe CallStaticLongMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethod__return(uintptr_t);
-  probe CallStaticLongMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethodV__return(uintptr_t);
-  probe CallStaticObjectMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethodA__return(void*);
-  probe CallStaticObjectMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethod__return(void*);
-  probe CallStaticObjectMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethodV__return(void*);
-  probe CallStaticShortMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethodA__return(uint16_t);
-  probe CallStaticShortMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethod__return(uint16_t);
-  probe CallStaticShortMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethodV__return(uint16_t);
-  probe CallStaticVoidMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticVoidMethodA__return();
-  probe CallStaticVoidMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticVoidMethod__return(); 
-  probe CallStaticVoidMethodV__entry(void*, void*, uintptr_t);  
-  probe CallStaticVoidMethodV__return();
-  probe CallVoidMethodA__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethodA__return();
-  probe CallVoidMethod__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethod__return(); 
-  probe CallVoidMethodV__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethodV__return();
-  probe CreateJavaVM__entry(void**, void**, void*);
-  probe CreateJavaVM__return(uint32_t);
-  probe DefineClass__entry(void*, const char*, void*, char*, uintptr_t);
-  probe DefineClass__return(void*);
-  probe DeleteGlobalRef__entry(void*, void*);
-  probe DeleteGlobalRef__return();
-  probe DeleteLocalRef__entry(void*, void*);
-  probe DeleteLocalRef__return();
-  probe DeleteWeakGlobalRef__entry(void*, void*);
-  probe DeleteWeakGlobalRef__return();
-  probe DestroyJavaVM__entry(void*);
-  probe DestroyJavaVM__return(uint32_t);
-  probe DetachCurrentThread__entry(void*);
-  probe DetachCurrentThread__return(uint32_t);
-  probe EnsureLocalCapacity__entry(void*, uint32_t);
-  probe EnsureLocalCapacity__return(uint32_t);
-  probe ExceptionCheck__entry(void*);
-  probe ExceptionCheck__return(uintptr_t);
-  probe ExceptionClear__entry(void*);
-  probe ExceptionClear__return();
-  probe ExceptionDescribe__entry(void*);  
-  probe ExceptionDescribe__return();
-  probe ExceptionOccurred__entry(void*);
-  probe ExceptionOccurred__return(void*);
-  probe FatalError__entry(void* env, const char*);
-  probe FindClass__entry(void*, const char*);
-  probe FindClass__return(void*);
-  probe FromReflectedField__entry(void*, void*);
-  probe FromReflectedField__return(uintptr_t);
-  probe FromReflectedMethod__entry(void*, void*);
-  probe FromReflectedMethod__return(uintptr_t);
-  probe GetArrayLength__entry(void*, void*);
-  probe GetArrayLength__return(uintptr_t);
-  probe GetBooleanArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetBooleanArrayElements__return(uintptr_t*);
-  probe GetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
-  probe GetBooleanArrayRegion__return();
-  probe GetBooleanField__entry(void*, void*, uintptr_t);
-  probe GetBooleanField__return(uintptr_t);
-  probe GetByteArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetByteArrayElements__return(char*);
-  probe GetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
-  probe GetByteArrayRegion__return();
-  probe GetByteField__entry(void*, void*, uintptr_t);
-  probe GetByteField__return(char);
-  probe GetCharArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetCharArrayElements__return(uint16_t*);
-  probe GetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetCharArrayRegion__return();
-  probe GetCharField__entry(void*, void*, uintptr_t);
-  probe GetCharField__return(uint16_t);
-  probe GetCreatedJavaVMs__entry(void**, uintptr_t, uintptr_t*);
-  probe GetCreatedJavaVMs__return(uintptr_t);
-  probe GetDefaultJavaVMInitArgs__entry(void*);
-  probe GetDefaultJavaVMInitArgs__return(uint32_t);
-  probe GetDirectBufferAddress__entry(void*, void*);
-  probe GetDirectBufferAddress__return(void*);
-  probe GetDirectBufferCapacity__entry(void*, void*);
-  probe GetDirectBufferCapacity__return(uintptr_t);
-  probe GetDoubleArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetDoubleArrayElements__return(double*);
-  probe GetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, double*);
-  probe GetDoubleArrayRegion__return();
-  probe GetDoubleField__entry(void*, void*, uintptr_t);
-  probe GetDoubleField__return();
-  probe GetEnv__entry(void*, void*, uint32_t);
-  probe GetEnv__return(uint32_t);
-  probe GetFieldID__entry(void*, void*, const char*, const char*);
-  probe GetFieldID__return(uintptr_t);
-  probe GetFloatArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetFloatArrayElements__return(float*);
-  probe GetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, float*);
-  probe GetFloatArrayRegion__return();
-  probe GetFloatField__entry(void*, void*, uintptr_t);
-  probe GetFloatField__return();
-  probe GetIntArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetIntArrayElements__return(uint32_t*);
-  probe GetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint32_t*);
-  probe GetIntArrayRegion__return();
-  probe GetIntField__entry(void*, void*, uintptr_t);
-  probe GetIntField__return(uint32_t);
-  probe GetJavaVM__entry(void*, void**);
-  probe GetJavaVM__return(uint32_t);
-  probe GetLongArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetLongArrayElements__return(uintptr_t*);
-  probe GetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
-  probe GetLongArrayRegion__return();
-  probe GetLongField__entry(void*, void*, uintptr_t);
-  probe GetLongField__return(uintptr_t);
-  probe GetMethodID__entry(void*, void*, const char*, const char*);
-  probe GetMethodID__return(uintptr_t);
-  probe GetObjectArrayElement__entry(void*, void*, uintptr_t);
-  probe GetObjectArrayElement__return(void*);
-  probe GetObjectClass__entry(void*, void*);
-  probe GetObjectClass__return(void*);
-  probe GetObjectField__entry(void*, void*, uintptr_t);
-  probe GetObjectField__return(void*);
-  probe GetObjectRefType__entry(void*, void*);
-  probe GetObjectRefType__return(void*);
-  probe GetPrimitiveArrayCritical__entry(void*, void*, uintptr_t*);
-  probe GetPrimitiveArrayCritical__return(void*);
-  probe GetShortArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetShortArrayElements__return(uint16_t*);
-  probe GetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetShortArrayRegion__return();
-  probe GetShortField__entry(void*, void*, uintptr_t);
-  probe GetShortField__return(uint16_t);
-  probe GetStaticBooleanField__entry(void*, void*, uintptr_t);
-  probe GetStaticBooleanField__return(uintptr_t);
-  probe GetStaticByteField__entry(void*, void*, uintptr_t);
-  probe GetStaticByteField__return(char);
-  probe GetStaticCharField__entry(void*, void*, uintptr_t);
-  probe GetStaticCharField__return(uint16_t);
-  probe GetStaticDoubleField__entry(void*, void*, uintptr_t);
-  probe GetStaticDoubleField__return();
-  probe GetStaticFieldID__entry(void*, void*, const char*, const char*);
-  probe GetStaticFieldID__return(uintptr_t);
-  probe GetStaticFloatField__entry(void*, void*, uintptr_t);
-  probe GetStaticFloatField__return();
-  probe GetStaticIntField__entry(void*, void*, uintptr_t);
-  probe GetStaticIntField__return(uint32_t);
-  probe GetStaticLongField__entry(void*, void*, uintptr_t);
-  probe GetStaticLongField__return(uintptr_t);
-  probe GetStaticMethodID__entry(void*, void*, const char*, const char*);
-  probe GetStaticMethodID__return(uintptr_t);
-  probe GetStaticObjectField__entry(void*, void*, uintptr_t);
-  probe GetStaticObjectField__return(void*);
-  probe GetStaticShortField__entry(void*, void*, uintptr_t);
-  probe GetStaticShortField__return(uint16_t);
-  probe GetStringChars__entry(void*, void*, uintptr_t*);
-  probe GetStringChars__return(const uint16_t*);
-  probe GetStringCritical__entry(void*, void*, uintptr_t*);
-  probe GetStringCritical__return(const uint16_t*);
-  probe GetStringLength__entry(void*, void*);
-  probe GetStringLength__return(uintptr_t);
-  probe GetStringRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetStringRegion__return();
-  probe GetStringUTFChars__entry(void*, void*, uintptr_t*);
-  probe GetStringUTFChars__return(const char*);
-  probe GetStringUTFLength__entry(void*, void*);
-  probe GetStringUTFLength__return(uintptr_t);
-  probe GetStringUTFRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
-  probe GetStringUTFRegion__return();
-  probe GetSuperclass__entry(void*, void*);
-  probe GetSuperclass__return(void*);
-  probe GetVersion__entry(void*);
-  probe GetVersion__return(uint32_t);
-  probe IsAssignableFrom__entry(void*, void*, void*);
-  probe IsAssignableFrom__return(uintptr_t);
-  probe IsInstanceOf__entry(void*, void*, void*);
-  probe IsInstanceOf__return(uintptr_t);
-  probe IsSameObject__entry(void*, void*, void*);
-  probe IsSameObject__return(uintptr_t);
-  probe MonitorEnter__entry(void*, void*);
-  probe MonitorEnter__return(uint32_t);
-  probe MonitorExit__entry(void*, void*);
-  probe MonitorExit__return(uint32_t);
-  probe NewBooleanArray__entry(void*, uintptr_t);
-  probe NewBooleanArray__return(void*);
-  probe NewByteArray__entry(void*, uintptr_t);
-  probe NewByteArray__return(void*);
-  probe NewCharArray__entry(void*, uintptr_t);
-  probe NewCharArray__return(void*);
-  probe NewDirectByteBuffer__entry(void*, void*, uintptr_t);
-  probe NewDirectByteBuffer__return(void*);
-  probe NewDoubleArray__entry(void*, uintptr_t);
-  probe NewDoubleArray__return(void*);
-  probe NewFloatArray__entry(void*, uintptr_t);
-  probe NewFloatArray__return(void*);
-  probe NewGlobalRef__entry(void*, void*);
-  probe NewGlobalRef__return(void*);
-  probe NewIntArray__entry(void*, uintptr_t);
-  probe NewIntArray__return(void*);
-  probe NewLocalRef__entry(void*, void*);
-  probe NewLocalRef__return(void*);
-  probe NewLongArray__entry(void*, uintptr_t);
-  probe NewLongArray__return(void*);
-  probe NewObjectA__entry(void*, void*, uintptr_t);  
-  probe NewObjectA__return(void*);
-  probe NewObjectArray__entry(void*, uintptr_t, void*, void*);
-  probe NewObjectArray__return(void*);
-  probe NewObject__entry(void*, void*, uintptr_t); 
-  probe NewObject__return(void*);
-  probe NewObjectV__entry(void*, void*, uintptr_t);  
-  probe NewObjectV__return(void*);
-  probe NewShortArray__entry(void*, uintptr_t);
-  probe NewShortArray__return(void*);
-  probe NewString__entry(void*, const uint16_t*, uintptr_t);
-  probe NewString__return(void*);
-  probe NewStringUTF__entry(void*, const char*);
-  probe NewStringUTF__return(void*);
-  probe NewWeakGlobalRef__entry(void*, void*);
-  probe NewWeakGlobalRef__return(void*);
-  probe PopLocalFrame__entry(void*, void*);
-  probe PopLocalFrame__return(void*);
-  probe PushLocalFrame__entry(void*, uint32_t);
-  probe PushLocalFrame__return(uint32_t);
-  probe RegisterNatives__entry(void*, void*, const void*, uint32_t);  
-  probe RegisterNatives__return(uint32_t);
-  probe ReleaseBooleanArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
-  probe ReleaseBooleanArrayElements__return();
-  probe ReleaseByteArrayElements__entry(void*, void*, char*, uint32_t);
-  probe ReleaseByteArrayElements__return();
-  probe ReleaseCharArrayElements__entry(void*, void*, uint16_t*, uint32_t);
-  probe ReleaseCharArrayElements__return();
-  probe ReleaseDoubleArrayElements__entry(void*, void*, double*, uint32_t);
-  probe ReleaseDoubleArrayElements__return();
-  probe ReleaseFloatArrayElements__entry(void*, void*, float*, uint32_t);
-  probe ReleaseFloatArrayElements__return();
-  probe ReleaseIntArrayElements__entry(void*, void*, uint32_t*, uint32_t);
-  probe ReleaseIntArrayElements__return();
-  probe ReleaseLongArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
-  probe ReleaseLongArrayElements__return();
-  probe ReleasePrimitiveArrayCritical__entry(void*, void*, void*, uint32_t);
-  probe ReleasePrimitiveArrayCritical__return();
-  probe ReleaseShortArrayElements__entry(void*, void*, uint16_t*, uint32_t);
-  probe ReleaseShortArrayElements__return();
-  probe ReleaseStringChars__entry(void*, void*, const uint16_t*);
-  probe ReleaseStringChars__return();
-  probe ReleaseStringCritical__entry(void*, void*, const uint16_t*);
-  probe ReleaseStringCritical__return();
-  probe ReleaseStringUTFChars__entry(void*, void*, const char*);
-  probe ReleaseStringUTFChars__return();
-  probe SetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
-  probe SetBooleanArrayRegion__return();
-  probe SetBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetBooleanField__return();
-  probe SetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const char*);
-  probe SetByteArrayRegion__return();
-  probe SetByteField__entry(void*, void*, uintptr_t, char);
-  probe SetByteField__return();
-  probe SetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
-  probe SetCharArrayRegion__return();
-  probe SetCharField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetCharField__return();
-  probe SetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const double*);
-  probe SetDoubleArrayRegion__return();
-  probe SetDoubleField__entry(void*, void*, uintptr_t);
-  probe SetDoubleField__return();
-  probe SetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const float*);
-  probe SetFloatArrayRegion__return();
-  probe SetFloatField__entry(void*, void*, uintptr_t);
-  probe SetFloatField__return();
-  probe SetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint32_t*);
-  probe SetIntArrayRegion__return();
-  probe SetIntField__entry(void*, void*, uintptr_t, uint32_t);
-  probe SetIntField__return();
-  probe SetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
-  probe SetLongArrayRegion__return();
-  probe SetLongField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetLongField__return();
-  probe SetObjectArrayElement__entry(void*, void*, uintptr_t, void*);
-  probe SetObjectArrayElement__return();
-  probe SetObjectField__entry(void*, void*, uintptr_t, void*);
-  probe SetObjectField__return();
-  probe SetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
-  probe SetShortArrayRegion__return();
-  probe SetShortField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetShortField__return();
-  probe SetStaticBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetStaticBooleanField__return();
-  probe SetStaticByteField__entry(void*, void*, uintptr_t, char);
-  probe SetStaticByteField__return();
-  probe SetStaticCharField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetStaticCharField__return();
-  probe SetStaticDoubleField__entry(void*, void*, uintptr_t);
-  probe SetStaticDoubleField__return();
-  probe SetStaticFloatField__entry(void*, void*, uintptr_t);
-  probe SetStaticFloatField__return();
-  probe SetStaticIntField__entry(void*, void*, uintptr_t, uint32_t);
-  probe SetStaticIntField__return();
-  probe SetStaticLongField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetStaticLongField__return();
-  probe SetStaticObjectField__entry(void*, void*, uintptr_t, void*);
-  probe SetStaticObjectField__return();
-  probe SetStaticShortField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetStaticShortField__return();
-  probe Throw__entry(void*, void*);
-  probe Throw__return(intptr_t);
-  probe ThrowNew__entry(void*, void*, const char*);  
-  probe ThrowNew__return(intptr_t);  
-  probe ToReflectedField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe ToReflectedField__return(void*);
-  probe ToReflectedMethod__entry(void*, void*, uintptr_t, uintptr_t);
-  probe ToReflectedMethod__return(void*);
-  probe UnregisterNatives__entry(void*, void*);  
-  probe UnregisterNatives__return(uint32_t);
-};
-
-#pragma D attributes Standard/Standard/Common provider hotspot_jni provider
-#pragma D attributes Private/Private/Unknown provider hotspot_jni module
-#pragma D attributes Private/Private/Unknown provider hotspot_jni function
-#pragma D attributes Standard/Standard/Common provider hotspot_jni name
-#pragma D attributes Evolving/Evolving/Common provider hotspot_jni args
-
--- a/src/os/bsd/dtrace/hs_private.d	Thu Jan 30 14:30:01 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hs_private {
-  probe hashtable__new_entry(void*, uint32_t, uintptr_t, void*); 
-  probe safepoint__begin();
-  probe safepoint__end();
-  probe cms__initmark__begin();
-  probe cms__initmark__end();
-  probe cms__remark__begin();
-  probe cms__remark__end();
-};
-
-#pragma D attributes Private/Private/Common provider hs_private provider
-#pragma D attributes Private/Private/Unknown provider hs_private module
-#pragma D attributes Private/Private/Unknown provider hs_private function
-#pragma D attributes Private/Private/Common provider hs_private name
-#pragma D attributes Private/Private/Common provider hs_private args
-
--- a/src/os/bsd/vm/os_bsd.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os/bsd/vm/os_bsd.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1557,6 +1557,17 @@
 }
 #endif /* !__APPLE__ */
 
+void* os::get_default_process_handle() {
+#ifdef __APPLE__
+  // MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY
+  // to avoid finding unexpected symbols on second (or later)
+  // loads of a library.
+  return (void*)::dlopen(NULL, RTLD_FIRST);
+#else
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+#endif
+}
+
 // XXX: Do we need a lock around this as per Linux?
 void* os::dll_lookup(void* handle, const char* name) {
   return dlsym(handle, name);
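A hedged sketch of how the new handle is consumed: os::dll_lookup() above resolves symbols against it, which is how statically linked agents are found via the Agent_OnLoad_<lib_name> naming convention (see the os_posix.cpp hunk further down). The agent name "foo" is hypothetical:

    // Hypothetical lookup of a statically linked agent named "foo".
    void* handle = os::get_default_process_handle();       // dlopen(NULL, ...)
    void* entry  = os::dll_lookup(handle, "Agent_OnLoad_foo");
    if (entry != NULL) {
      // agent "foo" is linked into the executable itself
    }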
@@ -1731,12 +1742,14 @@
   if (rp == NULL)
     return;
 
-  if (Arguments::created_by_gamma_launcher()) {
-    // Support for the gamma launcher.  Typical value for buf is
-    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm".  If "/jre/lib/" appears at
-    // the right place in the string, then assume we are installed in a JDK and
-    // we're done.  Otherwise, check for a JAVA_HOME environment variable and
-    // construct a path to the JVM being overridden.
+  if (Arguments::sun_java_launcher_is_altjvm()) {
+    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
+    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so"
+    // or "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.dylib". If "/jre/lib/"
+    // appears at the right place in the string, then assume we are
+    // installed in a JDK and we're done. Otherwise, check for a
+    // JAVA_HOME environment variable and construct a path to the JVM
+    // being overridden.
 
     const char *p = buf + strlen(buf) - 1;
     for (int count = 0; p > buf && count < 5; ++count) {
@@ -2579,9 +2592,21 @@
   }
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Uninterruptible sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  }
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
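A hypothetical caller, to show the intent of the new primitive: unlike os::sleep(), naked_short_sleep() needs no Thread context and cannot be interrupted, which suits tight backoff loops. try_acquire() below is an assumed placeholder, not HotSpot code:

    // Sketch of a spin-then-sleep backoff using the new primitive.
    int spins = 0;
    while (!try_acquire()) {        // hypothetical contended operation
      if (++spins % 100 != 0) {
        SpinPause();                // brief on-CPU pause while contended
      } else {
        os::naked_short_sleep(1);   // 1 ms; argument must stay < 1000
      }
    }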
--- a/src/os/linux/vm/os_linux.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2109,6 +2109,9 @@
   return res;
 }
 
+void* os::get_default_process_handle() {
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+}
 
 static bool _print_ascii_file(const char* filename, outputStream* st) {
   int fd = ::open(filename, O_RDONLY);
@@ -2330,13 +2333,14 @@
   if (rp == NULL)
     return;
 
-  if (Arguments::created_by_gamma_launcher()) {
-    // Support for the gamma launcher.  Typical value for buf is
-    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
-    // the right place in the string, then assume we are installed in a JDK and
-    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
-    // up the path so it looks like libjvm.so is installed there (append a
-    // fake suffix hotspot/libjvm.so).
+  if (Arguments::sun_java_launcher_is_altjvm()) {
+    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
+    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
+    // If "/jre/lib/" appears at the right place in the string, then
+    // assume we are installed in a JDK and we're done. Otherwise, check
+    // for a JAVA_HOME environment variable and fix up the path so it
+    // looks like libjvm.so is installed there (append a fake suffix
+    // hotspot/libjvm.so).
     const char *p = buf + strlen(buf) - 1;
     for (int count = 0; p > buf && count < 5; ++count) {
       for (--p; p > buf && *p != '/'; --p)
@@ -2958,7 +2962,9 @@
 
   unsigned char vec[1];
   unsigned imin = 1, imax = pages + 1, imid;
-  int mincore_return_value;
+  int mincore_return_value = 0;
+
+  assert(imin <= imax, "Unexpected page size");
 
   while (imin < imax) {
     imid = (imax + imin) / 2;
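For context, the surrounding loop is a binary search that probes residency one page at a time with mincore(2). A self-contained sketch of the idiom (not HotSpot code; it assumes residency is monotone over the range, as for a stack committed from the top down, and that start is page-aligned):

    #include <sys/mman.h>
    #include <unistd.h>

    // Index of the first resident page in [start, start + pages), or pages.
    static size_t first_resident_page(char* start, size_t pages) {
      const size_t page = (size_t) sysconf(_SC_PAGESIZE);
      unsigned char vec[1];
      size_t lo = 0, hi = pages;               // answer lies in [lo, hi]
      while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (mincore(start + mid * page, page, vec) == 0 && (vec[0] & 1)) {
          hi = mid;                            // resident: answer is <= mid
        } else {
          lo = mid + 1;                        // not resident: answer is > mid
        }
      }
      return lo;
    }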
@@ -3830,9 +3836,33 @@
   }
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+//
+// Short sleep, direct OS call.
+//
+// Note: certain versions of the Linux CFS scheduler (since 2.6.23) do not
+// guarantee that sched_yield(2) will actually give up the CPU:
+//
+//   * A thread alone on its particular CPU simply keeps running.
+//   * Before the introduction of "skip_buddy" (pre 2.6.39), a yield was
+//     effectively a no-op unless "compat_yield" was enabled.
+//
+// So calling this with ms == 0 is an alternative to yielding.
+//
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Uninterruptible sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  }
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
--- a/src/os/linux/vm/perfMemory_linux.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -891,8 +891,16 @@
   FREE_C_HEAP_ARRAY(char, filename, mtInternal);
 
   // open the shared memory file for the given vmid
-  fd = open_sharedmem_file(rfilename, file_flags, CHECK);
-  assert(fd != OS_ERR, "unexpected value");
+  fd = open_sharedmem_file(rfilename, file_flags, THREAD);
+
+  if (fd == OS_ERR) {
+    return;
+  }
+
+  if (HAS_PENDING_EXCEPTION) {
+    ::close(fd);
+    return;
+  }
 
   if (*sizep == 0) {
     size = sharedmem_filesize(fd, CHECK);
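The point of the hunk: CHECK (utilities/exceptions.hpp) expands roughly to "THREAD); if (HAS_PENDING_EXCEPTION) return;", so the old call returned from this function before a descriptor paired with a pending exception could be closed, and the assert could fire on a legitimate open failure. Passing THREAD leaves the decision with the caller; the pattern, with the reasoning spelled out:

    fd = open_sharedmem_file(rfilename, file_flags, THREAD);  // was: CHECK
    if (fd == OS_ERR) {
      return;                 // open failed; any pending exception propagates
    }
    if (HAS_PENDING_EXCEPTION) {
      ::close(fd);            // don't leak the descriptor
      return;
    }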
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/posix/dtrace/hotspot.d	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *  
+ */
+
+provider hotspot {
+  probe class__loaded(char*, uintptr_t, void*, uintptr_t);
+  probe class__unloaded(char*, uintptr_t, void*, uintptr_t);
+  probe class__initialization__required(char*, uintptr_t, void*, intptr_t);
+  probe class__initialization__recursive(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__concurrent(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__erroneous(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__super__failed(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__clinit(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__error(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__end(char*, uintptr_t, void*, intptr_t,int);
+  probe vm__init__begin();
+  probe vm__init__end();
+  probe vm__shutdown();
+  probe vmops__request(char*, uintptr_t, int);
+  probe vmops__begin(char*, uintptr_t, int);
+  probe vmops__end(char*, uintptr_t, int);
+  probe gc__begin(uintptr_t);
+  probe gc__end();
+  probe mem__pool__gc__begin(
+    char*, uintptr_t, char*, uintptr_t, 
+    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe mem__pool__gc__end(
+    char*, uintptr_t, char*, uintptr_t, 
+    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe thread__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe thread__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe thread__sleep__begin(long long);
+  probe thread__sleep__end(int);
+  probe thread__yield();
+  probe thread__park__begin(uintptr_t, int, long long);
+  probe thread__park__end(uintptr_t);
+  probe thread__unpark(uintptr_t);
+  probe method__compile__begin(
+    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
+  probe method__compile__end(
+    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, 
+    char*, uintptr_t, uintptr_t); 
+  probe compiled__method__load(
+    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, void*, uintptr_t);
+  probe compiled__method__unload(
+    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
+  probe monitor__contended__enter(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__contended__entered(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__contended__exit(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__wait(uintptr_t, uintptr_t, char*, uintptr_t, uintptr_t);
+  probe monitor__waited(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__notify(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__notifyAll(uintptr_t, uintptr_t, char*, uintptr_t);
+
+  probe object__alloc(int, char*, uintptr_t, uintptr_t);
+  probe method__entry(
+    int, char*, int, char*, int, char*, int);
+  probe method__return(
+    int, char*, int, char*, int, char*, int);
+};
+
+#pragma D attributes Evolving/Evolving/Common provider hotspot provider
+#pragma D attributes Private/Private/Unknown provider hotspot module
+#pragma D attributes Private/Private/Unknown provider hotspot function
+#pragma D attributes Evolving/Evolving/Common provider hotspot name
+#pragma D attributes Evolving/Evolving/Common provider hotspot args
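For reference, a minimal D script consuming the provider above (double underscores in probe names appear as dashes to DTrace); it histograms GC pause times. The costly probes, e.g. method-entry/method-return and object-alloc, fire only when the VM runs with -XX:+ExtendedDTraceProbes; the GC probes used here do not need the flag.

    /* gcpause.d -- run as: dtrace -s gcpause.d -p <java-pid> */
    hotspot$target:::gc-begin
    {
      self->start = timestamp;
    }

    hotspot$target:::gc-end
    /self->start/
    {
      @["GC pause (ns)"] = quantize(timestamp - self->start);
      self->start = 0;
    }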
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/posix/dtrace/hotspot_jni.d	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *  
+ */
+
+provider hotspot_jni {
+  probe AllocObject__entry(void*, void*);
+  probe AllocObject__return(void*);
+  probe AttachCurrentThreadAsDaemon__entry(void*, void**, void*);
+  probe AttachCurrentThreadAsDaemon__return(uint32_t);
+  probe AttachCurrentThread__entry(void*, void**, void*);
+  probe AttachCurrentThread__return(uint32_t);
+  probe CallBooleanMethodA__entry(void*, void*, uintptr_t);
+  probe CallBooleanMethodA__return(uintptr_t);
+  probe CallBooleanMethod__entry(void*, void*, uintptr_t);
+  probe CallBooleanMethod__return(uintptr_t);
+  probe CallBooleanMethodV__entry(void*, void*, uintptr_t);
+  probe CallBooleanMethodV__return(uintptr_t);
+  probe CallByteMethodA__entry(void*, void*, uintptr_t);
+  probe CallByteMethodA__return(char);
+  probe CallByteMethod__entry(void*, void*, uintptr_t);
+  probe CallByteMethod__return(char);
+  probe CallByteMethodV__entry(void*, void*, uintptr_t);
+  probe CallByteMethodV__return(char);
+  probe CallCharMethodA__entry(void*, void*, uintptr_t);
+  probe CallCharMethodA__return(uint16_t);
+  probe CallCharMethod__entry(void*, void*, uintptr_t);
+  probe CallCharMethod__return(uint16_t);
+  probe CallCharMethodV__entry(void*, void*, uintptr_t);
+  probe CallCharMethodV__return(uint16_t);
+  probe CallDoubleMethodA__entry(void*, void*, uintptr_t);
+  probe CallDoubleMethodA__return();
+  probe CallDoubleMethod__entry(void*, void*, uintptr_t);
+  probe CallDoubleMethod__return();
+  probe CallDoubleMethodV__entry(void*, void*, uintptr_t);
+  probe CallDoubleMethodV__return();
+  probe CallFloatMethodA__entry(void*, void*, uintptr_t);
+  probe CallFloatMethodA__return();
+  probe CallFloatMethod__entry(void*, void*, uintptr_t);
+  probe CallFloatMethod__return();
+  probe CallFloatMethodV__entry(void*, void*, uintptr_t);
+  probe CallFloatMethodV__return();
+  probe CallIntMethodA__entry(void*, void*, uintptr_t);
+  probe CallIntMethodA__return(uint32_t);
+  probe CallIntMethod__entry(void*, void*, uintptr_t);
+  probe CallIntMethod__return(uint32_t);
+  probe CallIntMethodV__entry(void*, void*, uintptr_t);
+  probe CallIntMethodV__return(uint32_t);
+  probe CallLongMethodA__entry(void*, void*, uintptr_t);
+  probe CallLongMethodA__return(uintptr_t);
+  probe CallLongMethod__entry(void*, void*, uintptr_t);
+  probe CallLongMethod__return(uintptr_t);
+  probe CallLongMethodV__entry(void*, void*, uintptr_t);
+  probe CallLongMethodV__return(uintptr_t);
+  probe CallNonvirtualBooleanMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualBooleanMethodA__return(uintptr_t);
+  probe CallNonvirtualBooleanMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualBooleanMethod__return(uintptr_t);
+  probe CallNonvirtualBooleanMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualBooleanMethodV__return(uintptr_t);
+  probe CallNonvirtualByteMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualByteMethodA__return(char);
+  probe CallNonvirtualByteMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualByteMethod__return(char);
+  probe CallNonvirtualByteMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualByteMethodV__return(char);
+  probe CallNonvirtualCharMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualCharMethodA__return(uint16_t);
+  probe CallNonvirtualCharMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualCharMethod__return(uint16_t);
+  probe CallNonvirtualCharMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualCharMethodV__return(uint16_t);
+  probe CallNonvirtualDoubleMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualDoubleMethodA__return();
+  probe CallNonvirtualDoubleMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualDoubleMethod__return();
+  probe CallNonvirtualDoubleMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualDoubleMethodV__return();
+  probe CallNonvirtualFloatMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualFloatMethodA__return();
+  probe CallNonvirtualFloatMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualFloatMethod__return();
+  probe CallNonvirtualFloatMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualFloatMethodV__return();
+  probe CallNonvirtualIntMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualIntMethodA__return(uint32_t);
+  probe CallNonvirtualIntMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualIntMethod__return(uint32_t);
+  probe CallNonvirtualIntMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualIntMethodV__return(uint32_t);
+  probe CallNonvirtualLongMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualLongMethodA__return(uintptr_t);
+  probe CallNonvirtualLongMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualLongMethod__return(uintptr_t);
+  probe CallNonvirtualLongMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualLongMethodV__return(uintptr_t);
+  probe CallNonvirtualObjectMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualObjectMethodA__return(void*);
+  probe CallNonvirtualObjectMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualObjectMethod__return(void*);
+  probe CallNonvirtualObjectMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualObjectMethodV__return(void*);
+  probe CallNonvirtualShortMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualShortMethodA__return(uint16_t);
+  probe CallNonvirtualShortMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualShortMethod__return(uint16_t);
+  probe CallNonvirtualShortMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualShortMethodV__return(uint16_t);
+  probe CallNonvirtualVoidMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualVoidMethodA__return();
+  probe CallNonvirtualVoidMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualVoidMethod__return();
+  probe CallNonvirtualVoidMethodV__entry(void*, void*, void*, uintptr_t);  
+  probe CallNonvirtualVoidMethodV__return();
+  probe CallObjectMethodA__entry(void*, void*, uintptr_t);
+  probe CallObjectMethodA__return(void*);
+  probe CallObjectMethod__entry(void*, void*, uintptr_t);
+  probe CallObjectMethod__return(void*);
+  probe CallObjectMethodV__entry(void*, void*, uintptr_t);
+  probe CallObjectMethodV__return(void*);
+  probe CallShortMethodA__entry(void*, void*, uintptr_t);
+  probe CallShortMethodA__return(uint16_t);
+  probe CallShortMethod__entry(void*, void*, uintptr_t);
+  probe CallShortMethod__return(uint16_t);
+  probe CallShortMethodV__entry(void*, void*, uintptr_t);
+  probe CallShortMethodV__return(uint16_t);
+  probe CallStaticBooleanMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticBooleanMethodA__return(uintptr_t);
+  probe CallStaticBooleanMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticBooleanMethod__return(uintptr_t);
+  probe CallStaticBooleanMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticBooleanMethodV__return(uintptr_t);
+  probe CallStaticByteMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticByteMethodA__return(char);
+  probe CallStaticByteMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticByteMethod__return(char);
+  probe CallStaticByteMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticByteMethodV__return(char);
+  probe CallStaticCharMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticCharMethodA__return(uint16_t);
+  probe CallStaticCharMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticCharMethod__return(uint16_t);
+  probe CallStaticCharMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticCharMethodV__return(uint16_t);
+  probe CallStaticDoubleMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticDoubleMethodA__return();
+  probe CallStaticDoubleMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticDoubleMethod__return();
+  probe CallStaticDoubleMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticDoubleMethodV__return();
+  probe CallStaticFloatMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticFloatMethodA__return();
+  probe CallStaticFloatMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticFloatMethod__return();
+  probe CallStaticFloatMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticFloatMethodV__return();
+  probe CallStaticIntMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticIntMethodA__return(uint32_t);
+  probe CallStaticIntMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticIntMethod__return(uint32_t);
+  probe CallStaticIntMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticIntMethodV__return(uint32_t);
+  probe CallStaticLongMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticLongMethodA__return(uintptr_t);
+  probe CallStaticLongMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticLongMethod__return(uintptr_t);
+  probe CallStaticLongMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticLongMethodV__return(uintptr_t);
+  probe CallStaticObjectMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticObjectMethodA__return(void*);
+  probe CallStaticObjectMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticObjectMethod__return(void*);
+  probe CallStaticObjectMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticObjectMethodV__return(void*);
+  probe CallStaticShortMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticShortMethodA__return(uint16_t);
+  probe CallStaticShortMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticShortMethod__return(uint16_t);
+  probe CallStaticShortMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticShortMethodV__return(uint16_t);
+  probe CallStaticVoidMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticVoidMethodA__return();
+  probe CallStaticVoidMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticVoidMethod__return(); 
+  probe CallStaticVoidMethodV__entry(void*, void*, uintptr_t);  
+  probe CallStaticVoidMethodV__return();
+  probe CallVoidMethodA__entry(void*, void*, uintptr_t);  
+  probe CallVoidMethodA__return();
+  probe CallVoidMethod__entry(void*, void*, uintptr_t);  
+  probe CallVoidMethod__return(); 
+  probe CallVoidMethodV__entry(void*, void*, uintptr_t);  
+  probe CallVoidMethodV__return();
+  probe CreateJavaVM__entry(void**, void**, void*);
+  probe CreateJavaVM__return(uint32_t);
+  probe DefineClass__entry(void*, const char*, void*, char*, uintptr_t);
+  probe DefineClass__return(void*);
+  probe DeleteGlobalRef__entry(void*, void*);
+  probe DeleteGlobalRef__return();
+  probe DeleteLocalRef__entry(void*, void*);
+  probe DeleteLocalRef__return();
+  probe DeleteWeakGlobalRef__entry(void*, void*);
+  probe DeleteWeakGlobalRef__return();
+  probe DestroyJavaVM__entry(void*);
+  probe DestroyJavaVM__return(uint32_t);
+  probe DetachCurrentThread__entry(void*);
+  probe DetachCurrentThread__return(uint32_t);
+  probe EnsureLocalCapacity__entry(void*, uint32_t);
+  probe EnsureLocalCapacity__return(uint32_t);
+  probe ExceptionCheck__entry(void*);
+  probe ExceptionCheck__return(uintptr_t);
+  probe ExceptionClear__entry(void*);
+  probe ExceptionClear__return();
+  probe ExceptionDescribe__entry(void*);  
+  probe ExceptionDescribe__return();
+  probe ExceptionOccurred__entry(void*);
+  probe ExceptionOccurred__return(void*);
+  probe FatalError__entry(void* env, const char*);
+  probe FindClass__entry(void*, const char*);
+  probe FindClass__return(void*);
+  probe FromReflectedField__entry(void*, void*);
+  probe FromReflectedField__return(uintptr_t);
+  probe FromReflectedMethod__entry(void*, void*);
+  probe FromReflectedMethod__return(uintptr_t);
+  probe GetArrayLength__entry(void*, void*);
+  probe GetArrayLength__return(uintptr_t);
+  probe GetBooleanArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetBooleanArrayElements__return(uintptr_t*);
+  probe GetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
+  probe GetBooleanArrayRegion__return();
+  probe GetBooleanField__entry(void*, void*, uintptr_t);
+  probe GetBooleanField__return(uintptr_t);
+  probe GetByteArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetByteArrayElements__return(char*);
+  probe GetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
+  probe GetByteArrayRegion__return();
+  probe GetByteField__entry(void*, void*, uintptr_t);
+  probe GetByteField__return(char);
+  probe GetCharArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetCharArrayElements__return(uint16_t*);
+  probe GetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
+  probe GetCharArrayRegion__return();
+  probe GetCharField__entry(void*, void*, uintptr_t);
+  probe GetCharField__return(uint16_t);
+  probe GetCreatedJavaVMs__entry(void**, uintptr_t, uintptr_t*);
+  probe GetCreatedJavaVMs__return(uintptr_t);
+  probe GetDefaultJavaVMInitArgs__entry(void*);
+  probe GetDefaultJavaVMInitArgs__return(uint32_t);
+  probe GetDirectBufferAddress__entry(void*, void*);
+  probe GetDirectBufferAddress__return(void*);
+  probe GetDirectBufferCapacity__entry(void*, void*);
+  probe GetDirectBufferCapacity__return(uintptr_t);
+  probe GetDoubleArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetDoubleArrayElements__return(double*);
+  probe GetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, double*);
+  probe GetDoubleArrayRegion__return();
+  probe GetDoubleField__entry(void*, void*, uintptr_t);
+  probe GetDoubleField__return();
+  probe GetEnv__entry(void*, void*, uint32_t);
+  probe GetEnv__return(uint32_t);
+  probe GetFieldID__entry(void*, void*, const char*, const char*);
+  probe GetFieldID__return(uintptr_t);
+  probe GetFloatArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetFloatArrayElements__return(float*);
+  probe GetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, float*);
+  probe GetFloatArrayRegion__return();
+  probe GetFloatField__entry(void*, void*, uintptr_t);
+  probe GetFloatField__return();
+  probe GetIntArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetIntArrayElements__return(uint32_t*);
+  probe GetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint32_t*);
+  probe GetIntArrayRegion__return();
+  probe GetIntField__entry(void*, void*, uintptr_t);
+  probe GetIntField__return(uint32_t);
+  probe GetJavaVM__entry(void*, void**);
+  probe GetJavaVM__return(uint32_t);
+  probe GetLongArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetLongArrayElements__return(uintptr_t*);
+  probe GetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
+  probe GetLongArrayRegion__return();
+  probe GetLongField__entry(void*, void*, uintptr_t);
+  probe GetLongField__return(uintptr_t);
+  probe GetMethodID__entry(void*, void*, const char*, const char*);
+  probe GetMethodID__return(uintptr_t);
+  probe GetObjectArrayElement__entry(void*, void*, uintptr_t);
+  probe GetObjectArrayElement__return(void*);
+  probe GetObjectClass__entry(void*, void*);
+  probe GetObjectClass__return(void*);
+  probe GetObjectField__entry(void*, void*, uintptr_t);
+  probe GetObjectField__return(void*);
+  probe GetObjectRefType__entry(void*, void*);
+  probe GetObjectRefType__return(void*);
+  probe GetPrimitiveArrayCritical__entry(void*, void*, uintptr_t*);
+  probe GetPrimitiveArrayCritical__return(void*);
+  probe GetShortArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetShortArrayElements__return(uint16_t*);
+  probe GetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
+  probe GetShortArrayRegion__return();
+  probe GetShortField__entry(void*, void*, uintptr_t);
+  probe GetShortField__return(uint16_t);
+  probe GetStaticBooleanField__entry(void*, void*, uintptr_t);
+  probe GetStaticBooleanField__return(uintptr_t);
+  probe GetStaticByteField__entry(void*, void*, uintptr_t);
+  probe GetStaticByteField__return(char);
+  probe GetStaticCharField__entry(void*, void*, uintptr_t);
+  probe GetStaticCharField__return(uint16_t);
+  probe GetStaticDoubleField__entry(void*, void*, uintptr_t);
+  probe GetStaticDoubleField__return();
+  probe GetStaticFieldID__entry(void*, void*, const char*, const char*);
+  probe GetStaticFieldID__return(uintptr_t);
+  probe GetStaticFloatField__entry(void*, void*, uintptr_t);
+  probe GetStaticFloatField__return();
+  probe GetStaticIntField__entry(void*, void*, uintptr_t);
+  probe GetStaticIntField__return(uint32_t);
+  probe GetStaticLongField__entry(void*, void*, uintptr_t);
+  probe GetStaticLongField__return(uintptr_t);
+  probe GetStaticMethodID__entry(void*, void*, const char*, const char*);
+  probe GetStaticMethodID__return(uintptr_t);
+  probe GetStaticObjectField__entry(void*, void*, uintptr_t);
+  probe GetStaticObjectField__return(void*);
+  probe GetStaticShortField__entry(void*, void*, uintptr_t);
+  probe GetStaticShortField__return(uint16_t);
+  probe GetStringChars__entry(void*, void*, uintptr_t*);
+  probe GetStringChars__return(const uint16_t*);
+  probe GetStringCritical__entry(void*, void*, uintptr_t*);
+  probe GetStringCritical__return(const uint16_t*);
+  probe GetStringLength__entry(void*, void*);
+  probe GetStringLength__return(uintptr_t);
+  probe GetStringRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
+  probe GetStringRegion__return();
+  probe GetStringUTFChars__entry(void*, void*, uintptr_t*);
+  probe GetStringUTFChars__return(const char*);
+  probe GetStringUTFLength__entry(void*, void*);
+  probe GetStringUTFLength__return(uintptr_t);
+  probe GetStringUTFRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
+  probe GetStringUTFRegion__return();
+  probe GetSuperclass__entry(void*, void*);
+  probe GetSuperclass__return(void*);
+  probe GetVersion__entry(void*);
+  probe GetVersion__return(uint32_t);
+  probe IsAssignableFrom__entry(void*, void*, void*);
+  probe IsAssignableFrom__return(uintptr_t);
+  probe IsInstanceOf__entry(void*, void*, void*);
+  probe IsInstanceOf__return(uintptr_t);
+  probe IsSameObject__entry(void*, void*, void*);
+  probe IsSameObject__return(uintptr_t);
+  probe MonitorEnter__entry(void*, void*);
+  probe MonitorEnter__return(uint32_t);
+  probe MonitorExit__entry(void*, void*);
+  probe MonitorExit__return(uint32_t);
+  probe NewBooleanArray__entry(void*, uintptr_t);
+  probe NewBooleanArray__return(void*);
+  probe NewByteArray__entry(void*, uintptr_t);
+  probe NewByteArray__return(void*);
+  probe NewCharArray__entry(void*, uintptr_t);
+  probe NewCharArray__return(void*);
+  probe NewDirectByteBuffer__entry(void*, void*, uintptr_t);
+  probe NewDirectByteBuffer__return(void*);
+  probe NewDoubleArray__entry(void*, uintptr_t);
+  probe NewDoubleArray__return(void*);
+  probe NewFloatArray__entry(void*, uintptr_t);
+  probe NewFloatArray__return(void*);
+  probe NewGlobalRef__entry(void*, void*);
+  probe NewGlobalRef__return(void*);
+  probe NewIntArray__entry(void*, uintptr_t);
+  probe NewIntArray__return(void*);
+  probe NewLocalRef__entry(void*, void*);
+  probe NewLocalRef__return(void*);
+  probe NewLongArray__entry(void*, uintptr_t);
+  probe NewLongArray__return(void*);
+  probe NewObjectA__entry(void*, void*, uintptr_t);  
+  probe NewObjectA__return(void*);
+  probe NewObjectArray__entry(void*, uintptr_t, void*, void*);
+  probe NewObjectArray__return(void*);
+  probe NewObject__entry(void*, void*, uintptr_t); 
+  probe NewObject__return(void*);
+  probe NewObjectV__entry(void*, void*, uintptr_t);  
+  probe NewObjectV__return(void*);
+  probe NewShortArray__entry(void*, uintptr_t);
+  probe NewShortArray__return(void*);
+  probe NewString__entry(void*, const uint16_t*, uintptr_t);
+  probe NewString__return(void*);
+  probe NewStringUTF__entry(void*, const char*);
+  probe NewStringUTF__return(void*);
+  probe NewWeakGlobalRef__entry(void*, void*);
+  probe NewWeakGlobalRef__return(void*);
+  probe PopLocalFrame__entry(void*, void*);
+  probe PopLocalFrame__return(void*);
+  probe PushLocalFrame__entry(void*, uint32_t);
+  probe PushLocalFrame__return(uint32_t);
+  probe RegisterNatives__entry(void*, void*, const void*, uint32_t);  
+  probe RegisterNatives__return(uint32_t);
+  probe ReleaseBooleanArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
+  probe ReleaseBooleanArrayElements__return();
+  probe ReleaseByteArrayElements__entry(void*, void*, char*, uint32_t);
+  probe ReleaseByteArrayElements__return();
+  probe ReleaseCharArrayElements__entry(void*, void*, uint16_t*, uint32_t);
+  probe ReleaseCharArrayElements__return();
+  probe ReleaseDoubleArrayElements__entry(void*, void*, double*, uint32_t);
+  probe ReleaseDoubleArrayElements__return();
+  probe ReleaseFloatArrayElements__entry(void*, void*, float*, uint32_t);
+  probe ReleaseFloatArrayElements__return();
+  probe ReleaseIntArrayElements__entry(void*, void*, uint32_t*, uint32_t);
+  probe ReleaseIntArrayElements__return();
+  probe ReleaseLongArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
+  probe ReleaseLongArrayElements__return();
+  probe ReleasePrimitiveArrayCritical__entry(void*, void*, void*, uint32_t);
+  probe ReleasePrimitiveArrayCritical__return();
+  probe ReleaseShortArrayElements__entry(void*, void*, uint16_t*, uint32_t);
+  probe ReleaseShortArrayElements__return();
+  probe ReleaseStringChars__entry(void*, void*, const uint16_t*);
+  probe ReleaseStringChars__return();
+  probe ReleaseStringCritical__entry(void*, void*, const uint16_t*);
+  probe ReleaseStringCritical__return();
+  probe ReleaseStringUTFChars__entry(void*, void*, const char*);
+  probe ReleaseStringUTFChars__return();
+  probe SetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
+  probe SetBooleanArrayRegion__return();
+  probe SetBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe SetBooleanField__return();
+  probe SetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const char*);
+  probe SetByteArrayRegion__return();
+  probe SetByteField__entry(void*, void*, uintptr_t, char);
+  probe SetByteField__return();
+  probe SetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
+  probe SetCharArrayRegion__return();
+  probe SetCharField__entry(void*, void*, uintptr_t, uint16_t);
+  probe SetCharField__return();
+  probe SetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const double*);
+  probe SetDoubleArrayRegion__return();
+  probe SetDoubleField__entry(void*, void*, uintptr_t);
+  probe SetDoubleField__return();
+  probe SetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const float*);
+  probe SetFloatArrayRegion__return();
+  probe SetFloatField__entry(void*, void*, uintptr_t);
+  probe SetFloatField__return();
+  probe SetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint32_t*);
+  probe SetIntArrayRegion__return();
+  probe SetIntField__entry(void*, void*, uintptr_t, uint32_t);
+  probe SetIntField__return();
+  probe SetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
+  probe SetLongArrayRegion__return();
+  probe SetLongField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe SetLongField__return();
+  probe SetObjectArrayElement__entry(void*, void*, uintptr_t, void*);
+  probe SetObjectArrayElement__return();
+  probe SetObjectField__entry(void*, void*, uintptr_t, void*);
+  probe SetObjectField__return();
+  probe SetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
+  probe SetShortArrayRegion__return();
+  probe SetShortField__entry(void*, void*, uintptr_t, uint16_t);
+  probe SetShortField__return();
+  probe SetStaticBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe SetStaticBooleanField__return();
+  probe SetStaticByteField__entry(void*, void*, uintptr_t, char);
+  probe SetStaticByteField__return();
+  probe SetStaticCharField__entry(void*, void*, uintptr_t, uint16_t);
+  probe SetStaticCharField__return();
+  probe SetStaticDoubleField__entry(void*, void*, uintptr_t);
+  probe SetStaticDoubleField__return();
+  probe SetStaticFloatField__entry(void*, void*, uintptr_t);
+  probe SetStaticFloatField__return();
+  probe SetStaticIntField__entry(void*, void*, uintptr_t, uint32_t);
+  probe SetStaticIntField__return();
+  probe SetStaticLongField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe SetStaticLongField__return();
+  probe SetStaticObjectField__entry(void*, void*, uintptr_t, void*);
+  probe SetStaticObjectField__return();
+  probe SetStaticShortField__entry(void*, void*, uintptr_t, uint16_t);
+  probe SetStaticShortField__return();
+  probe Throw__entry(void*, void*);
+  probe Throw__return(intptr_t);
+  probe ThrowNew__entry(void*, void*, const char*);  
+  probe ThrowNew__return(intptr_t);  
+  probe ToReflectedField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe ToReflectedField__return(void*);
+  probe ToReflectedMethod__entry(void*, void*, uintptr_t, uintptr_t);
+  probe ToReflectedMethod__return(void*);
+  probe UnregisterNatives__entry(void*, void*);  
+  probe UnregisterNatives__return(uint32_t);
+};
+
+#pragma D attributes Standard/Standard/Common provider hotspot_jni provider
+#pragma D attributes Private/Private/Unknown provider hotspot_jni module
+#pragma D attributes Private/Private/Unknown provider hotspot_jni function
+#pragma D attributes Standard/Standard/Common provider hotspot_jni name
+#pragma D attributes Evolving/Evolving/Common provider hotspot_jni args
+
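The JNI provider pairs an entry and a return probe for every function, so a one-clause script with a glob pattern can profile JNI traffic by call count (a sketch; attach with -p as above):

    /* jnicount.d -- per-function JNI call counts */
    hotspot_jni$target:::*-entry
    {
      @calls[probename] = count();
    }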
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/posix/dtrace/hs_private.d	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *  
+ */
+
+provider hs_private {
+  probe safepoint__begin();
+  probe safepoint__end();
+  probe cms__initmark__begin();
+  probe cms__initmark__end();
+  probe cms__remark__begin();
+  probe cms__remark__end();
+};
+
+#pragma D attributes Private/Private/Common provider hs_private provider
+#pragma D attributes Private/Private/Unknown provider hs_private module
+#pragma D attributes Private/Private/Unknown provider hs_private function
+#pragma D attributes Private/Private/Common provider hs_private name
+#pragma D attributes Private/Private/Common provider hs_private args
+
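A matching sketch for the private provider: safepoint pause times from the paired safepoint probes (begin and end fire on the same VM thread, so thread-local state is safe):

    /* safepoint.d -- histogram of safepoint durations */
    hs_private$target:::safepoint-begin
    {
      self->ts = timestamp;
    }

    hs_private$target:::safepoint-end
    /self->ts/
    {
      @["safepoint (ns)"] = quantize(timestamp - self->ts);
      self->ts = 0;
    }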
--- a/src/os/posix/vm/os_posix.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os/posix/vm/os_posix.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -270,10 +270,6 @@
   return ::fdopen(fd, mode);
 }
 
-void* os::get_default_process_handle() {
-  return (void*)::dlopen(NULL, RTLD_LAZY);
-}
-
 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
 // which is used to find statically linked in agents.
 // Parameters:
--- a/src/os/solaris/dtrace/hotspot.d	Thu Jan 30 14:30:01 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hotspot {
-  probe class__loaded(char*, uintptr_t, void*, uintptr_t);
-  probe class__unloaded(char*, uintptr_t, void*, uintptr_t);
-  probe class__initialization__required(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__recursive(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__concurrent(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__erroneous(char*, uintptr_t, void*, intptr_t, int);
-  probe class__initialization__super__failed(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__clinit(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__error(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__end(char*, uintptr_t, void*, intptr_t,int);
-  probe vm__init__begin();
-  probe vm__init__end();
-  probe vm__shutdown();
-  probe vmops__request(char*, uintptr_t, int);
-  probe vmops__begin(char*, uintptr_t, int);
-  probe vmops__end(char*, uintptr_t, int);
-  probe gc__begin(uintptr_t);
-  probe gc__end();
-  probe mem__pool__gc__begin(
-    char*, uintptr_t, char*, uintptr_t, 
-    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe mem__pool__gc__end(
-    char*, uintptr_t, char*, uintptr_t, 
-    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__sleep__begin(long long);
-  probe thread__sleep__end(int);
-  probe thread__yield();
-  probe thread__park__begin(uintptr_t, int, long long);
-  probe thread__park__end(uintptr_t);
-  probe thread__unpark(uintptr_t);
-  probe method__compile__begin(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
-  probe method__compile__end(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, 
-    char*, uintptr_t, uintptr_t); 
-  probe compiled__method__load(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, void*, uintptr_t);
-  probe compiled__method__unload(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
-  probe monitor__contended__enter(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__contended__entered(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__contended__exit(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__wait(uintptr_t, uintptr_t, char*, uintptr_t, uintptr_t);
-  probe monitor__waited(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__notify(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__notifyAll(uintptr_t, uintptr_t, char*, uintptr_t);
-
-  probe object__alloc(int, char*, uintptr_t, uintptr_t);
-  probe method__entry(
-    int, char*, int, char*, int, char*, int);
-  probe method__return(
-    int, char*, int, char*, int, char*, int);
-};
-
-#pragma D attributes Evolving/Evolving/Common provider hotspot provider
-#pragma D attributes Private/Private/Unknown provider hotspot module
-#pragma D attributes Private/Private/Unknown provider hotspot function
-#pragma D attributes Evolving/Evolving/Common provider hotspot name
-#pragma D attributes Evolving/Evolving/Common provider hotspot args
--- a/src/os/solaris/dtrace/hotspot_jni.d	Thu Jan 30 14:30:01 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,506 +0,0 @@
-/*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hotspot_jni {
-  probe AllocObject__entry(void*, void*);
-  probe AllocObject__return(void*);
-  probe AttachCurrentThreadAsDaemon__entry(void*, void**, void*);
-  probe AttachCurrentThreadAsDaemon__return(uint32_t);
-  probe AttachCurrentThread__entry(void*, void**, void*);
-  probe AttachCurrentThread__return(uint32_t);
-  probe CallBooleanMethodA__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethodA__return(uintptr_t);
-  probe CallBooleanMethod__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethod__return(uintptr_t);
-  probe CallBooleanMethodV__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethodV__return(uintptr_t);
-  probe CallByteMethodA__entry(void*, void*, uintptr_t);
-  probe CallByteMethodA__return(char);
-  probe CallByteMethod__entry(void*, void*, uintptr_t);
-  probe CallByteMethod__return(char);
-  probe CallByteMethodV__entry(void*, void*, uintptr_t);
-  probe CallByteMethodV__return(char);
-  probe CallCharMethodA__entry(void*, void*, uintptr_t);
-  probe CallCharMethodA__return(uint16_t);
-  probe CallCharMethod__entry(void*, void*, uintptr_t);
-  probe CallCharMethod__return(uint16_t);
-  probe CallCharMethodV__entry(void*, void*, uintptr_t);
-  probe CallCharMethodV__return(uint16_t);
-  probe CallDoubleMethodA__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethodA__return();
-  probe CallDoubleMethod__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethod__return();
-  probe CallDoubleMethodV__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethodV__return();
-  probe CallFloatMethodA__entry(void*, void*, uintptr_t);
-  probe CallFloatMethodA__return();
-  probe CallFloatMethod__entry(void*, void*, uintptr_t);
-  probe CallFloatMethod__return();
-  probe CallFloatMethodV__entry(void*, void*, uintptr_t);
-  probe CallFloatMethodV__return();
-  probe CallIntMethodA__entry(void*, void*, uintptr_t);
-  probe CallIntMethodA__return(uint32_t);
-  probe CallIntMethod__entry(void*, void*, uintptr_t);
-  probe CallIntMethod__return(uint32_t);
-  probe CallIntMethodV__entry(void*, void*, uintptr_t);
-  probe CallIntMethodV__return(uint32_t);
-  probe CallLongMethodA__entry(void*, void*, uintptr_t);
-  probe CallLongMethodA__return(uintptr_t);
-  probe CallLongMethod__entry(void*, void*, uintptr_t);
-  probe CallLongMethod__return(uintptr_t);
-  probe CallLongMethodV__entry(void*, void*, uintptr_t);
-  probe CallLongMethodV__return(uintptr_t);
-  probe CallNonvirtualBooleanMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethodA__return(uintptr_t);
-  probe CallNonvirtualBooleanMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethod__return(uintptr_t);
-  probe CallNonvirtualBooleanMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethodV__return(uintptr_t);
-  probe CallNonvirtualByteMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethodA__return(char);
-  probe CallNonvirtualByteMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethod__return(char);
-  probe CallNonvirtualByteMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethodV__return(char);
-  probe CallNonvirtualCharMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethodA__return(uint16_t);
-  probe CallNonvirtualCharMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethod__return(uint16_t);
-  probe CallNonvirtualCharMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethodV__return(uint16_t);
-  probe CallNonvirtualDoubleMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethodA__return();
-  probe CallNonvirtualDoubleMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethod__return();
-  probe CallNonvirtualDoubleMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethodV__return();
-  probe CallNonvirtualFloatMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethodA__return();
-  probe CallNonvirtualFloatMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethod__return();
-  probe CallNonvirtualFloatMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethodV__return();
-  probe CallNonvirtualIntMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethodA__return(uint32_t);
-  probe CallNonvirtualIntMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethod__return(uint32_t);
-  probe CallNonvirtualIntMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethodV__return(uint32_t);
-  probe CallNonvirtualLongMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethodA__return(uintptr_t);
-  probe CallNonvirtualLongMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethod__return(uintptr_t);
-  probe CallNonvirtualLongMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethodV__return(uintptr_t);
-  probe CallNonvirtualObjectMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethodA__return(void*);
-  probe CallNonvirtualObjectMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethod__return(void*);
-  probe CallNonvirtualObjectMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethodV__return(void*);
-  probe CallNonvirtualShortMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethodA__return(uint16_t);
-  probe CallNonvirtualShortMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethod__return(uint16_t);
-  probe CallNonvirtualShortMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethodV__return(uint16_t);
-  probe CallNonvirtualVoidMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualVoidMethodA__return();
-  probe CallNonvirtualVoidMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualVoidMethod__return();
-  probe CallNonvirtualVoidMethodV__entry(void*, void*, void*, uintptr_t);  
-  probe CallNonvirtualVoidMethodV__return();
-  probe CallObjectMethodA__entry(void*, void*, uintptr_t);
-  probe CallObjectMethodA__return(void*);
-  probe CallObjectMethod__entry(void*, void*, uintptr_t);
-  probe CallObjectMethod__return(void*);
-  probe CallObjectMethodV__entry(void*, void*, uintptr_t);
-  probe CallObjectMethodV__return(void*);
-  probe CallShortMethodA__entry(void*, void*, uintptr_t);
-  probe CallShortMethodA__return(uint16_t);
-  probe CallShortMethod__entry(void*, void*, uintptr_t);
-  probe CallShortMethod__return(uint16_t);
-  probe CallShortMethodV__entry(void*, void*, uintptr_t);
-  probe CallShortMethodV__return(uint16_t);
-  probe CallStaticBooleanMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethodA__return(uintptr_t);
-  probe CallStaticBooleanMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethod__return(uintptr_t);
-  probe CallStaticBooleanMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethodV__return(uintptr_t);
-  probe CallStaticByteMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethodA__return(char);
-  probe CallStaticByteMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethod__return(char);
-  probe CallStaticByteMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethodV__return(char);
-  probe CallStaticCharMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethodA__return(uint16_t);
-  probe CallStaticCharMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethod__return(uint16_t);
-  probe CallStaticCharMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethodV__return(uint16_t);
-  probe CallStaticDoubleMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethodA__return();
-  probe CallStaticDoubleMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethod__return();
-  probe CallStaticDoubleMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethodV__return();
-  probe CallStaticFloatMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethodA__return();
-  probe CallStaticFloatMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethod__return();
-  probe CallStaticFloatMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethodV__return();
-  probe CallStaticIntMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethodA__return(uint32_t);
-  probe CallStaticIntMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethod__return(uint32_t);
-  probe CallStaticIntMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethodV__return(uint32_t);
-  probe CallStaticLongMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethodA__return(uintptr_t);
-  probe CallStaticLongMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethod__return(uintptr_t);
-  probe CallStaticLongMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethodV__return(uintptr_t);
-  probe CallStaticObjectMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethodA__return(void*);
-  probe CallStaticObjectMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethod__return(void*);
-  probe CallStaticObjectMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethodV__return(void*);
-  probe CallStaticShortMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethodA__return(uint16_t);
-  probe CallStaticShortMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethod__return(uint16_t);
-  probe CallStaticShortMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethodV__return(uint16_t);
-  probe CallStaticVoidMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticVoidMethodA__return();
-  probe CallStaticVoidMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticVoidMethod__return(); 
-  probe CallStaticVoidMethodV__entry(void*, void*, uintptr_t);  
-  probe CallStaticVoidMethodV__return();
-  probe CallVoidMethodA__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethodA__return();
-  probe CallVoidMethod__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethod__return(); 
-  probe CallVoidMethodV__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethodV__return();
-  probe CreateJavaVM__entry(void**, void**, void*);
-  probe CreateJavaVM__return(uint32_t);
-  probe DefineClass__entry(void*, const char*, void*, char, uintptr_t);
-  probe DefineClass__return(void*);
-  probe DeleteGlobalRef__entry(void*, void*);
-  probe DeleteGlobalRef__return();
-  probe DeleteLocalRef__entry(void*, void*);
-  probe DeleteLocalRef__return();
-  probe DeleteWeakGlobalRef__entry(void*, void*);
-  probe DeleteWeakGlobalRef__return();
-  probe DestroyJavaVM__entry(void*);
-  probe DestroyJavaVM__return(uint32_t);
-  probe DetachCurrentThread__entry(void*);
-  probe DetachCurrentThread__return(uint32_t);
-  probe EnsureLocalCapacity__entry(void*, uint32_t);
-  probe EnsureLocalCapacity__return(uint32_t);
-  probe ExceptionCheck__entry(void*);
-  probe ExceptionCheck__return(uintptr_t);
-  probe ExceptionClear__entry(void*);
-  probe ExceptionClear__return();
-  probe ExceptionDescribe__entry(void*);  
-  probe ExceptionDescribe__return();
-  probe ExceptionOccurred__entry(void*);
-  probe ExceptionOccurred__return(void*);
-  probe FatalError__entry(void* env, const char*);
-  probe FindClass__entry(void*, const char*);
-  probe FindClass__return(void*);
-  probe FromReflectedField__entry(void*, void*);
-  probe FromReflectedField__return(uintptr_t);
-  probe FromReflectedMethod__entry(void*, void*);
-  probe FromReflectedMethod__return(uintptr_t);
-  probe GetArrayLength__entry(void*, void*);
-  probe GetArrayLength__return(uintptr_t);
-  probe GetBooleanArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetBooleanArrayElements__return(uintptr_t*);
-  probe GetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
-  probe GetBooleanArrayRegion__return();
-  probe GetBooleanField__entry(void*, void*, uintptr_t);
-  probe GetBooleanField__return(uintptr_t);
-  probe GetByteArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetByteArrayElements__return(char*);
-  probe GetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
-  probe GetByteArrayRegion__return();
-  probe GetByteField__entry(void*, void*, uintptr_t);
-  probe GetByteField__return(char);
-  probe GetCharArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetCharArrayElements__return(uint16_t*);
-  probe GetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetCharArrayRegion__return();
-  probe GetCharField__entry(void*, void*, uintptr_t);
-  probe GetCharField__return(uint16_t);
-  probe GetCreatedJavaVMs__entry(void**, uintptr_t, uintptr_t*);
-  probe GetCreatedJavaVMs__return(uintptr_t);
-  probe GetDefaultJavaVMInitArgs__entry(void*);
-  probe GetDefaultJavaVMInitArgs__return(uint32_t);
-  probe GetDirectBufferAddress__entry(void*, void*);
-  probe GetDirectBufferAddress__return(void*);
-  probe GetDirectBufferCapacity__entry(void*, void*);
-  probe GetDirectBufferCapacity__return(uintptr_t);
-  probe GetDoubleArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetDoubleArrayElements__return(double*);
-  probe GetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, double*);
-  probe GetDoubleArrayRegion__return();
-  probe GetDoubleField__entry(void*, void*, uintptr_t);
-  probe GetDoubleField__return();
-  probe GetEnv__entry(void*, void*, uint32_t);
-  probe GetEnv__return(uint32_t);
-  probe GetFieldID__entry(void*, void*, const char*, const char*);
-  probe GetFieldID__return(uintptr_t);
-  probe GetFloatArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetFloatArrayElements__return(float*);
-  probe GetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, float*);
-  probe GetFloatArrayRegion__return();
-  probe GetFloatField__entry(void*, void*, uintptr_t);
-  probe GetFloatField__return();
-  probe GetIntArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetIntArrayElements__return(uint32_t*);
-  probe GetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint32_t*);
-  probe GetIntArrayRegion__return();
-  probe GetIntField__entry(void*, void*, uintptr_t);
-  probe GetIntField__return(uint32_t);
-  probe GetJavaVM__entry(void*, void**);
-  probe GetJavaVM__return(uint32_t);
-  probe GetLongArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetLongArrayElements__return(uintptr_t*);
-  probe GetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
-  probe GetLongArrayRegion__return();
-  probe GetLongField__entry(void*, void*, uintptr_t);
-  probe GetLongField__return(uintptr_t);
-  probe GetMethodID__entry(void*, void*, const char*, const char*);
-  probe GetMethodID__return(uintptr_t);
-  probe GetObjectArrayElement__entry(void*, void*, uintptr_t);
-  probe GetObjectArrayElement__return(void*);
-  probe GetObjectClass__entry(void*, void*);
-  probe GetObjectClass__return(void*);
-  probe GetObjectField__entry(void*, void*, uintptr_t);
-  probe GetObjectField__return(void*);
-  probe GetObjectRefType__entry(void*, void*);
-  probe GetObjectRefType__return(void*);
-  probe GetPrimitiveArrayCritical__entry(void*, void*, uintptr_t*);
-  probe GetPrimitiveArrayCritical__return(void*);
-  probe GetShortArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetShortArrayElements__return(uint16_t*);
-  probe GetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetShortArrayRegion__return();
-  probe GetShortField__entry(void*, void*, uintptr_t);
-  probe GetShortField__return(uint16_t);
-  probe GetStaticBooleanField__entry(void*, void*, uintptr_t);
-  probe GetStaticBooleanField__return(uintptr_t);
-  probe GetStaticByteField__entry(void*, void*, uintptr_t);
-  probe GetStaticByteField__return(char);
-  probe GetStaticCharField__entry(void*, void*, uintptr_t);
-  probe GetStaticCharField__return(uint16_t);
-  probe GetStaticDoubleField__entry(void*, void*, uintptr_t);
-  probe GetStaticDoubleField__return();
-  probe GetStaticFieldID__entry(void*, void*, const char*, const char*);
-  probe GetStaticFieldID__return(uintptr_t);
-  probe GetStaticFloatField__entry(void*, void*, uintptr_t);
-  probe GetStaticFloatField__return();
-  probe GetStaticIntField__entry(void*, void*, uintptr_t);
-  probe GetStaticIntField__return(uint32_t);
-  probe GetStaticLongField__entry(void*, void*, uintptr_t);
-  probe GetStaticLongField__return(uintptr_t);
-  probe GetStaticMethodID__entry(void*, void*, const char*, const char*);
-  probe GetStaticMethodID__return(uintptr_t);
-  probe GetStaticObjectField__entry(void*, void*, uintptr_t);
-  probe GetStaticObjectField__return(void*);
-  probe GetStaticShortField__entry(void*, void*, uintptr_t);
-  probe GetStaticShortField__return(uint16_t);
-  probe GetStringChars__entry(void*, void*, uintptr_t*);
-  probe GetStringChars__return(const uint16_t*);
-  probe GetStringCritical__entry(void*, void*, uintptr_t*);
-  probe GetStringCritical__return(const uint16_t*);
-  probe GetStringLength__entry(void*, void*);
-  probe GetStringLength__return(uintptr_t);
-  probe GetStringRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetStringRegion__return();
-  probe GetStringUTFChars__entry(void*, void*, uintptr_t*);
-  probe GetStringUTFChars__return(const char*);
-  probe GetStringUTFLength__entry(void*, void*);
-  probe GetStringUTFLength__return(uintptr_t);
-  probe GetStringUTFRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
-  probe GetStringUTFRegion__return();
-  probe GetSuperclass__entry(void*, void*);
-  probe GetSuperclass__return(void*);
-  probe GetVersion__entry(void*);
-  probe GetVersion__return(uint32_t);
-  probe IsAssignableFrom__entry(void*, void*, void*);
-  probe IsAssignableFrom__return(uintptr_t);
-  probe IsInstanceOf__entry(void*, void*, void*);
-  probe IsInstanceOf__return(uintptr_t);
-  probe IsSameObject__entry(void*, void*, void*);
-  probe IsSameObject__return(uintptr_t);
-  probe MonitorEnter__entry(void*, void*);
-  probe MonitorEnter__return(uint32_t);
-  probe MonitorExit__entry(void*, void*);
-  probe MonitorExit__return(uint32_t);
-  probe NewBooleanArray__entry(void*, uintptr_t);
-  probe NewBooleanArray__return(void*);
-  probe NewByteArray__entry(void*, uintptr_t);
-  probe NewByteArray__return(void*);
-  probe NewCharArray__entry(void*, uintptr_t);
-  probe NewCharArray__return(void*);
-  probe NewDirectByteBuffer__entry(void*, void*, uintptr_t);
-  probe NewDirectByteBuffer__return(void*);
-  probe NewDoubleArray__entry(void*, uintptr_t);
-  probe NewDoubleArray__return(void*);
-  probe NewFloatArray__entry(void*, uintptr_t);
-  probe NewFloatArray__return(void*);
-  probe NewGlobalRef__entry(void*, void*);
-  probe NewGlobalRef__return(void*);
-  probe NewIntArray__entry(void*, uintptr_t);
-  probe NewIntArray__return(void*);
-  probe NewLocalRef__entry(void*, void*);
-  probe NewLocalRef__return(void*);
-  probe NewLongArray__entry(void*, uintptr_t);
-  probe NewLongArray__return(void*);
-  probe NewObjectA__entry(void*, void*, uintptr_t);  
-  probe NewObjectA__return(void*);
-  probe NewObjectArray__entry(void*, uintptr_t, void*, void*);
-  probe NewObjectArray__return(void*);
-  probe NewObject__entry(void*, void*, uintptr_t); 
-  probe NewObject__return(void*);
-  probe NewObjectV__entry(void*, void*, uintptr_t);  
-  probe NewObjectV__return(void*);
-  probe NewShortArray__entry(void*, uintptr_t);
-  probe NewShortArray__return(void*);
-  probe NewString__entry(void*, const uint16_t*, uintptr_t);
-  probe NewString__return(void*);
-  probe NewStringUTF__entry(void*, const char*);
-  probe NewStringUTF__return(void*);
-  probe NewWeakGlobalRef__entry(void*, void*);
-  probe NewWeakGlobalRef__return(void*);
-  probe PopLocalFrame__entry(void*, void*);
-  probe PopLocalFrame__return(void*);
-  probe PushLocalFrame__entry(void*, uint32_t);
-  probe PushLocalFrame__return(uint32_t);
-  probe RegisterNatives__entry(void*, void*, const void*, uint32_t);  
-  probe RegisterNatives__return(uint32_t);
-  probe ReleaseBooleanArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
-  probe ReleaseBooleanArrayElements__return();
-  probe ReleaseByteArrayElements__entry(void*, void*, char*, uint32_t);
-  probe ReleaseByteArrayElements__return();
-  probe ReleaseCharArrayElements__entry(void*, void*, uint16_t*, uint32_t);
-  probe ReleaseCharArrayElements__return();
-  probe ReleaseDoubleArrayElements__entry(void*, void*, double*, uint32_t);
-  probe ReleaseDoubleArrayElements__return();
-  probe ReleaseFloatArrayElements__entry(void*, void*, float*, uint32_t);
-  probe ReleaseFloatArrayElements__return();
-  probe ReleaseIntArrayElements__entry(void*, void*, uint32_t*, uint32_t);
-  probe ReleaseIntArrayElements__return();
-  probe ReleaseLongArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
-  probe ReleaseLongArrayElements__return();
-  probe ReleasePrimitiveArrayCritical__entry(void*, void*, void*, uint32_t);
-  probe ReleasePrimitiveArrayCritical__return();
-  probe ReleaseShortArrayElements__entry(void*, void*, uint16_t*, uint32_t);
-  probe ReleaseShortArrayElements__return();
-  probe ReleaseStringChars__entry(void*, void*, const uint16_t*);
-  probe ReleaseStringChars__return();
-  probe ReleaseStringCritical__entry(void*, void*, const uint16_t*);
-  probe ReleaseStringCritical__return();
-  probe ReleaseStringUTFChars__entry(void*, void*, const char*);
-  probe ReleaseStringUTFChars__return();
-  probe SetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
-  probe SetBooleanArrayRegion__return();
-  probe SetBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetBooleanField__return();
-  probe SetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const char*);
-  probe SetByteArrayRegion__return();
-  probe SetByteField__entry(void*, void*, uintptr_t, char);
-  probe SetByteField__return();
-  probe SetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
-  probe SetCharArrayRegion__return();
-  probe SetCharField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetCharField__return();
-  probe SetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const double*);
-  probe SetDoubleArrayRegion__return();
-  probe SetDoubleField__entry(void*, void*, uintptr_t);
-  probe SetDoubleField__return();
-  probe SetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const float*);
-  probe SetFloatArrayRegion__return();
-  probe SetFloatField__entry(void*, void*, uintptr_t);
-  probe SetFloatField__return();
-  probe SetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint32_t*);
-  probe SetIntArrayRegion__return();
-  probe SetIntField__entry(void*, void*, uintptr_t, uint32_t);
-  probe SetIntField__return();
-  probe SetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
-  probe SetLongArrayRegion__return();
-  probe SetLongField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetLongField__return();
-  probe SetObjectArrayElement__entry(void*, void*, uintptr_t, void*);
-  probe SetObjectArrayElement__return();
-  probe SetObjectField__entry(void*, void*, uintptr_t, void*);
-  probe SetObjectField__return();
-  probe SetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
-  probe SetShortArrayRegion__return();
-  probe SetShortField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetShortField__return();
-  probe SetStaticBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetStaticBooleanField__return();
-  probe SetStaticByteField__entry(void*, void*, uintptr_t, char);
-  probe SetStaticByteField__return();
-  probe SetStaticCharField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetStaticCharField__return();
-  probe SetStaticDoubleField__entry(void*, void*, uintptr_t);
-  probe SetStaticDoubleField__return();
-  probe SetStaticFloatField__entry(void*, void*, uintptr_t);
-  probe SetStaticFloatField__return();
-  probe SetStaticIntField__entry(void*, void*, uintptr_t, uint32_t);
-  probe SetStaticIntField__return();
-  probe SetStaticLongField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetStaticLongField__return();
-  probe SetStaticObjectField__entry(void*, void*, uintptr_t, void*);
-  probe SetStaticObjectField__return();
-  probe SetStaticShortField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetStaticShortField__return();
-  probe Throw__entry(void*, void*);
-  probe Throw__return(intptr_t);
-  probe ThrowNew__entry(void*, void*, const char*);  
-  probe ThrowNew__return(intptr_t);  
-  probe ToReflectedField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe ToReflectedField__return(void*);
-  probe ToReflectedMethod__entry(void*, void*, uintptr_t, uintptr_t);
-  probe ToReflectedMethod__return(void*);
-  probe UnregisterNatives__entry(void*, void*);  
-  probe UnregisterNatives__return(uint32_t);
-};
-
-#pragma D attributes Standard/Standard/Common provider hotspot_jni provider
-#pragma D attributes Private/Private/Unknown provider hotspot_jni module
-#pragma D attributes Private/Private/Unknown provider hotspot_jni function
-#pragma D attributes Standard/Standard/Common provider hotspot_jni name
-#pragma D attributes Evolving/Evolving/Common provider hotspot_jni args
-
--- a/src/os/solaris/dtrace/hs_private.d	Thu Jan 30 14:30:01 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hs_private {
-  probe safepoint__begin();
-  probe safepoint__end();
-  probe cms__initmark__begin();
-  probe cms__initmark__end();
-  probe cms__remark__begin();
-  probe cms__remark__end();
-};
-
-#pragma D attributes Private/Private/Common provider hs_private provider
-#pragma D attributes Private/Private/Unknown provider hs_private module
-#pragma D attributes Private/Private/Unknown provider hs_private function
-#pragma D attributes Private/Private/Common provider hs_private name
-#pragma D attributes Private/Private/Common provider hs_private args
-
--- a/src/os/solaris/vm/os_solaris.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os/solaris/vm/os_solaris.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2146,6 +2146,10 @@
   return dlsym(handle, name);
 }
 
+void* os::get_default_process_handle() {
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+}
+
 int os::stat(const char *path, struct stat *sbuf) {
   char pathbuf[MAX_PATH];
   if (strlen(path) > MAX_PATH - 1) {
@@ -2228,8 +2232,8 @@
         st->cr();
         status = true;
       }
-      ::close(fd);
     }
+    ::close(fd);
   }
   return status;
 }
@@ -2393,13 +2397,14 @@
     return;
   }
 
-  if (Arguments::created_by_gamma_launcher()) {
-    // Support for the gamma launcher.  Typical value for buf is
-    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
-    // the right place in the string, then assume we are installed in a JDK and
-    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
-    // up the path so it looks like libjvm.so is installed there (append a
-    // fake suffix hotspot/libjvm.so).
+  if (Arguments::sun_java_launcher_is_altjvm()) {
+    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
+    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
+    // If "/jre/lib/" appears at the right place in the string, then
+    // assume we are installed in a JDK and we're done.  Otherwise, check
+    // for a JAVA_HOME environment variable and fix up the path so it
+    // looks like libjvm.so is installed there (append a fake suffix
+    // hotspot/libjvm.so).
     const char *p = buf + strlen(buf) - 1;
     for (int count = 0; p > buf && count < 5; ++count) {
       for (--p; p > buf && *p != '/'; --p)
@@ -2963,7 +2968,7 @@
 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
-  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
+  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
   uint_t validity[MAX_MEMINFO_CNT];
 
   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
@@ -3002,7 +3007,7 @@
       }
     }
 
-    if (i != addrs_count) {
+    if (i < addrs_count) {
       if ((validity[i] & 2) != 0) {
         page_found->lgrp_id = outdata[types * i];
       } else {
@@ -3492,9 +3497,14 @@
   return os_sleep(millis, interruptible);
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os_sleep(1, false);
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Uninterruptible sleep, short time use only");
+
+  // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
+  // Solaris requires -lrt for this.
+  usleep(ms * 1000);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
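
Two details in the new naked_short_sleep() are easy to miss: usleep(3C) takes
microseconds, hence the factor of 1000, and older POSIX required the argument
to stay below one million, which the ms < 1000 assert rules out. Had linking
librt been acceptable, a nanosleep-based equivalent would look roughly like
this (a sketch, not what the changeset does):

    #include <time.h>

    /* sleep for ms milliseconds (ms < 1000); EINTR deliberately ignored */
    static void short_sleep(long ms) {
        struct timespec ts;
        ts.tv_sec  = 0;
        ts.tv_nsec = ms * 1000000L;   /* 1 ms = 1,000,000 ns */
        nanosleep(&ts, NULL);
    }
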
--- a/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -431,10 +431,12 @@
 
       RESTARTABLE(::read(fd, addr, remaining), result);
       if (result == OS_ERR) {
+        ::close(fd);
         THROW_MSG_0(vmSymbols::java_io_IOException(), "Read error");
+      } else {
+        remaining-=result;
+        addr+=result;
       }
-      remaining-=result;
-      addr+=result;
     }
 
     ::close(fd);
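
The point of this change is that THROW_MSG_0 returns out of the function, so
the old code leaked the descriptor on a read error. Stripped of HotSpot's
exception macros, the intended shape is a standard restartable full read; a
minimal sketch:

    #include <errno.h>
    #include <unistd.h>

    /* Read exactly 'remaining' bytes, retrying on EINTR. Returns 0 on
       success, -1 on error or premature EOF; fd is closed either way. */
    static int read_fully(int fd, char* addr, size_t remaining) {
        while (remaining > 0) {
            ssize_t result;
            do {
                result = read(fd, addr, remaining);
            } while (result == -1 && errno == EINTR);
            if (result <= 0) {
                close(fd);
                return -1;
            }
            remaining -= (size_t)result;
            addr      += result;
        }
        close(fd);
        return 0;
    }
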
@@ -906,8 +908,16 @@
   FREE_C_HEAP_ARRAY(char, filename, mtInternal);
 
   // open the shared memory file for the given vmid
-  fd = open_sharedmem_file(rfilename, file_flags, CHECK);
-  assert(fd != OS_ERR, "unexpected value");
+  fd = open_sharedmem_file(rfilename, file_flags, THREAD);
+
+  if (fd == OS_ERR) {
+    return;
+  }
+
+  if (HAS_PENDING_EXCEPTION) {
+    ::close(fd);
+    return;
+  }
 
   if (*sizep == 0) {
     size = sharedmem_filesize(fd, CHECK);
--- a/src/os/windows/vm/os_windows.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os/windows/vm/os_windows.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1810,32 +1810,30 @@
   }
 
   buf[0] = '\0';
-  if (Arguments::created_by_gamma_launcher()) {
-     // Support for the gamma launcher. Check for an
-     // JAVA_HOME environment variable
-     // and fix up the path so it looks like
-     // libjvm.so is installed there (append a fake suffix
-     // hotspot/libjvm.so).
-     char* java_home_var = ::getenv("JAVA_HOME");
-     if (java_home_var != NULL && java_home_var[0] != 0) {
-
-        strncpy(buf, java_home_var, buflen);
-
-        // determine if this is a legacy image or modules image
-        // modules image doesn't have "jre" subdirectory
-        size_t len = strlen(buf);
-        char* jrebin_p = buf + len;
-        jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
-        if (0 != _access(buf, 0)) {
-          jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
-        }
-        len = strlen(buf);
-        jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
-     }
-  }
-
-  if(buf[0] == '\0') {
-  GetModuleFileName(vm_lib_handle, buf, buflen);
+  if (Arguments::sun_java_launcher_is_altjvm()) {
+    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
+    // for a JAVA_HOME environment variable and fix up the path so it
+    // looks like jvm.dll is installed there (append a fake suffix
+    // hotspot/jvm.dll).
+    char* java_home_var = ::getenv("JAVA_HOME");
+    if (java_home_var != NULL && java_home_var[0] != 0) {
+      strncpy(buf, java_home_var, buflen);
+
+      // determine if this is a legacy image or modules image
+      // modules image doesn't have "jre" subdirectory
+      size_t len = strlen(buf);
+      char* jrebin_p = buf + len;
+      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
+      if (0 != _access(buf, 0)) {
+        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
+      }
+      len = strlen(buf);
+      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
+    }
+  }
+
+  if (buf[0] == '\0') {
+    GetModuleFileName(vm_lib_handle, buf, buflen);
   }
   strcpy(saved_jvm_path, buf);
 }
@@ -3486,6 +3484,16 @@
   return result;
 }
 
+//
+// Short sleep, direct OS call.
+//
+// ms = 0 means: allow others (if any) to run.
+//
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Uninterruptible sleep, short time use only");
+  Sleep(ms);
+}
+
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
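
Note the ms = 0 case called out in the comment: on Windows, Sleep(0)
relinquishes the remainder of the current time slice to any ready thread of
equal priority, so it doubles as a cooperative yield. A trivial illustration:

    #include <windows.h>

    /* spin until *flag becomes nonzero, yielding the rest of each slice */
    static void spin_wait(volatile LONG* flag) {
        while (*flag == 0) {
            Sleep(0);   /* let equal-priority ready threads run */
        }
    }
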
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -49,6 +49,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -906,6 +907,9 @@
   if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
     return; // No matter, we tried, best effort.
   }
+
+  MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
+
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
      tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
   }
--- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -75,13 +75,19 @@
     do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
 
     // Extract valid instruction set extensions.
-    uint_t av;
-    uint_t avn = os::Solaris::getisax(&av, 1);
-    assert(avn == 1, "should only return one av");
+    uint_t avs[2];
+    uint_t avn = os::Solaris::getisax(avs, 2);
+    assert(avn <= 2, "should return two or fewer av's");
+    uint_t av = avs[0];
 
 #ifndef PRODUCT
-    if (PrintMiscellaneous && Verbose)
-      tty->print_cr("getisax(2) returned: " PTR32_FORMAT, av);
+    if (PrintMiscellaneous && Verbose) {
+      tty->print("getisax(2) returned: " PTR32_FORMAT, av);
+      if (avn > 1) {
+        tty->print(", " PTR32_FORMAT, avs[1]);
+      }
+      tty->cr();
+    }
 #endif
 
     if (av & AV_SPARC_MUL32)  features |= hardware_mul32_m;
@@ -91,6 +97,13 @@
     if (av & AV_SPARC_POPC)   features |= hardware_popc_m;
     if (av & AV_SPARC_VIS)    features |= vis1_instructions_m;
     if (av & AV_SPARC_VIS2)   features |= vis2_instructions_m;
+    if (avn > 1) {
+      uint_t av2 = avs[1];
+#ifndef AV2_SPARC_SPARC5
+#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
+#endif
+      if (av2 & AV2_SPARC_SPARC5)       features |= sparc5_instructions_m;
+    }
 
     // Next values are not defined before Solaris 10
     // but Solaris 8 is used for jdk6 update builds.
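
getisax(2) fills a caller-supplied vector of AV_* capability masks and
returns how many elements it populated; the change above simply grows that
vector from one word to two so the AV2_SPARC_* bits in the second word become
visible. A standalone sketch (builds only on Solaris/SPARC):

    #include <sys/auxv.h>
    #include <stdio.h>

    int main(void) {
        uint32_t avs[2] = { 0, 0 };
        uint_t avn = getisax(avs, 2);   /* number of words populated */
        printf("getisax returned %u word(s): 0x%08x 0x%08x\n",
               avn, avs[0], avs[1]);
        if (avs[0] & AV_SPARC_VIS) {
            printf("VIS 1.0 instructions available\n");
        }
        return 0;
    }
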
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -475,9 +475,11 @@
         // here if the underlying file has been truncated.
         // Do not crash the VM in such a case.
         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
-        nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
-        if (nm != NULL && nm->has_unsafe_access()) {
-          stub = StubRoutines::handler_for_unsafe_access();
+        if (cb != NULL) {
+          nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
+          if (nm != NULL && nm->has_unsafe_access()) {
+            stub = StubRoutines::handler_for_unsafe_access();
+          }
         }
       }
       else
@@ -724,6 +726,7 @@
   err.report_and_die();
 
   ShouldNotReachHere();
+  return false;
 }
 
 void os::print_context(outputStream *st, void *context) {
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Wed Feb 19 12:08:49 2014 -0800
@@ -161,7 +161,18 @@
         for (BuildConfig cfg : allConfigs) {
             startTag(cfg, "PropertyGroup");
             tagData("LocalDebuggerCommand", cfg.get("JdkTargetRoot") + "\\bin\\java.exe");
-            tagData("LocalDebuggerCommandArguments", "-XXaltjvm=$(TargetDir) -Dsun.java.launcher=gamma");
+            // The JVM loads some libraries using a path relative to
+            // itself because it expects to be in a JRE or a JDK. The java
+            // launcher's '-XXaltjvm=' option allows the JVM to be outside
+            // the JRE or JDK so '-Dsun.java.launcher.is_altjvm=true'
+            // forces a fake JAVA_HOME relative path to be used to
+            // find the other libraries. The '-XX:+PauseAtExit' option
+            // causes the VM to wait for a key press before exiting; this
+            // allows any stdout or stderr messages to be seen before
+            // the cmdtool exits.
+            tagData("LocalDebuggerCommandArguments", "-XXaltjvm=$(TargetDir) "
+                    + "-Dsun.java.launcher.is_altjvm=true "
+                    + "-XX:+UnlockDiagnosticVMOptions -XX:+PauseAtExit");
             tagData("LocalDebuggerEnvironment", "JAVA_HOME=" + cfg.get("JdkTargetRoot"));
             endTag();
         }
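
Outside the generated Visual Studio project, the equivalent manual invocation
would look roughly like this (the path and main class are placeholders):

    java -XXaltjvm=C:\build\vs\Debug -Dsun.java.launcher.is_altjvm=true ^
         -XX:+UnlockDiagnosticVMOptions -XX:+PauseAtExit HelloWorld
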
--- a/src/share/vm/ci/ciField.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/ci/ciField.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -201,16 +201,10 @@
       return;
     }
 
-    // This field just may be constant.  The only cases where it will
-    // not be constant are:
-    //
-    // 1. The field holds a non-perm-space oop.  The field is, strictly
-    //    speaking, constant but we cannot embed non-perm-space oops into
-    //    generated code.  For the time being we need to consider the
-    //    field to be not constant.
-    // 2. The field is a *special* static&final field whose value
-    //    may change.  The three examples are java.lang.System.in,
-    //    java.lang.System.out, and java.lang.System.err.
+    // This field just may be constant.  The only case where it will
+    // not be constant is when the field is a *special* static&final field
+    // whose value may change.  The three examples are java.lang.System.in,
+    // java.lang.System.out, and java.lang.System.err.
 
     KlassHandle k = _holder->get_Klass();
     assert( SystemDictionary::System_klass() != NULL, "Check once per vm");
--- a/src/share/vm/ci/ciField.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/ci/ciField.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -130,9 +130,7 @@
   //   1. The field is both static and final
   //   2. The canonical holder of the field has undergone
   //      static initialization.
-  //   3. If the field is an object or array, then the oop
-  //      in question is allocated in perm space.
-  //   4. The field is not one of the special static/final
+  //   3. The field is not one of the special static/final
   //      non-constant fields.  These are java.lang.System.in
   //      and java.lang.System.out.  Abomination.
   //
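
What makes in, out, and err special is that, although declared static final,
the runtime rebinds them after class initialization via System.setIn/setOut/
setErr (which write the fields natively), so a compiler must not constant-fold
them. A short Java illustration:

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    public class FinalButMutable {
        public static void main(String[] args) {
            PrintStream original = System.out;   // read the "final" field
            System.setOut(new PrintStream(new ByteArrayOutputStream()));
            boolean changed = (System.out != original);
            System.setOut(original);             // restore
            System.out.println("final field changed: " + changed); // true
        }
    }
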
--- a/src/share/vm/classfile/altHashing.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/classfile/altHashing.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,18 +39,18 @@
 }
 
 // Seed value used for each alternative hash calculated.
-jint AltHashing::compute_seed() {
+juint AltHashing::compute_seed() {
   jlong nanos = os::javaTimeNanos();
   jlong now = os::javaTimeMillis();
-  jint SEED_MATERIAL[8] = {
-            (jint) object_hash(SystemDictionary::String_klass()),
-            (jint) object_hash(SystemDictionary::System_klass()),
-            (jint) os::random(),  // current thread isn't a java thread
-            (jint) (((julong)nanos) >> 32),
-            (jint) nanos,
-            (jint) (((julong)now) >> 32),
-            (jint) now,
-            (jint) (os::javaTimeNanos() >> 2)
+  int SEED_MATERIAL[8] = {
+            (int) object_hash(SystemDictionary::String_klass()),
+            (int) object_hash(SystemDictionary::System_klass()),
+            (int) os::random(),  // current thread isn't a java thread
+            (int) (((julong)nanos) >> 32),
+            (int) nanos,
+            (int) (((julong)now) >> 32),
+            (int) now,
+            (int) (os::javaTimeNanos() >> 2)
   };
 
   return murmur3_32(SEED_MATERIAL, 8);
@@ -58,14 +58,14 @@
 
 
 // Murmur3 hashing for Symbol
-jint AltHashing::murmur3_32(jint seed, const jbyte* data, int len) {
-  jint h1 = seed;
+juint AltHashing::murmur3_32(juint seed, const jbyte* data, int len) {
+  juint h1 = seed;
   int count = len;
   int offset = 0;
 
   // body
   while (count >= 4) {
-    jint k1 = (data[offset] & 0x0FF)
+    juint k1 = (data[offset] & 0x0FF)
         | (data[offset + 1] & 0x0FF) << 8
         | (data[offset + 2] & 0x0FF) << 16
         | data[offset + 3] << 24;
@@ -85,7 +85,7 @@
   // tail
 
   if (count > 0) {
-    jint k1 = 0;
+    juint k1 = 0;
 
     switch (count) {
       case 3:
@@ -109,18 +109,18 @@
   h1 ^= len;
 
   // finalization mix force all bits of a hash block to avalanche
-  h1 ^= ((unsigned int)h1) >> 16;
+  h1 ^= h1 >> 16;
   h1 *= 0x85ebca6b;
-  h1 ^= ((unsigned int)h1) >> 13;
+  h1 ^= h1 >> 13;
   h1 *= 0xc2b2ae35;
-  h1 ^= ((unsigned int)h1) >> 16;
+  h1 ^= h1 >> 16;
 
   return h1;
 }
 
 // Murmur3 hashing for Strings
-jint AltHashing::murmur3_32(jint seed, const jchar* data, int len) {
-  jint h1 = seed;
+juint AltHashing::murmur3_32(juint seed, const jchar* data, int len) {
+  juint h1 = seed;
 
   int off = 0;
   int count = len;
@@ -129,7 +129,7 @@
   while (count >= 2) {
     jchar d1 = data[off++] & 0xFFFF;
     jchar d2 = data[off++];
-    jint k1 = (d1 | d2 << 16);
+    juint k1 = (d1 | d2 << 16);
 
     count -= 2;
 
@@ -145,7 +145,7 @@
   // tail
 
   if (count > 0) {
-    int k1 = data[off];
+    juint k1 = (juint)data[off];
 
     k1 *= 0xcc9e2d51;
     k1 = Integer_rotateLeft(k1, 15);
@@ -157,25 +157,25 @@
   h1 ^= len * 2; // (Character.SIZE / Byte.SIZE);
 
   // finalization mix force all bits of a hash block to avalanche
-  h1 ^= ((unsigned int)h1) >> 16;
+  h1 ^= h1 >> 16;
   h1 *= 0x85ebca6b;
-  h1 ^= ((unsigned int)h1) >> 13;
+  h1 ^= h1 >> 13;
   h1 *= 0xc2b2ae35;
-  h1 ^= ((unsigned int)h1) >> 16;
+  h1 ^= h1 >> 16;
 
   return h1;
 }
 
 // Hash used for the seed.
-jint AltHashing::murmur3_32(jint seed, const int* data, int len) {
-  jint h1 = seed;
+juint AltHashing::murmur3_32(juint seed, const int* data, int len) {
+  juint h1 = seed;
 
   int off = 0;
   int end = len;
 
   // body
   while (off < end) {
-    jint k1 = data[off++];
+    juint k1 = (juint)data[off++];
 
     k1 *= 0xcc9e2d51;
     k1 = Integer_rotateLeft(k1, 15);
@@ -193,26 +193,26 @@
   h1 ^= len * 4; // (Integer.SIZE / Byte.SIZE);
 
   // finalization mix force all bits of a hash block to avalanche
-  h1 ^= ((juint)h1) >> 16;
+  h1 ^= h1 >> 16;
   h1 *= 0x85ebca6b;
-  h1 ^= ((juint)h1) >> 13;
+  h1 ^= h1 >> 13;
   h1 *= 0xc2b2ae35;
-  h1 ^= ((juint)h1) >> 16;
+  h1 ^= h1 >> 16;
 
   return h1;
 }
 
-jint AltHashing::murmur3_32(const int* data, int len) {
+juint AltHashing::murmur3_32(const int* data, int len) {
   return murmur3_32(0, data, len);
 }
 
 #ifndef PRODUCT
 // Overloaded versions for internal test.
-jint AltHashing::murmur3_32(const jbyte* data, int len) {
+juint AltHashing::murmur3_32(const jbyte* data, int len) {
   return murmur3_32(0, data, len);
 }
 
-jint AltHashing::murmur3_32(const jchar* data, int len) {
+juint AltHashing::murmur3_32(const jchar* data, int len) {
   return murmur3_32(0, data, len);
 }
 
@@ -251,11 +251,11 @@
 
   // Hash subranges {}, {0}, {0,1}, {0,1,2}, ..., {0,...,255}
   for (int i = 0; i < 256; i++) {
-    jint hash = murmur3_32(256 - i, vector, i);
+    juint hash = murmur3_32(256 - i, vector, i);
     hashes[i * 4] = (jbyte) hash;
-    hashes[i * 4 + 1] = (jbyte) (((juint)hash) >> 8);
-    hashes[i * 4 + 2] = (jbyte) (((juint)hash) >> 16);
-    hashes[i * 4 + 3] = (jbyte) (((juint)hash) >> 24);
+    hashes[i * 4 + 1] = (jbyte)(hash >> 8);
+    hashes[i * 4 + 2] = (jbyte)(hash >> 16);
+    hashes[i * 4 + 3] = (jbyte)(hash >> 24);
   }
 
   // hash to get const result.
@@ -269,7 +269,7 @@
 }
 
 void AltHashing::testEquivalentHashes() {
-  jint jbytes, jchars, ints;
+  juint jbytes, jchars, ints;
 
   // printf("testEquivalentHashes\n");
 
--- a/src/share/vm/classfile/altHashing.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/classfile/altHashing.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,24 +39,24 @@
 class AltHashing : AllStatic {
 
   // utility function copied from java/lang/Integer
-  static jint Integer_rotateLeft(jint i, int distance) {
-    return (i << distance) | (((juint)i) >> (32-distance));
+  static juint Integer_rotateLeft(juint i, int distance) {
+    return (i << distance) | (i >> (32-distance));
   }
-  static jint murmur3_32(const int* data, int len);
-  static jint murmur3_32(jint seed, const int* data, int len);
+  static juint murmur3_32(const int* data, int len);
+  static juint murmur3_32(juint seed, const int* data, int len);
 
 #ifndef PRODUCT
   // Hashing functions used for internal testing
-  static jint murmur3_32(const jbyte* data, int len);
-  static jint murmur3_32(const jchar* data, int len);
+  static juint murmur3_32(const jbyte* data, int len);
+  static juint murmur3_32(const jchar* data, int len);
   static void testMurmur3_32_ByteArray();
   static void testEquivalentHashes();
 #endif // PRODUCT
 
  public:
-  static jint compute_seed();
-  static jint murmur3_32(jint seed, const jbyte* data, int len);
-  static jint murmur3_32(jint seed, const jchar* data, int len);
+  static juint compute_seed();
+  static juint murmur3_32(juint seed, const jbyte* data, int len);
+  static juint murmur3_32(juint seed, const jchar* data, int len);
   NOT_PRODUCT(static void test_alt_hash();)
 };
 #endif // SHARE_VM_CLASSFILE_ALTHASHING_HPP
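
The jint-to-juint change throughout AltHashing is about shift semantics: the
avalanche steps need a logical right shift, and in C++ the result of >> on a
negative signed value is implementation-defined (typically arithmetic, i.e.
sign-extending), which is why every shift previously needed a cast. A tiny
demonstration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        int32_t  s = (int32_t)0x80000000;  /* negative when signed */
        uint32_t u = 0x80000000u;
        printf("signed   >> 16 = 0x%08x\n", (uint32_t)(s >> 16)); /* commonly 0xffff8000 */
        printf("unsigned >> 16 = 0x%08x\n", u >> 16);             /* always   0x00008000 */
        return 0;
    }
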
--- a/src/share/vm/classfile/classFileParser.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3746,18 +3746,24 @@
       Exceptions::fthrow(
         THREAD_AND_LOCATION,
         vmSymbols::java_lang_UnsupportedClassVersionError(),
-        "Unsupported major.minor version %u.%u",
+        "Unsupported class file version %u.%u, "
+        "this version of the Java Runtime only recognizes class file versions up to %u.%u",
         major_version,
-        minor_version);
+        minor_version,
+        JAVA_MAX_SUPPORTED_VERSION,
+        JAVA_MAX_SUPPORTED_MINOR_VERSION);
     } else {
       ResourceMark rm(THREAD);
       Exceptions::fthrow(
         THREAD_AND_LOCATION,
         vmSymbols::java_lang_UnsupportedClassVersionError(),
-        "%s : Unsupported major.minor version %u.%u",
+        "%s has been compiled by a more recent version of the Java Runtime (class file version %u.%u), "
+        "this version of the Java Runtime only recognizes class file versions up to %u.%u",
         name->as_C_string(),
         major_version,
-        minor_version);
+        minor_version,
+        JAVA_MAX_SUPPORTED_VERSION,
+        JAVA_MAX_SUPPORTED_MINOR_VERSION);
     }
     return nullHandle;
   }
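
With the new wording, loading a class file newer than the VM supports
produces a message of roughly this shape (the class name and versions shown
are illustrative, not taken from this changeset):

    java.lang.UnsupportedClassVersionError: Foo has been compiled by a more
    recent version of the Java Runtime (class file version 52.0), this
    version of the Java Runtime only recognizes class file versions up to 51.0
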
@@ -4504,8 +4510,8 @@
             break; // didn't find any match; get out
           }
 
-          if (super_m->is_final() &&
-              // matching method in super is final
+          if (super_m->is_final() && !super_m->is_static() &&
+              // matching method in super is final, and not static
               (Reflection::verify_field_access(this_klass(),
                                                super_m->method_holder(),
                                                super_m->method_holder(),
--- a/src/share/vm/classfile/symbolTable.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/classfile/symbolTable.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -38,6 +38,9 @@
 
 // --------------------------------------------------------------------------
 
+// the number of buckets a thread claims
+const int ClaimChunkSize = 32;
+
 SymbolTable* SymbolTable::_the_table = NULL;
 // Static arena for symbols that are not deallocated
 Arena* SymbolTable::_arena = NULL;
@@ -83,16 +86,12 @@
   }
 }
 
-int SymbolTable::symbols_removed = 0;
-int SymbolTable::symbols_counted = 0;
+int SymbolTable::_symbols_removed = 0;
+int SymbolTable::_symbols_counted = 0;
+volatile int SymbolTable::_parallel_claimed_idx = 0;
 
-// Remove unreferenced symbols from the symbol table
-// This is done late during GC.
-void SymbolTable::unlink() {
-  int removed = 0;
-  int total = 0;
-  size_t memory_total = 0;
-  for (int i = 0; i < the_table()->table_size(); ++i) {
+void SymbolTable::buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total) {
+  for (int i = start_idx; i < end_idx; ++i) {
     HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
     HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
@@ -104,14 +103,14 @@
         break;
       }
       Symbol* s = entry->literal();
-      memory_total += s->size();
-      total++;
+      (*memory_total) += s->size();
+      (*processed)++;
       assert(s != NULL, "just checking");
       // If reference count is zero, remove.
       if (s->refcount() == 0) {
         assert(!entry->is_shared(), "shared entries should be kept live");
         delete s;
-        removed++;
+        (*removed)++;
         *p = entry->next();
         the_table()->free_entry(entry);
       } else {
@@ -121,12 +120,45 @@
       entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p);
     }
   }
-  symbols_removed += removed;
-  symbols_counted += total;
+}
+
+// Remove unreferenced symbols from the symbol table
+// This is done late during GC.
+void SymbolTable::unlink(int* processed, int* removed) {
+  size_t memory_total = 0;
+  buckets_unlink(0, the_table()->table_size(), processed, removed, &memory_total);
+  _symbols_removed += *removed;
+  _symbols_counted += *processed;
   // Exclude printing for normal PrintGCDetails because people parse
   // this output.
   if (PrintGCDetails && Verbose && WizardMode) {
-    gclog_or_tty->print(" [Symbols=%d size=" SIZE_FORMAT "K] ", total,
+    gclog_or_tty->print(" [Symbols=%d size=" SIZE_FORMAT "K] ", *processed,
+                        (memory_total*HeapWordSize)/1024);
+  }
+}
+
+void SymbolTable::possibly_parallel_unlink(int* processed, int* removed) {
+  const int limit = the_table()->table_size();
+
+  size_t memory_total = 0;
+
+  for (;;) {
+    // Grab next set of buckets to scan
+    int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
+    if (start_idx >= limit) {
+      // End of table
+      break;
+    }
+
+    int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
+    buckets_unlink(start_idx, end_idx, processed, removed, &memory_total);
+  }
+  Atomic::add(*processed, &_symbols_counted);
+  Atomic::add(*removed, &_symbols_removed);
+  // Exclude printing for normal PrintGCDetails because people parse
+  // this output.
+  if (PrintGCDetails && Verbose && WizardMode) {
+    gclog_or_tty->print(" [Symbols: scanned=%d removed=%d size=" SIZE_FORMAT "K] ", *processed, *removed,
                         (memory_total*HeapWordSize)/1024);
   }
 }
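
The claiming loop above is a generic fetch-and-add work-distribution scheme:
each GC worker atomically reserves the next ClaimChunkSize bucket indices and
processes them, so the table is partitioned dynamically through one shared
counter. A portable sketch of the same idea follows; note that HotSpot's
Atomic::add returns the new value (hence the "- ClaimChunkSize" above), while
std::atomic fetch_add returns the old value, so the subtraction disappears:

    #include <atomic>
    #include <algorithm>

    static const int ClaimChunkSize = 32;             // buckets claimed per grab
    static std::atomic<int> parallel_claimed_idx(0);  // shared claim counter

    // process_buckets stands in for buckets_unlink()/buckets_unlink_or_oops_do().
    void worker(int table_size, void (*process_buckets)(int start, int end)) {
      for (;;) {
        int start_idx = parallel_claimed_idx.fetch_add(ClaimChunkSize);
        if (start_idx >= table_size) break;           // whole table claimed
        int end_idx = std::min(table_size, start_idx + ClaimChunkSize);
        process_buckets(start_idx, end_idx);
      }
    }
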
@@ -494,11 +526,11 @@
   tty->print_cr("Total number of symbols  %5d", count);
   tty->print_cr("Total size in memory     %5dK",
           (memory_total*HeapWordSize)/1024);
-  tty->print_cr("Total counted            %5d", symbols_counted);
-  tty->print_cr("Total removed            %5d", symbols_removed);
-  if (symbols_counted > 0) {
+  tty->print_cr("Total counted            %5d", _symbols_counted);
+  tty->print_cr("Total removed            %5d", _symbols_removed);
+  if (_symbols_counted > 0) {
     tty->print_cr("Percent removed          %3.2f",
-          ((float)symbols_removed/(float)symbols_counted)* 100);
+          ((float)_symbols_removed/(float)_symbols_counted)* 100);
   }
   tty->print_cr("Reference counts         %5d", Symbol::_total_count);
   tty->print_cr("Symbol arena size        %5d used %5d",
@@ -739,39 +771,38 @@
   return result;
 }
 
-void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
+  buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), processed, removed);
+}
+
+void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
   // Readers of the table are unlocked, so we should only be removing
   // entries at a safepoint.
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
-    HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
-    while (entry != NULL) {
-      assert(!entry->is_shared(), "CDS not used for the StringTable");
+  const int limit = the_table()->table_size();
 
-      if (is_alive->do_object_b(entry->literal())) {
-        if (f != NULL) {
-          f->do_oop((oop*)entry->literal_addr());
-        }
-        p = entry->next_addr();
-      } else {
-        *p = entry->next();
-        the_table()->free_entry(entry);
-      }
-      entry = *p;
+  for (;;) {
+    // Grab next set of buckets to scan
+    int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
+    if (start_idx >= limit) {
+      // End of table
+      break;
     }
+
+    int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
+    buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, processed, removed);
   }
 }
 
-void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) {
+void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) {
   const int limit = the_table()->table_size();
 
   assert(0 <= start_idx && start_idx <= limit,
-         err_msg("start_idx (" INT32_FORMAT ") oob?", start_idx));
+         err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
   assert(0 <= end_idx && end_idx <= limit,
-         err_msg("end_idx (" INT32_FORMAT ") oob?", end_idx));
+         err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
   assert(start_idx <= end_idx,
-         err_msg("Ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+         err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
                  start_idx, end_idx));
 
   for (int i = start_idx; i < end_idx; i += 1) {
@@ -786,12 +817,44 @@
   }
 }
 
+void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed) {
+  const int limit = the_table()->table_size();
+
+  assert(0 <= start_idx && start_idx <= limit,
+         err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
+  assert(0 <= end_idx && end_idx <= limit,
+         err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
+  assert(start_idx <= end_idx,
+         err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+                 start_idx, end_idx));
+
+  for (int i = start_idx; i < end_idx; ++i) {
+    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
+    HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
+    while (entry != NULL) {
+      assert(!entry->is_shared(), "CDS not used for the StringTable");
+
+      if (is_alive->do_object_b(entry->literal())) {
+        if (f != NULL) {
+          f->do_oop((oop*)entry->literal_addr());
+        }
+        p = entry->next_addr();
+      } else {
+        *p = entry->next();
+        the_table()->free_entry(entry);
+        (*removed)++;
+      }
+      (*processed)++;
+      entry = *p;
+    }
+  }
+}
+
 void StringTable::oops_do(OopClosure* f) {
-  buckets_do(f, 0, the_table()->table_size());
+  buckets_oops_do(f, 0, the_table()->table_size());
 }
 
 void StringTable::possibly_parallel_oops_do(OopClosure* f) {
-  const int ClaimChunkSize = 32;
   const int limit = the_table()->table_size();
 
   for (;;) {
@@ -803,7 +866,7 @@
     }
 
     int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
-    buckets_do(f, start_idx, end_idx);
+    buckets_oops_do(f, start_idx, end_idx);
   }
 }
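
Both unlink paths rely on the splice idiom visible in
buckets_unlink_or_oops_do(): the bucket chain is walked through a
pointer-to-pointer so that a dead entry can be unlinked without tracking a
separate "previous" node. A stripped-down sketch, where Entry and alive stand
in for the real hashtable entry type and the is_alive closure:

    #include <cstddef>

    struct Entry { Entry* next; bool alive; };

    void bucket_unlink(Entry** bucket, int* processed, int* removed) {
      Entry** p = bucket;              // slot holding the link to 'entry'
      Entry*  entry = *bucket;
      while (entry != NULL) {
        if (entry->alive) {
          p = &entry->next;            // keep: advance to the next link slot
        } else {
          *p = entry->next;            // dead: splice it out of the chain
          delete entry;                // the real table calls free_entry()
          (*removed)++;
        }
        (*processed)++;
        entry = *p;                    // next entry, whichever branch ran
      }
    }
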
 
--- a/src/share/vm/classfile/symbolTable.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/classfile/symbolTable.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -86,8 +86,8 @@
   static bool _needs_rehashing;
 
   // For statistics
-  static int symbols_removed;
-  static int symbols_counted;
+  static int _symbols_removed;
+  static int _symbols_counted;
 
   Symbol* allocate_symbol(const u1* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
 
@@ -121,6 +121,11 @@
   static Arena* arena() { return _arena; }  // called for statistics
 
   static void initialize_symbols(int arena_alloc_size = 0);
+
+  static volatile int _parallel_claimed_idx;
+
+  // Release any dead symbols
+  static void buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total);
 public:
   enum {
     symbol_alloc_batch_size = 8,
@@ -177,7 +182,14 @@
                   unsigned int* hashValues, TRAPS);
 
   // Release any dead symbols
-  static void unlink();
+  static void unlink() {
+    int processed = 0;
+    int removed = 0;
+    unlink(&processed, &removed);
+  }
+  static void unlink(int* processed, int* removed);
+  // Release any dead symbols, possibly parallel version
+  static void possibly_parallel_unlink(int* processed, int* removed);
 
   // iterate over symbols
   static void symbols_do(SymbolClosure *cl);
@@ -235,6 +247,9 @@
   // Rehash the symbol table if it gets out of balance
   static void rehash_table();
   static bool needs_rehashing()         { return _needs_rehashing; }
+  // Parallel chunked scanning
+  static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
+  static int parallel_claimed_index()        { return _parallel_claimed_idx; }
 };
 
 class StringTable : public Hashtable<oop, mtSymbol> {
@@ -258,7 +273,10 @@
 
   // Apply the given oop closure to the entries in the buckets
   // in the range [start_idx, end_idx).
-  static void buckets_do(OopClosure* f, int start_idx, int end_idx);
+  static void buckets_oops_do(OopClosure* f, int start_idx, int end_idx);
+  // Unlink or apply the given oop closure to the entries in the buckets
+  // in the range [start_idx, end_idx).
+  static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed);
 
   StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
                               sizeof (HashtableEntry<oop, mtSymbol>)) {}
@@ -280,15 +298,28 @@
 
   // GC support
   //   Delete pointers to otherwise-unreachable objects.
-  static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f);
+  static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f) {
+    int processed = 0;
+    int removed = 0;
+    unlink_or_oops_do(cl, f, &processed, &removed);
+  }
   static void unlink(BoolObjectClosure* cl) {
-    unlink_or_oops_do(cl, NULL);
+    int processed = 0;
+    int removed = 0;
+    unlink_or_oops_do(cl, NULL, &processed, &removed);
   }
-
+  static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed);
+  static void unlink(BoolObjectClosure* cl, int* processed, int* removed) {
+    unlink_or_oops_do(cl, NULL, processed, removed);
+  }
   // Serially invoke "f->do_oop" on the locations of all oops in the table.
   static void oops_do(OopClosure* f);
 
-  // Possibly parallel version of the above
+  // Possibly parallel versions of the above
+  static void possibly_parallel_unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed);
+  static void possibly_parallel_unlink(BoolObjectClosure* cl, int* processed, int* removed) {
+    possibly_parallel_unlink_or_oops_do(cl, NULL, processed, removed);
+  }
   static void possibly_parallel_oops_do(OopClosure* f);
 
   // Hashing algorithm, used as the hash value used by the
@@ -349,5 +380,6 @@
 
   // Parallel chunked scanning
   static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
+  static int parallel_claimed_index() { return _parallel_claimed_idx; }
 };
 #endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
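
The header changes above follow a compatibility pattern worth noting: each
pre-existing no-argument entry point becomes an inline wrapper around a new
counting overload, so old call sites compile unchanged while GC code that
wants statistics supplies its own counters. In outline (names illustrative):

    struct TableLike {
      static void unlink() {                  // legacy entry point
        int processed = 0;
        int removed = 0;
        unlink(&processed, &removed);         // counts are simply discarded
      }
      static void unlink(int* processed, int* removed) {
        // ... walk buckets, incrementing *processed and *removed ...
      }
    };
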
--- a/src/share/vm/code/vtableStubs.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/code/vtableStubs.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -55,6 +55,9 @@
   const int chunk_factor = 32;
   if (_chunk == NULL || _chunk + real_size > _chunk_end) {
     const int bytes = chunk_factor * real_size + pd_code_alignment();
+
+    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
+    // If changing the name, update the other file accordingly.
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
       return NULL;
@@ -62,12 +65,6 @@
     _chunk = blob->content_begin();
     _chunk_end = _chunk + bytes;
     Forte::register_stub("vtable stub", _chunk, _chunk_end);
-    // Notify JVMTI about this stub. The event will be recorded by the enclosing
-    // JvmtiDynamicCodeEventCollector and posted when this thread has released
-    // all locks.
-    if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
-    }
     align_chunk();
   }
   assert(_chunk + real_size <= _chunk_end, "bad allocation");
@@ -130,6 +127,13 @@
                     is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location());
       Disassembler::decode(s->code_begin(), s->code_end());
     }
+    // Notify JVMTI about this stub. The event will be recorded by the enclosing
+    // JvmtiDynamicCodeEventCollector and posted when this thread has released
+    // all locks.
+    if (JvmtiExport::should_post_dynamic_code_generated()) {
+      JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
+                                                                   s->code_begin(), s->code_end());
+    }
   }
   return s->entry_point();
 }
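
The practical effect of moving the notification is visible from a JVMTI
agent: each stub is now reported individually, with the accurate name
("vtable stub" or "itable stub") and per-stub code bounds, rather than a
single "vtable stub" event covering a whole allocation chunk. A minimal
agent observing these events might look like the following sketch (error
handling omitted; stubs generated before agent load would additionally
need GenerateEvents):

    #include <jvmti.h>
    #include <string.h>
    #include <stdio.h>

    static void JNICALL dynamic_code_generated(jvmtiEnv* jvmti, const char* name,
                                               const void* address, jint length) {
      printf("dynamic code: %s at %p (%d bytes)\n", name, address, (int)length);
    }

    JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
      jvmtiEnv* jvmti = NULL;
      if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_2) != JNI_OK) return JNI_ERR;
      jvmtiEventCallbacks callbacks;
      memset(&callbacks, 0, sizeof(callbacks));
      callbacks.DynamicCodeGenerated = dynamic_code_generated;
      jvmti->SetEventCallbacks(&callbacks, (jint)sizeof(callbacks));
      jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                      JVMTI_EVENT_DYNAMIC_CODE_GENERATED, NULL);
      return JNI_OK;
    }
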
@@ -195,6 +199,14 @@
   VtableStubs::initialize();
 }
 
+void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
+  for (int i = 0; i < N; i++) {
+    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
+      f(s);
+    }
+  }
+}
+
 
 //-----------------------------------------------------------------------------------------------------
 // Non-product code
--- a/src/share/vm/code/vtableStubs.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/code/vtableStubs.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -131,6 +131,7 @@
   static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
   static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
   static void        initialize();
+  static void        vtable_stub_do(void f(VtableStub*));            // iterates over all vtable stubs
 };
 
 #endif // SHARE_VM_CODE_VTABLESTUBS_HPP
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/compiler/compileBroker.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -98,7 +98,7 @@
     Symbol* name = (method)->name();                                     \
     Symbol* signature = (method)->signature();                           \
     HOTSPOT_METHOD_COMPILE_BEGIN(                                        \
-      comp_name, strlen(comp_name),                                      \
+      (char *) comp_name, strlen(comp_name),                             \
       (char *) klass_name->bytes(), klass_name->utf8_length(),           \
       (char *) name->bytes(), name->utf8_length(),                       \
       (char *) signature->bytes(), signature->utf8_length());            \
@@ -110,7 +110,7 @@
     Symbol* name = (method)->name();                                     \
     Symbol* signature = (method)->signature();                           \
     HOTSPOT_METHOD_COMPILE_END(                                          \
-      comp_name, strlen(comp_name),                                      \
+      (char *) comp_name, strlen(comp_name),                             \
       (char *) klass_name->bytes(), klass_name->utf8_length(),           \
       (char *) name->bytes(), name->utf8_length(),                       \
       (char *) signature->bytes(), signature->utf8_length(), (success)); \
@@ -955,7 +955,7 @@
 
     if (compiler_thread == NULL || compiler_thread->osthread() == NULL){
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
+                                    os::native_thread_creation_failed_msg());
     }
 
     java_lang_Thread::set_thread(thread_oop(), compiler_thread);
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -469,7 +469,7 @@
 void CMSAdaptiveSizePolicy::checkpoint_roots_final_begin() {
   _STW_timer.stop();
   _latest_cms_initial_mark_end_to_remark_start_secs = _STW_timer.seconds();
-  // Start accumumlating time for the remark in the STW timer.
+  // Start accumulating time for the remark in the STW timer.
   _STW_timer.reset();
   _STW_timer.start();
 }
@@ -540,8 +540,8 @@
       avg_msc_pause()->sample(msc_pause_in_seconds);
       double mutator_time_in_seconds = 0.0;
       if (_latest_cms_collection_end_to_collection_start_secs == 0.0) {
-        // This assertion may fail because of time stamp gradularity.
-        // Comment it out and investiage it at a later time.  The large
+        // This assertion may fail because of time stamp granularity.
+        // Comment it out and investigate it at a later time.  The large
         // time stamp granularity occurs on some older linux systems.
 #ifndef CLOCK_GRANULARITY_TOO_LARGE
         assert((_latest_cms_concurrent_marking_time_secs == 0.0) &&
@@ -703,7 +703,7 @@
     double latest_cms_sum_concurrent_phases_time_secs =
       concurrent_collection_time();
     if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::ms_collecton_end "
+      gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::ms_collection_end "
         "STW_in_foreground_in_seconds %f "
         "_latest_cms_initial_mark_start_to_end_time_secs %f "
         "_latest_cms_remark_start_to_end_time_secs %f "
@@ -839,7 +839,7 @@
 
 void CMSAdaptiveSizePolicy::ms_collection_marking_begin() {
   _STW_timer.stop();
-  // Start accumumlating time for the marking in the STW timer.
+  // Start accumulating time for the marking in the STW timer.
   _STW_timer.reset();
   _STW_timer.start();
 }
@@ -1230,7 +1230,7 @@
     // We use the tenuring threshold to equalize the cost of major
     // and minor collections.
     // ThresholdTolerance is used to indicate how sensitive the
-    // tenuring threshold is to differences in cost betweent the
+    // tenuring threshold is to differences in cost between the
     // collection types.
 
     // Get the times of interest. This involves a little work, so
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -356,7 +356,7 @@
   void concurrent_sweeping_begin();
   void concurrent_sweeping_end();
   // Similar to the above (e.g., concurrent_marking_end()) and
-  // is used for both the precleaning an abortable precleaing
+  // is used for both the precleaning and abortable precleaning
   // phases.
   void concurrent_precleaning_begin();
   void concurrent_precleaning_end();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -88,8 +88,7 @@
   // of the tenured generation.
   PerfVariable* _avg_msc_pause_counter;
   // Average for the time between the most recent end of a
-  // MSC collection and the beginning of the next
-  // MSC collection.
+  // MSC collection and the beginning of the next MSC collection.
   PerfVariable* _avg_msc_interval_counter;
   // Average for the GC cost of a MSC collection based on
   // _avg_msc_pause_counter and _avg_msc_interval_counter.
@@ -99,8 +98,7 @@
   // of the tenured generation.
   PerfVariable* _avg_ms_pause_counter;
   // Average for the time between the most recent end of a
-  // MS collection and the beginning of the next
-  // MS collection.
+  // MS collection and the beginning of the next MS collection.
   PerfVariable* _avg_ms_interval_counter;
   // Average for the GC cost of a MS collection based on
   // _avg_ms_pause_counter and _avg_ms_interval_counter.
@@ -108,9 +106,9 @@
 
   // Average of the bytes promoted per minor collection.
   PerfVariable* _promoted_avg_counter;
-  // Average of the deviation of the promoted average
+  // Average of the deviation of the promoted average.
   PerfVariable* _promoted_avg_dev_counter;
-  // Padded average of the bytes promoted per minor colleciton
+  // Padded average of the bytes promoted per minor collection.
   PerfVariable* _promoted_padded_avg_counter;
 
   // See description of the _change_young_gen_for_maj_pauses
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -258,10 +258,10 @@
   bool take_from_overflow_list();
 };
 
-// Tn this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
+// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
 // stack and the bitMap are shared, so access needs to be suitably
-// sycnhronized. An OopTaskQueue structure, supporting efficient
-// workstealing, replaces a CMSMarkStack for storing grey objects.
+// synchronized. An OopTaskQueue structure, supporting efficient
+// work stealing, replaces a CMSMarkStack for storing grey objects.
 class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
  private:
   MemRegion              _span;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -407,8 +407,8 @@
   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                        (size_t) SmallForLinearAlloc - 1));
   // XXX the following could potentially be pretty slow;
-  // should one, pesimally for the rare cases when res
-  // caclulated above is less than IndexSetSize,
+  // should one, pessimistically for the rare cases when res
+  // calculated above is less than IndexSetSize,
   // just return res calculated above? My reasoning was that
   // those cases will be so rare that the extra time spent doesn't
   // really matter....
@@ -759,7 +759,7 @@
 // Note on locking for the space iteration functions:
 // since the collector's iteration activities are concurrent with
 // allocation activities by mutators, absent a suitable mutual exclusion
-// mechanism the iterators may go awry. For instace a block being iterated
+// mechanism the iterators may go awry. For instance, a block being iterated
 // may suddenly be allocated or divided up and part of it allocated and
 // so on.
 
@@ -2116,7 +2116,7 @@
 
 // Support for concurrent collection policy decisions.
 bool CompactibleFreeListSpace::should_concurrent_collect() const {
-  // In the future we might want to add in frgamentation stats --
+  // In the future we might want to add in fragmentation stats --
   // including erosion of the "mountain" into this decision as well.
   return !adaptive_freelists() && linearAllocationWouldFail();
 }
@@ -2125,7 +2125,7 @@
 
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
-  // prepare_for_compaction() uses the space between live objects
+  // The prepare_for_compaction() call uses the space between live objects
   // so that later phase can skip dead space quickly.  So verification
   // of the free lists doesn't work after.
 }
@@ -2148,7 +2148,7 @@
   SCAN_AND_COMPACT(obj_size);
 }
 
-// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
+// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
 // where fbs is free block sizes
 double CompactibleFreeListSpace::flsFrag() const {
   size_t itabFree = totalSizeInIndexedFreeLists();
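
A quick worked example of the metric, for 16 free words in total:

    one 16-word block:    1 - (16**2)/(16**2)     = 0     (unfragmented)
    four 4-word blocks:   1 - (4 * 4**2)/(16**2)  = 0.75  (fragmented)

so many small blocks drive the metric toward 1, one large block toward 0.
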
@@ -2677,7 +2677,7 @@
   // changes on-the-fly during a scavenge and avoid such a phase-change
   // pothole. The following code is a heuristic attempt to do that.
   // It is protected by a product flag until we have gained
-  // enough experience with this heuristic and fine-tuned its behaviour.
+  // enough experience with this heuristic and fine-tuned its behavior.
   // WARNING: This might increase fragmentation if we overreact to
   // small spikes, so some kind of historical smoothing based on
   // previous experience with the greater reactivity might be useful.
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -58,7 +58,7 @@
   HeapWord* _ptr;
   size_t    _word_size;
   size_t    _refillSize;
-  size_t    _allocation_size_limit;  // largest size that will be allocated
+  size_t    _allocation_size_limit;  // Largest size that will be allocated
 
   void print_on(outputStream* st) const;
 };
@@ -116,14 +116,14 @@
 
   PromotionInfo _promoInfo;
 
-  // helps to impose a global total order on freelistLock ranks;
+  // Helps to impose a global total order on freelistLock ranks;
   // assumes that CFLSpace's are allocated in global total order
   static int   _lockRank;
 
-  // a lock protecting the free lists and free blocks;
+  // A lock protecting the free lists and free blocks;
   // mutable because of ubiquity of locking even for otherwise const methods
   mutable Mutex _freelistLock;
-  // locking verifier convenience function
+  // Locking verifier convenience function
   void assert_locked() const PRODUCT_RETURN;
   void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
 
@@ -131,12 +131,13 @@
   LinearAllocBlock _smallLinearAllocBlock;
 
   FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
-  AFLBinaryTreeDictionary* _dictionary;    // ptr to dictionary for large size blocks
+  AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks
 
+  // Indexed array for small size blocks
   AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
-                                       // indexed array for small size blocks
-  // allocation stategy
-  bool       _fitStrategy;      // Use best fit strategy.
+
+  // Allocation strategy
+  bool       _fitStrategy;        // Use best fit strategy
   bool       _adaptive_freelists; // Use adaptive freelists
 
   // This is an address close to the largest free chunk in the heap.
@@ -157,7 +158,7 @@
 
   // Extra stuff to manage promotion parallelism.
 
-  // a lock protecting the dictionary during par promotion allocation.
+  // A lock protecting the dictionary during par promotion allocation.
   mutable Mutex _parDictionaryAllocLock;
   Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
 
@@ -275,26 +276,26 @@
   }
 
  protected:
-  // reset the indexed free list to its initial empty condition.
+  // Reset the indexed free list to its initial empty condition.
   void resetIndexedFreeListArray();
-  // reset to an initial state with a single free block described
+  // Reset to an initial state with a single free block described
   // by the MemRegion parameter.
   void reset(MemRegion mr);
   // Return the total number of words in the indexed free lists.
   size_t     totalSizeInIndexedFreeLists() const;
 
  public:
-  // Constructor...
+  // Constructor
   CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                            bool use_adaptive_freelists,
                            FreeBlockDictionary<FreeChunk>::DictionaryChoice);
-  // accessors
+  // Accessors
   bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
   FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
   HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
   void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
 
-  // Set CMS global values
+  // Set CMS global values.
   static void set_cms_values();
 
   // Return the free chunk at the end of the space.  If no such
@@ -305,7 +306,7 @@
 
   void set_collector(CMSCollector* collector) { _collector = collector; }
 
-  // Support for parallelization of rescan and marking
+  // Support for parallelization of rescan and marking.
   const size_t rescan_task_size()  const { return _rescan_task_size;  }
   const size_t marking_task_size() const { return _marking_task_size; }
   SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
@@ -346,7 +347,7 @@
   // Resizing support
   void set_end(HeapWord* value);  // override
 
-  // mutual exclusion support
+  // Mutual exclusion support
   Mutex* freelistLock() const { return &_freelistLock; }
 
   // Iteration support
@@ -370,7 +371,7 @@
   // If the iteration encounters an unparseable portion of the region,
   // terminate the iteration and return the address of the start of the
   // subregion that isn't done.  Return of "NULL" indicates that the
-  // interation completed.
+  // iteration completed.
   virtual HeapWord*
        object_iterate_careful_m(MemRegion mr,
                                 ObjectClosureCareful* cl);
@@ -393,11 +394,11 @@
   size_t block_size_nopar(const HeapWord* p) const;
   bool block_is_obj_nopar(const HeapWord* p) const;
 
-  // iteration support for promotion
+  // Iteration support for promotion
   void save_marks();
   bool no_allocs_since_save_marks();
 
-  // iteration support for sweeping
+  // Iteration support for sweeping
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
@@ -457,7 +458,7 @@
 
   FreeChunk* allocateScratch(size_t size);
 
-  // returns true if either the small or large linear allocation buffer is empty.
+  // Returns true if either the small or large linear allocation buffer is empty.
   bool       linearAllocationWouldFail() const;
 
   // Adjust the chunk for the minimum size.  This version is called in
@@ -477,18 +478,18 @@
   void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
               bool coalesced);
 
-  // Support for decisions regarding concurrent collection policy
+  // Support for decisions regarding concurrent collection policy.
   bool should_concurrent_collect() const;
 
-  // Support for compaction
+  // Support for compaction.
   void prepare_for_compaction(CompactPoint* cp);
   void adjust_pointers();
   void compact();
-  // reset the space to reflect the fact that a compaction of the
+  // Reset the space to reflect the fact that a compaction of the
   // space has been done.
   virtual void reset_after_compaction();
 
-  // Debugging support
+  // Debugging support.
   void print()                            const;
   void print_on(outputStream* st)         const;
   void prepare_for_verify();
@@ -500,7 +501,7 @@
   // i.e. either the binary tree dictionary, the indexed free lists
   // or the linear allocation block.
   bool verify_chunk_in_free_list(FreeChunk* fc) const;
-  // Verify that the given chunk is the linear allocation block
+  // Verify that the given chunk is the linear allocation block.
   bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
   // Do some basic checks on the free lists.
   void check_free_list_consistency()      const PRODUCT_RETURN;
@@ -516,7 +517,7 @@
     size_t sumIndexedFreeListArrayReturnedBytes();
     // Return the total number of chunks in the indexed free lists.
     size_t totalCountInIndexedFreeLists() const;
-    // Return the total numberof chunks in the space.
+    // Return the total number of chunks in the space.
     size_t totalCount();
   )
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -117,10 +117,10 @@
 // hide the naked CGC_lock manipulation in the baton-passing code
 // further below. That's something we should try to do. Also, the proof
 // of correctness of this 2-level locking scheme is far from obvious,
-// and potentially quite slippery. We have an uneasy supsicion, for instance,
+// and potentially quite slippery. We have an uneasy suspicion, for instance,
 // that there may be a theoretical possibility of delay/starvation in the
 // low-level lock/wait/notify scheme used for the baton-passing because of
-// potential intereference with the priority scheme embodied in the
+// potential interference with the priority scheme embodied in the
 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 // invocation further below and marked with "XXX 20011219YSR".
 // Indeed, as we note elsewhere, this may become yet more slippery
@@ -259,7 +259,7 @@
   // Ideally, in the calculation below, we'd compute the dilatation
   // factor as: MinChunkSize/(promoting_gen's min object size)
   // Since we do not have such a general query interface for the
-  // promoting generation, we'll instead just use the mimimum
+  // promoting generation, we'll instead just use the minimum
   // object size (which today is a header's worth of space);
   // note that all arithmetic is in units of HeapWords.
   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
@@ -274,7 +274,7 @@
 //
 //   Let "f" be MinHeapFreeRatio in
 //
-//    _intiating_occupancy = 100-f +
+//    _initiating_occupancy = 100-f +
 //                           f * (CMSTriggerRatio/100)
 //   where CMSTriggerRatio is the argument "tr" below.
 //
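
A worked example with illustrative values f = 40 (MinHeapFreeRatio) and
tr = 80 (CMSTriggerRatio):

    _initiating_occupancy = (100 - 40) + 40 * (80/100)
                          = 60 + 32
                          = 92

i.e. a concurrent collection cycle would be initiated once the CMS
generation reaches roughly 92% occupancy.
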
@@ -958,7 +958,7 @@
         desired_free_percentage);
       gclog_or_tty->print_cr("  Maximum free fraction %f",
         maximum_free_percentage);
-      gclog_or_tty->print_cr("  Capactiy "SIZE_FORMAT, capacity()/1000);
+      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
         desired_capacity/1000);
       int prev_level = level() - 1;
@@ -2671,7 +2671,7 @@
 // that it's responsible for collecting, while itself doing any
 // work common to all generations it's responsible for. A similar
 // comment applies to the  gc_epilogue()'s.
-// The role of the varaible _between_prologue_and_epilogue is to
+// The role of the variable _between_prologue_and_epilogue is to
 // enforce the invocation protocol.
 void CMSCollector::gc_prologue(bool full) {
   // Call gc_prologue_work() for the CMSGen
@@ -2878,10 +2878,10 @@
 // Check reachability of the given heap address in CMS generation,
 // treating all other generations as roots.
 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
-  // We could "guarantee" below, rather than assert, but i'll
+  // We could "guarantee" below, rather than assert, but I'll
   // leave these as "asserts" so that an adventurous debugger
   // could try this in the product build provided some subset of
-  // the conditions were met, provided they were intersted in the
+  // the conditions were met, provided they were interested in the
   // results and knew that the computation below wouldn't interfere
   // with other concurrent computations mutating the structures
   // being read or written.
@@ -2982,7 +2982,7 @@
   // This is as intended, because by this time
   // GC must already have cleared any refs that need to be cleared,
   // and traced those that need to be marked; moreover,
-  // the marking done here is not going to intefere in any
+  // the marking done here is not going to interfere in any
   // way with the marking information used by GC.
   NoRefDiscovery no_discovery(ref_processor());
 
@@ -3000,7 +3000,7 @@
 
   if (CMSRemarkVerifyVariant == 1) {
     // In this first variant of verification, we complete
-    // all marking, then check if the new marks-verctor is
+    // all marking, then check if the new marks-vector is
     // a subset of the CMS marks-vector.
     verify_after_remark_work_1();
   } else if (CMSRemarkVerifyVariant == 2) {
@@ -3033,7 +3033,6 @@
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,   // younger gens are roots
                                 true,   // activate StrongRootsScope
-                                false,  // not scavenging
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &notOlder,
                                 true,   // walk code active on stacks
@@ -3101,7 +3100,6 @@
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,   // younger gens are roots
                                 true,   // activate StrongRootsScope
-                                false,  // not scavenging
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &notOlder,
                                 true,   // walk code active on stacks
@@ -3303,7 +3301,7 @@
 void CMSCollector::setup_cms_unloading_and_verification_state() {
   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                              || VerifyBeforeExit;
-  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
 
   // We set the proper root for this CMS cycle here.
   if (should_unload_classes()) {   // Should unload classes this cycle
@@ -3315,7 +3313,7 @@
   }
 
   // Not unloading classes this cycle
-  assert(!should_unload_classes(), "Inconsitency!");
+  assert(!should_unload_classes(), "Inconsistency!");
   remove_root_scanning_option(SharedHeap::SO_SystemClasses);
   add_root_scanning_option(SharedHeap::SO_AllClasses);
 
@@ -3401,7 +3399,7 @@
       CMSExpansionCause::_allocate_par_lab);
     // Now go around the loop and try alloc again;
     // A competing par_promote might beat us to the expansion space,
-    // so we may go around the loop again if promotion fails agaion.
+    // so we may go around the loop again if promotion fails again.
     if (GCExpandToAllocateDelayMillis > 0) {
       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
     }
@@ -3738,10 +3736,9 @@
       gch->gen_process_strong_roots(_cmsGen->level(),
                                     true,   // younger gens are roots
                                     true,   // activate StrongRootsScope
-                                    false,  // not scavenging
                                     SharedHeap::ScanningOption(roots_scanning_options()),
                                     &notOlder,
-                                    true,   // walk all of code cache if (so & SO_CodeCache)
+                                    true,   // walk all of code cache if (so & SO_AllCodeCache)
                                     NULL,
                                     &klass_closure);
     }
@@ -4373,7 +4370,7 @@
   // should really use wait/notify, which is the recommended
   // way of doing this type of interaction. Additionally, we should
   // consolidate the eight methods that do the yield operation and they
-  // are almost identical into one for better maintenability and
+  // are almost identical into one for better maintainability and
   // readability. See 6445193.
   //
   // Tony 2006.06.29
@@ -4541,7 +4538,7 @@
   // If Eden's current occupancy is below this threshold,
   // immediately schedule the remark; else preclean
   // past the next scavenge in an effort to
-  // schedule the pause as described avove. By choosing
+  // schedule the pause as described above. By choosing
   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
   // we will never do an actual abortable preclean cycle.
   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
@@ -5238,14 +5235,13 @@
   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                 false,     // yg was scanned above
                                 false,     // this is parallel code
-                                false,     // not scavenging
                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                 &par_mri_cl,
-                                true,   // walk all of code cache if (so & SO_CodeCache)
+                                true,   // walk all of code cache if (so & SO_AllCodeCache)
                                 NULL,
                                 &klass_closure);
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
@@ -5375,14 +5371,13 @@
   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                 false,     // yg was scanned above
                                 false,     // this is parallel code
-                                false,     // not scavenging
                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                 &par_mrias_cl,
-                                true,   // walk all of code cache if (so & SO_CodeCache)
+                                true,   // walk all of code cache if (so & SO_AllCodeCache)
                                 NULL,
                                 NULL);     // The dirty klasses will be handled below
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
@@ -5537,8 +5532,8 @@
   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
   // CAUTION: This closure has state that persists across calls to
   // the work method dirty_range_iterate_clear() in that it has
-  // imbedded in it a (subtype of) UpwardsObjectClosure. The
-  // use of that state in the imbedded UpwardsObjectClosure instance
+  // embedded in it a (subtype of) UpwardsObjectClosure. The
+  // use of that state in the embedded UpwardsObjectClosure instance
   // assumes that the cards are always iterated (even if in parallel
   // by several threads) in monotonically increasing order per each
   // thread. This is true of the implementation below which picks
@@ -5553,7 +5548,7 @@
   // sure that the changes there do not run counter to the
   // assumptions made here and necessary for correctness and
   // efficiency. Note also that this code might yield inefficient
-  // behaviour in the case of very large objects that span one or
+  // behavior in the case of very large objects that span one or
   // more work chunks. Such objects would potentially be scanned
   // several times redundantly. Work on 4756801 should try and
   // address that performance anomaly if at all possible. XXX
@@ -5579,7 +5574,7 @@
 
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
     // Having claimed the nth_task, compute corresponding mem-region,
-    // which is a-fortiori aligned correctly (i.e. at a MUT bopundary).
+    // which is a fortiori aligned correctly (i.e. at a MUT boundary).
     // The alignment restriction ensures that we do not need any
     // synchronization with other gang-workers while setting or
     // clearing bits in this chunk of the MUT.
@@ -5966,7 +5961,6 @@
     gch->gen_process_strong_roots(_cmsGen->level(),
                                   true,  // younger gens as roots
                                   false, // use the local StrongRootsScope
-                                  false, // not scavenging
                                   SharedHeap::ScanningOption(roots_scanning_options()),
                                   &mrias_cl,
                                   true,   // walk code active on stacks
@@ -5974,7 +5968,7 @@
                                   NULL);  // The dirty klasses will be handled below
 
     assert(should_unload_classes()
-           || (roots_scanning_options() & SharedHeap::SO_CodeCache),
+           || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   }
 
@@ -6371,7 +6365,7 @@
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();
 
-  // We need to use a monotonically non-deccreasing time in ms
+  // We need to use a monotonically non-decreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@@ -6732,7 +6726,7 @@
     warning("CMS bit map allocation failure");
     return false;
   }
-  // For now we'll just commit all of the bit map up fromt.
+  // For now we'll just commit all of the bit map up front.
   // Later on we'll try to be more parsimonious with swap.
   if (!_virtual_space.initialize(brs, brs.size())) {
     warning("CMS bit map backing store failure");
@@ -6839,8 +6833,8 @@
 
 // XXX FIX ME !!! In the MT case we come in here holding a
 // leaf lock. For printing we need to take a further lock
-// which has lower rank. We need to recallibrate the two
-// lock-ranks involved in order to be able to rpint the
+// which has lower rank. We need to recalibrate the two
+// lock-ranks involved in order to be able to print the
 // messages below. (Or defer the printing to the caller.
 // For now we take the expedient path of just disabling the
 // messages for the problematic case.)
@@ -7180,7 +7174,7 @@
           }
         #endif // ASSERT
     } else {
-      // an unitialized object
+      // An uninitialized object.
       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
       size = pointer_delta(nextOneAddr + 1, addr);
@@ -7188,7 +7182,7 @@
              "alignment problem");
       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
       // will dirty the card when the klass pointer is installed in the
-      // object (signalling the completion of initialization).
+      // object (signaling the completion of initialization).
     }
   } else {
     // Either a not yet marked object or an uninitialized object
@@ -7249,7 +7243,7 @@
   HeapWord* addr = (HeapWord*)p;
   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
   assert(!_span.contains(addr), "we are scanning the survivor spaces");
-  assert(p->klass_or_null() != NULL, "object should be initializd");
+  assert(p->klass_or_null() != NULL, "object should be initialized");
   // an initialized object; ignore mark word in verification below
   // since we are running concurrent with mutators
   assert(p->is_oop(true), "should be an oop");
@@ -7999,7 +7993,7 @@
          // we need to dirty all of the cards that the object spans,
          // since the rescan of object arrays will be limited to the
          // dirty cards.
-         // Note that no one can be intefering with us in this action
+         // Note that no one can be interfering with us in this action
          // of dirtying the mod union table, so no locking or atomics
          // are required.
          if (obj->is_objArray()) {
@@ -9025,7 +9019,7 @@
 
 // It's OK to call this multi-threaded;  the worst thing
 // that can happen is that we'll get a bunch of closely
-// spaced simulated oveflows, but that's OK, in fact
+// spaced simulated overflows, but that's OK, in fact
 // probably good as it would exercise the overflow code
 // under contention.
 bool CMSCollector::simulate_overflow() {
@@ -9145,7 +9139,7 @@
       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
     }
   } else {
-    // Chop off the suffix and rerturn it to the global list.
+    // Chop off the suffix and return it to the global list.
     assert(cur->mark() != BUSY, "Error");
     oop suffix_head = cur->mark(); // suffix will be put back on global list
     cur->set_mark(NULL);           // break off suffix
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -171,19 +171,19 @@
 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
 class CMSMarkStack: public CHeapObj<mtGC>  {
   //
-  friend class CMSCollector;   // to get at expasion stats further below
+  friend class CMSCollector;   // To get at expansion stats further below.
   //
 
-  VirtualSpace _virtual_space;  // space for the stack
-  oop*   _base;      // bottom of stack
-  size_t _index;     // one more than last occupied index
-  size_t _capacity;  // max #elements
-  Mutex  _par_lock;  // an advisory lock used in case of parallel access
-  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run
+  VirtualSpace _virtual_space;  // Space for the stack
+  oop*   _base;      // Bottom of stack
+  size_t _index;     // One more than last occupied index
+  size_t _capacity;  // Max #elements
+  Mutex  _par_lock;  // An advisory lock used in case of parallel access
+  NOT_PRODUCT(size_t _max_depth;)  // Max depth plumbed during run
 
  protected:
-  size_t _hit_limit;      // we hit max stack size limit
-  size_t _failed_double;  // we failed expansion before hitting limit
+  size_t _hit_limit;      // We hit max stack size limit
+  size_t _failed_double;  // We failed expansion before hitting limit
 
  public:
   CMSMarkStack():
@@ -238,7 +238,7 @@
     _index = 0;
   }
 
-  // Expand the stack, typically in response to an overflow condition
+  // Expand the stack, typically in response to an overflow condition.
   void expand();
 
   // Compute the least valued stack element.
@@ -250,7 +250,7 @@
      return least;
   }
 
-  // Exposed here to allow stack expansion in || case
+  // Exposed here to allow stack expansion in the parallel case.
   Mutex* par_lock() { return &_par_lock; }
 };
 
@@ -557,7 +557,7 @@
   // Manipulated with CAS in the parallel/multi-threaded case.
   oop _overflow_list;
   // The following array-pair keeps track of mark words
-  // displaced for accomodating overflow list above.
+  // displaced for accommodating overflow list above.
   // This code will likely be revisited under RFE#4922830.
   Stack<oop, mtGC>     _preserved_oop_stack;
   Stack<markOop, mtGC> _preserved_mark_stack;
@@ -599,7 +599,7 @@
   void verify_after_remark_work_1();
   void verify_after_remark_work_2();
 
-  // true if any verification flag is on.
+  // True if any verification flag is on.
   bool _verifying;
   bool verifying() const { return _verifying; }
   void set_verifying(bool v) { _verifying = v; }
@@ -611,9 +611,9 @@
   void set_did_compact(bool v);
 
   // XXX Move these to CMSStats ??? FIX ME !!!
-  elapsedTimer _inter_sweep_timer;   // time between sweeps
-  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
-  // padded decaying average estimates of the above
+  elapsedTimer _inter_sweep_timer;   // Time between sweeps
+  elapsedTimer _intra_sweep_timer;   // Time _in_ sweeps
+  // Padded decaying average estimates of the above
   AdaptivePaddedAverage _inter_sweep_estimate;
   AdaptivePaddedAverage _intra_sweep_estimate;
 
@@ -632,16 +632,16 @@
   void report_heap_summary(GCWhen::Type when);
 
  protected:
-  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
-  MemRegion                      _span;    // span covering above two
-  CardTableRS*                   _ct;      // card table
+  ConcurrentMarkSweepGeneration* _cmsGen;  // Old gen (CMS)
+  MemRegion                      _span;    // Span covering above two
+  CardTableRS*                   _ct;      // Card table
 
   // CMS marking support structures
   CMSBitMap     _markBitMap;
   CMSBitMap     _modUnionTable;
   CMSMarkStack  _markStack;
 
-  HeapWord*     _restart_addr; // in support of marking stack overflow
+  HeapWord*     _restart_addr; // In support of marking stack overflow
   void          lower_restart_addr(HeapWord* low);
 
   // Counters in support of marking stack / work queue overflow handling:
@@ -656,12 +656,12 @@
   size_t        _par_kac_ovflw;
   NOT_PRODUCT(ssize_t _num_par_pushes;)
 
-  // ("Weak") Reference processing support
+  // ("Weak") Reference processing support.
   ReferenceProcessor*            _ref_processor;
   CMSIsAliveClosure              _is_alive_closure;
-      // keep this textually after _markBitMap and _span; c'tor dependency
+  // Keep this textually after _markBitMap and _span; c'tor dependency.
 
-  ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
+  ConcurrentMarkSweepThread*     _cmsThread;   // The thread doing the work
   ModUnionClosure    _modUnionClosure;
   ModUnionClosurePar _modUnionClosurePar;
 
@@ -697,7 +697,7 @@
   // State related to prologue/epilogue invocation for my generations
   bool _between_prologue_and_epilogue;
 
-  // Signalling/State related to coordination between fore- and backgroud GC
+  // Signaling/State related to coordination between fore- and background GC
   // Note: When the baton has been passed from background GC to foreground GC,
   // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
   static bool _foregroundGCIsActive;    // true iff foreground collector is active or
@@ -712,13 +712,13 @@
   int    _numYields;
   size_t _numDirtyCards;
   size_t _sweep_count;
-  // number of full gc's since the last concurrent gc.
+  // Number of full gc's since the last concurrent gc.
   uint   _full_gcs_since_conc_gc;
 
-  // occupancy used for bootstrapping stats
+  // Occupancy used for bootstrapping stats
   double _bootstrap_occupancy;
 
-  // timer
+  // Timer
   elapsedTimer _timer;
 
   // Timing, allocation and promotion statistics, used for scheduling.
@@ -770,7 +770,7 @@
                                    int no_of_gc_threads);
   void push_on_overflow_list(oop p);
   void par_push_on_overflow_list(oop p);
-  // the following is, obviously, not, in general, "MT-stable"
+  // The following is, obviously, not, in general, "MT-stable"
   bool overflow_list_is_empty() const;
 
   void preserve_mark_if_necessary(oop p);
@@ -778,24 +778,24 @@
   void preserve_mark_work(oop p, markOop m);
   void restore_preserved_marks_if_any();
   NOT_PRODUCT(bool no_preserved_marks() const;)
-  // in support of testing overflow code
+  // In support of testing overflow code
   NOT_PRODUCT(int _overflow_counter;)
-  NOT_PRODUCT(bool simulate_overflow();)       // sequential
+  NOT_PRODUCT(bool simulate_overflow();)       // Sequential
   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
 
   // CMS work methods
-  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
+  void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
 
-  // a return value of false indicates failure due to stack overflow
-  bool markFromRootsWork(bool asynch);  // concurrent marking work
+  // A return value of false indicates failure due to stack overflow
+  bool markFromRootsWork(bool asynch);  // Concurrent marking work
 
  public:   // FIX ME!!! only for testing
-  bool do_marking_st(bool asynch);      // single-threaded marking
-  bool do_marking_mt(bool asynch);      // multi-threaded  marking
+  bool do_marking_st(bool asynch);      // Single-threaded marking
+  bool do_marking_mt(bool asynch);      // Multi-threaded  marking
 
  private:
 
-  // concurrent precleaning work
+  // Concurrent precleaning work
   size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
   size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
@@ -811,26 +811,26 @@
   // Resets (i.e. clears) the per-thread plab sample vectors
   void reset_survivor_plab_arrays();
 
-  // final (second) checkpoint work
+  // Final (second) checkpoint work
   void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                 bool init_mark_was_synchronous);
-  // work routine for parallel version of remark
+  // Work routine for parallel version of remark
   void do_remark_parallel();
-  // work routine for non-parallel version of remark
+  // Work routine for non-parallel version of remark
   void do_remark_non_parallel();
-  // reference processing work routine (during second checkpoint)
+  // Reference processing work routine (during second checkpoint)
   void refProcessingWork(bool asynch, bool clear_all_soft_refs);
 
-  // concurrent sweeping work
+  // Concurrent sweeping work
   void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
 
-  // (concurrent) resetting of support data structures
+  // (Concurrent) resetting of support data structures
   void reset(bool asynch);
 
   // Clear _expansion_cause fields of constituent generations
   void clear_expansion_cause();
 
-  // An auxilliary method used to record the ends of
+  // An auxiliary method used to record the ends of
   // used regions of each generation to limit the extent of sweep
   void save_sweep_limits();
 
@@ -854,7 +854,7 @@
   bool is_external_interruption();
   void report_concurrent_mode_interruption();
 
-  // If the backgrould GC is active, acquire control from the background
+  // If the background GC is active, acquire control from the background
   // GC and do the collection.
   void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
 
@@ -893,7 +893,7 @@
 
   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
 
-  // locking checks
+  // Locking checks
   NOT_PRODUCT(static bool have_cms_token();)
 
   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
@@ -958,7 +958,7 @@
   CMSBitMap* markBitMap()  { return &_markBitMap; }
   void directAllocated(HeapWord* start, size_t size);
 
-  // main CMS steps and related support
+  // Main CMS steps and related support
   void checkpointRootsInitial(bool asynch);
   bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                     // due to stack overflow
@@ -977,7 +977,7 @@
   // Performance Counter Support
   CollectorCounters* counters()    { return _gc_counters; }
 
-  // timer stuff
+  // Timer stuff
   void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
   void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
   void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
@@ -1014,18 +1014,18 @@
 
   static void print_on_error(outputStream* st);
 
-  // debugging
+  // Debugging
   void verify();
   bool verify_after_remark(bool silent = VerifySilently);
   void verify_ok_to_terminate() const PRODUCT_RETURN;
   void verify_work_stacks_empty() const PRODUCT_RETURN;
   void verify_overflow_empty() const PRODUCT_RETURN;
 
-  // convenience methods in support of debugging
+  // Convenience methods in support of debugging
   static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
   HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
 
-  // accessors
+  // Accessors
   CMSMarkStack* verification_mark_stack() { return &_markStack; }
   CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }
 
@@ -1109,7 +1109,7 @@
 
   CollectionTypes _debug_collection_type;
 
-  // True if a compactiing collection was done.
+  // True if a compacting collection was done.
   bool _did_compact;
   bool did_compact() { return _did_compact; }
 
@@ -1203,7 +1203,7 @@
 
   // Support for compaction
   CompactibleSpace* first_compaction_space() const;
-  // Adjust quantites in the generation affected by
+  // Adjust quantities in the generation affected by
   // the compaction.
   void reset_after_compaction();
 
@@ -1301,7 +1301,7 @@
   void setNearLargestChunk();
   bool isNearLargestChunk(HeapWord* addr);
 
-  // Get the chunk at the end of the space.  Delagates to
+  // Get the chunk at the end of the space.  Delegates to
   // the space.
   FreeChunk* find_chunk_at_end();
 
@@ -1422,7 +1422,6 @@
 // marking from the roots following the first checkpoint.
 // XXX This should really be a subclass of The serial version
 // above, but i have not had the time to refactor things cleanly.
-// That willbe done for Dolphin.
 class Par_MarkFromRootsClosure: public BitMapClosure {
   CMSCollector*  _collector;
   MemRegion      _whole_span;
@@ -1780,7 +1779,7 @@
   void do_already_free_chunk(FreeChunk *fc);
   // Work method called when processing an already free or a
   // freshly garbage chunk to do a lookahead and possibly a
-  // premptive flush if crossing over _limit.
+  // preemptive flush if crossing over _limit.
   void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
   // Process a garbage chunk during sweeping.
   size_t do_garbage_chunk(FreeChunk *fc);
@@ -1879,7 +1878,7 @@
 };
 
 // Allow yielding or short-circuiting of reference list
-// prelceaning work.
+// precleaning work.
 class CMSPrecleanRefsYieldClosure: public YieldClosure {
   CMSCollector* _collector;
   void do_yield_work();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -197,13 +197,13 @@
 }
 
 
-// Return the HeapWord address corrsponding to the next "0" bit
+// Return the HeapWord address corresponding to the next "0" bit
 // (inclusive).
 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
   return getNextUnmarkedWordAddress(addr, endWord());
 }
 
-// Return the HeapWord address corrsponding to the next "0" bit
+// Return the HeapWord address corresponding to the next "0" bit
 // (inclusive).
 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
   HeapWord* start_addr, HeapWord* end_addr) const {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -164,7 +164,7 @@
   //  _pending_yields that holds the sum (of both sync and async requests), and
   //  a second counter _pending_decrements that only holds the async requests,
   //  for greater efficiency, since in a typical CMS run, there are many more
-  //  pontential (i.e. static) yield points than there are actual
+  //  potential (i.e. static) yield points than there are actual
   //  (i.e. dynamic) yields because of requests, which are few and far between.
   //
   // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
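The counter invariant described in this comment can be made concrete with a small sketch; YieldBookkeeping and its methods are hypothetical names for illustration, not fields of ConcurrentMarkSweepThread:

    #include <cassert>

    // Sketch of the two-counter scheme: pending_yields is the sum of sync
    // and async requests, pending_decrements counts only the async ones,
    // so pending_yields >= pending_decrements always holds.
    struct YieldBookkeeping {
      int pending_yields     = 0;  // sync + async requests
      int pending_decrements = 0;  // async requests only

      void request_sync_yield()  { pending_yields++; }
      void request_async_yield() { pending_yields++; pending_decrements++; }

      // Dropping an async request decrements both counters, which
      // preserves the invariant.
      void drop_async_request() {
        assert(pending_decrements > 0);
        pending_yields--;
        pending_decrements--;
        assert(pending_yields >= pending_decrements);
      }
    };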
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -279,7 +279,7 @@
 // When _spoolTail is NULL, then the set of slots with displaced headers
 // is all those starting at the slot <_spoolHead, _firstIndex> and
 // going up to the last slot of last block in the linked list.
-// In this lartter case, _splice_point points to the tail block of
+// In this latter case, _splice_point points to the tail block of
 // this linked list of blocks holding displaced headers.
 void PromotionInfo::verify() const {
   // Verify the following:
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -141,8 +141,7 @@
 #ifndef USDT2
   HS_DTRACE_PROBE(hs_private, cms__initmark__begin);
 #else /* USDT2 */
-  HS_PRIVATE_CMS_INITMARK_BEGIN(
-                                );
+  HS_PRIVATE_CMS_INITMARK_BEGIN();
 #endif /* USDT2 */
 
   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
@@ -162,8 +161,7 @@
 #ifndef USDT2
   HS_DTRACE_PROBE(hs_private, cms__initmark__end);
 #else /* USDT2 */
-  HS_PRIVATE_CMS_INITMARK_END(
-                                );
+  HS_PRIVATE_CMS_INITMARK_END();
 #endif /* USDT2 */
 }
 
@@ -178,8 +176,7 @@
 #ifndef USDT2
   HS_DTRACE_PROBE(hs_private, cms__remark__begin);
 #else /* USDT2 */
-  HS_PRIVATE_CMS_REMARK_BEGIN(
-                                );
+  HS_PRIVATE_CMS_REMARK_BEGIN();
 #endif /* USDT2 */
 
   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
@@ -200,8 +197,7 @@
 #ifndef USDT2
   HS_DTRACE_PROBE(hs_private, cms__remark__end);
 #else /* USDT2 */
-  HS_PRIVATE_CMS_REMARK_END(
-                                );
+  HS_PRIVATE_CMS_REMARK_END();
 #endif /* USDT2 */
 }
 
@@ -258,7 +254,7 @@
       // No need to do a young gc, we'll just nudge the CMS thread
       // in the doit() method above, to be executed soon.
       assert(_gc_count_before < gch->total_collections(),
-             "total_collections() should be monotnically increasing");
+             "total_collections() should be monotonically increasing");
       return false;  // no need for foreground young gc
     }
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/bufferingOopClosure.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/bufferingOopClosure.hpp"
+#include "memory/iterator.hpp"
+#include "utilities/debug.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestBufferingOopClosure {
+
+  // Helper class to fake a set of oop*s and narrowOop*s.
+  class FakeRoots {
+   public:
+    // Used for sanity checking of the values passed to the do_oop functions in the test.
+    static const uintptr_t NarrowOopMarker = uintptr_t(1) << (BitsPerWord - 1);
+
+    int    _num_narrow;
+    int    _num_full;
+    void** _narrow;
+    void** _full;
+
+    FakeRoots(int num_narrow, int num_full) :
+        _num_narrow(num_narrow),
+        _num_full(num_full),
+        _narrow((void**)::malloc(sizeof(void*) * num_narrow)),
+        _full((void**)::malloc(sizeof(void*) * num_full)) {
+
+      for (int i = 0; i < num_narrow; i++) {
+        _narrow[i] = (void*)(NarrowOopMarker + (uintptr_t)i);
+      }
+      for (int i = 0; i < num_full; i++) {
+        _full[i] = (void*)(uintptr_t)i;
+      }
+    }
+
+    ~FakeRoots() {
+      ::free(_narrow);
+      ::free(_full);
+    }
+
+    void oops_do_narrow_then_full(OopClosure* cl) {
+      for (int i = 0; i < _num_narrow; i++) {
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+      for (int i = 0; i < _num_full; i++) {
+        cl->do_oop((oop*)_full[i]);
+      }
+    }
+
+    void oops_do_full_then_narrow(OopClosure* cl) {
+      for (int i = 0; i < _num_full; i++) {
+        cl->do_oop((oop*)_full[i]);
+      }
+      for (int i = 0; i < _num_narrow; i++) {
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+    }
+
+    void oops_do_mixed(OopClosure* cl) {
+      int i;
+      for (i = 0; i < _num_full && i < _num_narrow; i++) {
+        cl->do_oop((oop*)_full[i]);
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+      for (int j = i; j < _num_full; j++) {
+        cl->do_oop((oop*)_full[j]);
+      }
+      for (int j = i; j < _num_narrow; j++) {
+        cl->do_oop((narrowOop*)_narrow[j]);
+      }
+    }
+
+    static const int MaxOrder = 2; // Highest valid do_oop_order value (inclusive).
+
+    void oops_do(OopClosure* cl, int do_oop_order) {
+      switch(do_oop_order) {
+        case 0:
+          oops_do_narrow_then_full(cl);
+          break;
+        case 1:
+          oops_do_full_then_narrow(cl);
+          break;
+        case 2:
+          oops_do_mixed(cl);
+          break;
+        default:
+          oops_do_narrow_then_full(cl);
+          break;
+      }
+    }
+  };
+
+  class CountOopClosure : public OopClosure {
+    int _narrow_oop_count;
+    int _full_oop_count;
+   public:
+    CountOopClosure() : _narrow_oop_count(0), _full_oop_count(0) {}
+    void do_oop(narrowOop* p) {
+      assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) != 0,
+          "The narrowOop was unexpectedly not marked with the NarrowOopMarker");
+      _narrow_oop_count++;
+    }
+
+    void do_oop(oop* p) {
+      assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) == 0,
+          "The oop was unexpectedly marked with the NarrowOopMarker");
+      _full_oop_count++;
+    }
+
+    int narrow_oop_count() { return _narrow_oop_count; }
+    int full_oop_count()   { return _full_oop_count; }
+    int all_oop_count()    { return _narrow_oop_count + _full_oop_count; }
+  };
+
+  class DoNothingOopClosure : public OopClosure {
+   public:
+    void do_oop(narrowOop* p) {}
+    void do_oop(oop* p)       {}
+  };
+
+  static void testCount(int num_narrow, int num_full, int do_oop_order) {
+    FakeRoots fr(num_narrow, num_full);
+
+    CountOopClosure coc;
+    BufferingOopClosure boc(&coc);
+
+    fr.oops_do(&boc, do_oop_order);
+
+    boc.done();
+
+    #define assert_testCount(expected, got)                                     \
+       assert((expected) == (got),                                              \
+           err_msg("Expected: %d, got: %d, when running testCount(%d, %d, %d)", \
+               (expected), (got), num_narrow, num_full, do_oop_order))
+
+    assert_testCount(num_narrow, coc.narrow_oop_count());
+    assert_testCount(num_full, coc.full_oop_count());
+    assert_testCount(num_narrow + num_full, coc.all_oop_count());
+  }
+
+  static void testCount() {
+    int buffer_length = BufferingOopClosure::BufferLength;
+
+    for (int order = 0; order <= FakeRoots::MaxOrder; order++) {
+      testCount(0,                 0,                 order);
+      testCount(10,                0,                 order);
+      testCount(0,                 10,                order);
+      testCount(10,                10,                order);
+      testCount(buffer_length,     10,                order);
+      testCount(10,                buffer_length,     order);
+      testCount(buffer_length,     buffer_length,     order);
+      testCount(buffer_length + 1, 10,                order);
+      testCount(10,                buffer_length + 1, order);
+      testCount(buffer_length + 1, buffer_length,     order);
+      testCount(buffer_length,     buffer_length + 1, order);
+      testCount(buffer_length + 1, buffer_length + 1, order);
+    }
+  }
+
+  static void testIsBufferEmptyOrFull(int num_narrow, int num_full, bool expect_empty, bool expect_full) {
+    FakeRoots fr(num_narrow, num_full);
+
+    DoNothingOopClosure cl;
+    BufferingOopClosure boc(&cl);
+
+    fr.oops_do(&boc, 0);
+
+    #define assert_testIsBufferEmptyOrFull(expected, got)                             \
+        assert((expected) == (got),                                                   \
+            err_msg("Expected: %d, got: %d. testIsBufferEmptyOrFull(%d, %d, %s, %s)", \
+                (expected), (got), num_narrow, num_full,                              \
+                BOOL_TO_STR(expect_empty), BOOL_TO_STR(expect_full)))
+
+    assert_testIsBufferEmptyOrFull(expect_empty, boc.is_buffer_empty());
+    assert_testIsBufferEmptyOrFull(expect_full, boc.is_buffer_full());
+  }
+
+  static void testIsBufferEmptyOrFull() {
+    int bl = BufferingOopClosure::BufferLength;
+
+    testIsBufferEmptyOrFull(0,       0, true,  false);
+    testIsBufferEmptyOrFull(1,       0, false, false);
+    testIsBufferEmptyOrFull(0,       1, false, false);
+    testIsBufferEmptyOrFull(1,       1, false, false);
+    testIsBufferEmptyOrFull(10,      0, false, false);
+    testIsBufferEmptyOrFull(0,      10, false, false);
+    testIsBufferEmptyOrFull(10,     10, false, false);
+    testIsBufferEmptyOrFull(0,      bl, false, true);
+    testIsBufferEmptyOrFull(bl,      0, false, true);
+    testIsBufferEmptyOrFull(bl/2, bl/2, false, true);
+    testIsBufferEmptyOrFull(bl-1,    1, false, true);
+    testIsBufferEmptyOrFull(1,    bl-1, false, true);
+    // The buffer fills up and is processed part-way through these runs.
+    testIsBufferEmptyOrFull(bl+1,    0, false, false);
+    testIsBufferEmptyOrFull(bl*2,    0, false, true);
+  }
+
+  static void testEmptyAfterDone(int num_narrow, int num_full) {
+    FakeRoots fr(num_narrow, num_full);
+
+    DoNothingOopClosure cl;
+    BufferingOopClosure boc(&cl);
+
+    fr.oops_do(&boc, 0);
+
+    // Make sure all get processed.
+    boc.done();
+
+    assert(boc.is_buffer_empty(),
+        err_msg("Should be empty after call to done(). testEmptyAfterDone(%d, %d)",
+            num_narrow, num_full));
+  }
+
+  static void testEmptyAfterDone() {
+    int bl = BufferingOopClosure::BufferLength;
+
+    testEmptyAfterDone(0,       0);
+    testEmptyAfterDone(1,       0);
+    testEmptyAfterDone(0,       1);
+    testEmptyAfterDone(1,       1);
+    testEmptyAfterDone(10,      0);
+    testEmptyAfterDone(0,      10);
+    testEmptyAfterDone(10,     10);
+    testEmptyAfterDone(0,      bl);
+    testEmptyAfterDone(bl,      0);
+    testEmptyAfterDone(bl/2, bl/2);
+    testEmptyAfterDone(bl-1,    1);
+    testEmptyAfterDone(1,    bl-1);
+    // The buffer fills up and is processed part-way through these runs.
+    testEmptyAfterDone(bl+1,    0);
+    testEmptyAfterDone(bl*2,    0);
+  }
+
+  public:
+  static void test() {
+    testCount();
+    testIsBufferEmptyOrFull();
+    testEmptyAfterDone();
+  }
+};
+
+void TestBufferingOopClosure_test() {
+  TestBufferingOopClosure::test();
+}
+
+#endif
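The tagging trick the FakeRoots helper relies on is worth spelling out: the high bit of every fake narrowOop* address is set, so the counting closure can assert it was dispatched through the correct do_oop overload. A minimal standalone sketch, assuming a 64-bit word size in place of HotSpot's BitsPerWord:

    #include <cassert>
    #include <cstdint>

    // Assumption: 64-bit words; HotSpot derives this shift from BitsPerWord.
    static const uintptr_t NarrowOopMarker = uintptr_t(1) << 63;

    int main() {
      void* narrow_root = (void*)(NarrowOopMarker + 42); // fake narrowOop*
      void* full_root   = (void*)uintptr_t(7);           // fake oop*
      assert((uintptr_t(narrow_root) & NarrowOopMarker) != 0);
      assert((uintptr_t(full_root)   & NarrowOopMarker) == 0);
      return 0;
    }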
--- a/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
 
-#include "memory/genOopClosures.hpp"
-#include "memory/generation.hpp"
+#include "memory/iterator.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "runtime/os.hpp"
-#include "utilities/taskqueue.hpp"
+#include "utilities/debug.hpp"
 
 // A BufferingOops closure tries to separate out the cost of finding roots
 // from the cost of applying closures to them.  It maintains an array of
@@ -39,174 +39,105 @@
 // up, the wrapped closure is applied to all elements, keeping track of
 // the elapsed time of this process, and leaving the array empty.
 // The caller must be sure to call "done" to process any unprocessed
-// buffered entriess.
-
-class Generation;
-class HeapRegion;
+// buffered entries.
 
 class BufferingOopClosure: public OopClosure {
+  friend class TestBufferingOopClosure;
 protected:
-  enum PrivateConstants {
-    BufferLength = 1024
-  };
+  static const size_t BufferLength = 1024;
 
-  StarTask  _buffer[BufferLength];
-  StarTask* _buffer_top;
-  StarTask* _buffer_curr;
+  // We need to know if the buffered addresses contain oops or narrowOops.
+  // We can't tag the addresses the way StarTask does, because we need to
+  // be able to handle unaligned addresses coming from oops embedded in code.
+  //
+  // The addresses for the full-sized oops are filled in from the bottom,
+  // while the addresses for the narrowOops are filled in from the top.
+  OopOrNarrowOopStar  _buffer[BufferLength];
+  OopOrNarrowOopStar* _oop_top;
+  OopOrNarrowOopStar* _narrowOop_bottom;
 
   OopClosure* _oc;
   double      _closure_app_seconds;
 
-  void process_buffer () {
+
+  bool is_buffer_empty() {
+    return _oop_top == _buffer && _narrowOop_bottom == (_buffer + BufferLength - 1);
+  }
+
+  bool is_buffer_full() {
+    return _narrowOop_bottom < _oop_top;
+  }
+
+  // Process addresses containing full-sized oops.
+  void process_oops() {
+    for (OopOrNarrowOopStar* curr = _buffer; curr < _oop_top; ++curr) {
+      _oc->do_oop((oop*)(*curr));
+    }
+    _oop_top = _buffer;
+  }
+
+  // Process addresses containing narrow oops.
+  void process_narrowOops() {
+    for (OopOrNarrowOopStar* curr = _buffer + BufferLength - 1; curr > _narrowOop_bottom; --curr) {
+      _oc->do_oop((narrowOop*)(*curr));
+    }
+    _narrowOop_bottom = _buffer + BufferLength - 1;
+  }
+
+  // Apply the closure to all oops and clear the buffer.
+  // Accumulate the time it took.
+  void process_buffer() {
     double start = os::elapsedTime();
-    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
-      if (curr->is_narrow()) {
-        assert(UseCompressedOops, "Error");
-        _oc->do_oop((narrowOop*)(*curr));
-      } else {
-        _oc->do_oop((oop*)(*curr));
-      }
-    }
-    _buffer_curr = _buffer;
+
+    process_oops();
+    process_narrowOops();
+
     _closure_app_seconds += (os::elapsedTime() - start);
   }
 
-  template <class T> inline void do_oop_work(T* p) {
-    if (_buffer_curr == _buffer_top) {
-      process_buffer();
-    }
-    StarTask new_ref(p);
-    *_buffer_curr = new_ref;
-    ++_buffer_curr;
-  }
-
-public:
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(oop* p)       { do_oop_work(p); }
-
-  void done () {
-    if (_buffer_curr > _buffer) {
+  void process_buffer_if_full() {
+    if (is_buffer_full()) {
       process_buffer();
     }
   }
-  double closure_app_seconds () {
-    return _closure_app_seconds;
-  }
-  BufferingOopClosure (OopClosure *oc) :
-    _oc(oc),
-    _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength),
-    _closure_app_seconds(0.0) { }
-};
 
-class BufferingOopsInGenClosure: public OopsInGenClosure {
-  BufferingOopClosure _boc;
-  OopsInGenClosure* _oc;
- protected:
-  template <class T> inline void do_oop_work(T* p) {
-    assert(generation()->is_in_reserved((void*)p), "Must be in!");
-    _boc.do_oop(p);
-  }
- public:
-  BufferingOopsInGenClosure(OopsInGenClosure *oc) :
-    _boc(oc), _oc(oc) {}
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(oop* p)       { do_oop_work(p); }
-
-  void done() {
-    _boc.done();
-  }
-
-  double closure_app_seconds () {
-    return _boc.closure_app_seconds();
-  }
-
-  void set_generation(Generation* gen) {
-    OopsInGenClosure::set_generation(gen);
-    _oc->set_generation(gen);
-  }
-
-  void reset_generation() {
-    // Make sure we finish the current work with the current generation.
-    _boc.done();
-    OopsInGenClosure::reset_generation();
-    _oc->reset_generation();
+  void add_narrowOop(narrowOop* p) {
+    assert(!is_buffer_full(), "Buffer should not be full");
+    *_narrowOop_bottom = (OopOrNarrowOopStar)p;
+    _narrowOop_bottom--;
   }
 
-};
-
-
-class BufferingOopsInHeapRegionClosure: public OopsInHeapRegionClosure {
-private:
-  enum PrivateConstants {
-    BufferLength = 1024
-  };
-
-  StarTask     _buffer[BufferLength];
-  StarTask*    _buffer_top;
-  StarTask*    _buffer_curr;
-
-  HeapRegion*  _hr_buffer[BufferLength];
-  HeapRegion** _hr_curr;
-
-  OopsInHeapRegionClosure*  _oc;
-  double                    _closure_app_seconds;
-
-  void process_buffer () {
-
-    assert((_hr_curr - _hr_buffer) == (_buffer_curr - _buffer),
-           "the two lengths should be the same");
-
-    double start = os::elapsedTime();
-    HeapRegion** hr_curr = _hr_buffer;
-    HeapRegion*  hr_prev = NULL;
-    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
-      HeapRegion* region = *hr_curr;
-      if (region != hr_prev) {
-        _oc->set_region(region);
-        hr_prev = region;
-      }
-      if (curr->is_narrow()) {
-        assert(UseCompressedOops, "Error");
-        _oc->do_oop((narrowOop*)(*curr));
-      } else {
-        _oc->do_oop((oop*)(*curr));
-      }
-      ++hr_curr;
-    }
-    _buffer_curr = _buffer;
-    _hr_curr = _hr_buffer;
-    _closure_app_seconds += (os::elapsedTime() - start);
+  void add_oop(oop* p) {
+    assert(!is_buffer_full(), "Buffer should not be full");
+    *_oop_top = (OopOrNarrowOopStar)p;
+    _oop_top++;
   }
 
 public:
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) {
+    process_buffer_if_full();
+    add_narrowOop(p);
+  }
 
-  template <class T> void do_oop_work(T* p) {
-    if (_buffer_curr == _buffer_top) {
-      assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
-      process_buffer();
-    }
-    StarTask new_ref(p);
-    *_buffer_curr = new_ref;
-    ++_buffer_curr;
-    *_hr_curr = _from;
-    ++_hr_curr;
+  virtual void do_oop(oop* p)       {
+    process_buffer_if_full();
+    add_oop(p);
   }
-  void done () {
-    if (_buffer_curr > _buffer) {
-      assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
+
+  void done() {
+    if (!is_buffer_empty()) {
       process_buffer();
     }
   }
-  double closure_app_seconds () {
+
+  double closure_app_seconds() {
     return _closure_app_seconds;
   }
-  BufferingOopsInHeapRegionClosure (OopsInHeapRegionClosure *oc) :
+
+  BufferingOopClosure(OopClosure *oc) :
     _oc(oc),
-    _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength),
-    _hr_curr(_hr_buffer),
+    _oop_top(_buffer),
+    _narrowOop_bottom(_buffer + BufferLength - 1),
     _closure_app_seconds(0.0) { }
 };
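Because the rewritten closure can no longer tag addresses (oops embedded in code may be unaligned), it separates the two kinds positionally: full-width entries grow up from the bottom of the buffer, narrow entries grow down from the top, and the buffer is full when the cursors cross. A generic sketch of this double-ended layout (TwoEndedBuffer and its members are illustrative names, not the BufferingOopClosure fields):

    #include <cassert>

    // Double-ended buffer: full-width entries fill from the bottom, narrow
    // entries from the top; the buffer is full once the cursors cross.
    template <typename T, int N>
    struct TwoEndedBuffer {
      T  slots[N];
      T* full_top      = slots;         // next free slot from the bottom
      T* narrow_bottom = slots + N - 1; // next free slot from the top

      bool empty() const {
        return full_top == slots && narrow_bottom == slots + N - 1;
      }
      bool full() const { return narrow_bottom < full_top; }

      void push_full(T v)   { assert(!full()); *full_top++ = v; }
      void push_narrow(T v) { assert(!full()); *narrow_bottom-- = v; }
    };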
 
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -33,7 +33,7 @@
   _threads(NULL), _n_threads(0),
   _hot_card_cache(g1h)
 {
-  // Ergomonically select initial concurrent refinement parameters
+  // Ergonomically select initial concurrent refinement parameters
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
     FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
   }
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -44,8 +44,8 @@
   _vtime_accum(0.0)
 {
 
-  // Each thread has its own monitor. The i-th thread is responsible for signalling
-  // to thread i+1 if the number of buffers in the queue exceeds a threashold for this
+  // Each thread has its own monitor. The i-th thread is responsible for signaling
+  // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
   // thread. Monitors are also used to wake up the threads during termination.
   // The 0th worker is notified by mutator threads and has a special monitor.
   // The last worker is used for young gen rset size sampling.
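The chained activation described in this comment, where worker i wakes worker i+1 once the shared queue passes that worker's threshold, has roughly the following shape; RefineWorker and maybe_activate_next are illustrative names, and the real code uses HotSpot monitors rather than std::condition_variable:

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>
    #include <vector>

    // Each worker sleeps on its own monitor; worker i is woken by worker
    // i-1 (worker 0 by mutator threads) when the queue grows long enough.
    struct RefineWorker {
      std::mutex              m;
      std::condition_variable cv;
      bool                    active = false;
      size_t                  activation_threshold = 0;
    };

    void maybe_activate_next(std::vector<RefineWorker>& workers,
                             size_t my_id, size_t queue_length) {
      size_t next = my_id + 1;
      if (next < workers.size() &&
          queue_length > workers[next].activation_threshold) {
        std::lock_guard<std::mutex> lg(workers[next].m);
        workers[next].active = true;  // guards against lost wakeups
        workers[next].cv.notify_one();
      }
    }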
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -909,7 +909,7 @@
   }
 #endif
 
-  // Initialise marking structures. This has to be done in a STW phase.
+  // Initialize marking structures. This has to be done in a STW phase.
   reset();
 
   // For each region note start of marking.
@@ -923,8 +923,8 @@
 
   // If we force an overflow during remark, the remark operation will
   // actually abort and we'll restart concurrent marking. If we always
-  // force an oveflow during remark we'll never actually complete the
-  // marking phase. So, we initilize this here, at the start of the
+  // force an overflow during remark we'll never actually complete the
+  // marking phase. So, we initialize this here, at the start of the
   // cycle, so that the remaining overflow number will decrease at
   // every remark and we'll eventually not need to cause one.
   force_overflow_stw()->init();
@@ -959,7 +959,7 @@
  *
  * Note, however, that this code is also used during remark and in
  * this case we should not attempt to leave / enter the STS, otherwise
- * we'll either hit an asseert (debug / fastdebug) or deadlock
+ * we'll either hit an assert (debug / fastdebug) or deadlock
  * (product). So we should only leave / enter the STS if we are
  * operating concurrently.
  *
@@ -1001,7 +1001,7 @@
       // task 0 is responsible for clearing the global data structures
       // We should be here because of an overflow. During STW we should
       // not clear the overflow flag since we rely on it being true when
-      // we exit this method to abort the pause and restart concurent
+      // we exit this method to abort the pause and restart concurrent
       // marking.
       reset_marking_state(true /* clear_overflow */);
       force_overflow()->update();
@@ -1251,7 +1251,7 @@
   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (use_parallel_marking_threads()) {
     _parallel_workers->set_active_workers((int)active_workers);
-    // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
+    // Don't set _n_par_threads because it affects MT in process_strong_roots()
     // and the decisions on that MT processing is made elsewhere.
     assert(_parallel_workers->active_workers() > 0, "Should have been set");
     _parallel_workers->run_task(&markingTask);
@@ -1484,7 +1484,7 @@
     }
 
     // Set the marked bytes for the current region so that
-    // it can be queried by a calling verificiation routine
+    // it can be queried by a calling verification routine
     _region_marked_bytes = marked_bytes;
 
     return false;
@@ -1619,7 +1619,6 @@
   }
 };
 
-
 class G1ParVerifyFinalCountTask: public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
@@ -2307,7 +2306,7 @@
       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
       //
       // CMTask::do_marking_step() is called in a loop, which we'll exit
-      // if there's nothing more to do (i.e. we'completely drained the
+      // if there's nothing more to do (i.e. we've completely drained the
       // entries that were pushed as a result of applying the 'keep alive'
       // closure to the entries on the discovered ref lists) or we overflow
       // the global marking stack.
@@ -2470,7 +2469,7 @@
     // reference processing is not multi-threaded and is thus
     // performed by the current thread instead of a gang worker).
     //
-    // The gang tasks involved in parallel reference procssing create
+    // The gang tasks involved in parallel reference processing create
     // their own instances of these closures, which do their own
     // synchronization among themselves.
     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
@@ -2529,10 +2528,9 @@
     assert(!rp->discovery_enabled(), "Post condition");
   }
 
-  // Now clean up stale oops in StringTable
-  StringTable::unlink(&g1_is_alive);
-  // Clean up unreferenced symbols in symbol table.
-  SymbolTable::unlink();
+  g1h->unlink_string_and_symbol_table(&g1_is_alive,
+                                      /* process_strings */ false, // currently strings are always roots
+                                      /* process_symbols */ true);
 }
 
 void ConcurrentMark::swapMarkBitMaps() {
@@ -2548,7 +2546,7 @@
 public:
   void work(uint worker_id) {
     // Since all available tasks are actually started, we should
-    // only proceed if we're supposed to be actived.
+    // only proceed if we're supposed to be active.
     if (worker_id < _cm->active_tasks()) {
       CMTask* task = _cm->task(worker_id);
       task->record_start_time();
@@ -3068,7 +3066,7 @@
 
     // 'start' should be in the heap.
     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
-    // 'end' *may* be just beyone the end of the heap (if hr is the last region)
+    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
 
     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
@@ -4416,7 +4414,7 @@
       // overflow was raised. This means we have to restart the
       // marking phase and start iterating over regions. However, in
       // order to do this we have to make sure that all tasks stop
-      // what they are doing and re-initialise in a safe manner. We
+      // what they are doing and re-initialize in a safe manner. We
       // will achieve this with the use of two barrier sync points.
 
       if (_cm->verbose_low()) {
@@ -4430,7 +4428,7 @@
 
         // When we exit this sync barrier we know that all tasks have
         // stopped doing marking work. So, it's now safe to
-        // re-initialise our data structures. At the end of this method,
+        // re-initialize our data structures. At the end of this method,
         // task 0 will clear the global data structures.
       }
 
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -378,19 +378,19 @@
   friend class G1CMDrainMarkingStackClosure;
 
 protected:
-  ConcurrentMarkThread* _cmThread;   // the thread doing the work
-  G1CollectedHeap*      _g1h;        // the heap.
-  uint                  _parallel_marking_threads; // the number of marking
-                                                   // threads we're use
-  uint                  _max_parallel_marking_threads; // max number of marking
-                                                   // threads we'll ever use
-  double                _sleep_factor; // how much we have to sleep, with
+  ConcurrentMarkThread* _cmThread;   // The thread doing the work
+  G1CollectedHeap*      _g1h;        // The heap
+  uint                  _parallel_marking_threads; // The number of marking
+                                                   // threads we're using
+  uint                  _max_parallel_marking_threads; // Max number of marking
+                                                       // threads we'll ever use
+  double                _sleep_factor; // How much we have to sleep, with
                                        // respect to the work we just did, to
                                        // meet the marking overhead goal
-  double                _marking_task_overhead; // marking target overhead for
+  double                _marking_task_overhead; // Marking target overhead for
                                                 // a single task
 
-  // same as the two above, but for the cleanup task
+  // Same as the two above, but for the cleanup task
   double                _cleanup_sleep_factor;
   double                _cleanup_task_overhead;
 
@@ -399,8 +399,8 @@
   // Concurrent marking support structures
   CMBitMap                _markBitMap1;
   CMBitMap                _markBitMap2;
-  CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
-  CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap
+  CMBitMapRO*             _prevMarkBitMap; // Completed mark bitmap
+  CMBitMap*               _nextMarkBitMap; // Under-construction mark bitmap
 
   BitMap                  _region_bm;
   BitMap                  _card_bm;
@@ -409,43 +409,43 @@
   HeapWord*               _heap_start;
   HeapWord*               _heap_end;
 
-  // Root region tracking and claiming.
+  // Root region tracking and claiming
   CMRootRegions           _root_regions;
 
   // For gray objects
-  CMMarkStack             _markStack; // Grey objects behind global finger.
-  HeapWord* volatile      _finger;  // the global finger, region aligned,
+  CMMarkStack             _markStack; // Gray objects behind global finger
+  HeapWord* volatile      _finger;  // The global finger, region aligned,
                                     // always points to the end of the
                                     // last claimed region
 
-  // marking tasks
-  uint                    _max_worker_id;// maximum worker id
-  uint                    _active_tasks; // task num currently active
-  CMTask**                _tasks;        // task queue array (max_worker_id len)
-  CMTaskQueueSet*         _task_queues;  // task queue set
-  ParallelTaskTerminator  _terminator;   // for termination
+  // Marking tasks
+  uint                    _max_worker_id; // Maximum worker id
+  uint                    _active_tasks; // Task num currently active
+  CMTask**                _tasks;        // Task queue array (max_worker_id len)
+  CMTaskQueueSet*         _task_queues;  // Task queue set
+  ParallelTaskTerminator  _terminator;   // For termination
 
-  // Two sync barriers that are used to synchronise tasks when an
+  // Two sync barriers that are used to synchronize tasks when an
   // overflow occurs. The algorithm is the following. All tasks enter
   // the first one to ensure that they have all stopped manipulating
-  // the global data structures. After they exit it, they re-initialise
-  // their data structures and task 0 re-initialises the global data
+  // the global data structures. After they exit it, they re-initialize
+  // their data structures and task 0 re-initializes the global data
   // structures. Then, they enter the second sync barrier. This
   // ensures that no task starts doing work before all data
-  // structures (local and global) have been re-initialised. When they
+  // structures (local and global) have been re-initialized. When they
   // exit it, they are free to start working again.
   WorkGangBarrierSync     _first_overflow_barrier_sync;
   WorkGangBarrierSync     _second_overflow_barrier_sync;
 
-  // this is set by any task, when an overflow on the global data
-  // structures is detected.
+  // This is set by any task, when an overflow on the global data
+  // structures is detected
   volatile bool           _has_overflown;
-  // true: marking is concurrent, false: we're in remark
+  // True: marking is concurrent, false: we're in remark
   volatile bool           _concurrent;
-  // set at the end of a Full GC so that marking aborts
+  // Set at the end of a Full GC so that marking aborts
   volatile bool           _has_aborted;
 
-  // used when remark aborts due to an overflow to indicate that
+  // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
   volatile bool           _restart_for_overflow;
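The two-barrier protocol spelled out in this comment is the standard stop, re-initialize, resume pattern. A compact sketch using C++20's std::barrier in place of WorkGangBarrierSync (the reinit_* helpers are hypothetical):

    #include <barrier>

    void reinit_local_structures();   // hypothetical per-task re-init
    void reinit_global_structures();  // hypothetical global re-init

    void on_overflow(std::barrier<>& first, std::barrier<>& second,
                     unsigned task_id) {
      first.arrive_and_wait();       // all tasks have stopped touching
                                     // the shared marking state
      reinit_local_structures();
      if (task_id == 0) {
        reinit_global_structures();  // safe: everyone is between barriers
      }
      second.arrive_and_wait();      // nobody resumes until re-init is done
    }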
 
@@ -455,10 +455,10 @@
   // time of remark.
   volatile bool           _concurrent_marking_in_progress;
 
-  // verbose level
+  // Verbose level
   CMVerboseLevel          _verbose_level;
 
-  // All of these times are in ms.
+  // All of these times are in ms
   NumberSeq _init_times;
   NumberSeq _remark_times;
   NumberSeq   _remark_mark_times;
@@ -467,7 +467,7 @@
   double    _total_counting_time;
   double    _total_rs_scrub_time;
 
-  double*   _accum_task_vtime;   // accumulated task vtime
+  double*   _accum_task_vtime;   // Accumulated task vtime
 
   FlexibleWorkGang* _parallel_workers;
 
@@ -487,7 +487,7 @@
   void reset_marking_state(bool clear_overflow = true);
 
   // We do this after we're done with marking so that the marking data
-  // structures are initialised to a sensible and predictable state.
+  // structures are initialized to a sensible and predictable state.
   void set_non_marking_state();
 
   // Called to indicate how many threads are currently active.
@@ -497,14 +497,14 @@
   // mark or remark) and how many threads are currently active.
   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 
-  // prints all gathered CM-related statistics
+  // Prints all gathered CM-related statistics
   void print_stats();
 
   bool cleanup_list_is_empty() {
     return _cleanup_list.is_empty();
   }
 
-  // accessor methods
+  // Accessor methods
   uint parallel_marking_threads() const     { return _parallel_marking_threads; }
   uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
   double sleep_factor()                     { return _sleep_factor; }
@@ -542,7 +542,7 @@
   // frequently.
   HeapRegion* claim_region(uint worker_id);
 
-  // It determines whether we've run out of regions to scan.
+  // It determines whether we've run out of regions to scan
   bool        out_of_regions() { return _finger == _heap_end; }
 
   // Returns the task with the given id
@@ -816,7 +816,7 @@
   inline bool do_yield_check(uint worker_i = 0);
   inline bool should_yield();
 
-  // Called to abort the marking cycle after a Full GC takes palce.
+  // Called to abort the marking cycle after a Full GC takes place.
   void abort();
 
   bool has_aborted()      { return _has_aborted; }
@@ -933,11 +933,11 @@
 
   // Similar to the above routine but there are times when we cannot
   // safely calculate the size of obj due to races and we, therefore,
-  // pass the size in as a parameter. It is the caller's reponsibility
+  // pass the size in as a parameter. It is the caller's responsibility
   // to ensure that the size passed in for obj is valid.
   inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
 
-  // Unconditionally mark the given object, and unconditinally count
+  // Unconditionally mark the given object, and unconditionally count
   // the object in the counting structures for worker id 0.
   // Should *not* be called from parallel code.
   inline bool mark_and_count(oop obj, HeapRegion* hr);
--- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -105,7 +105,7 @@
   // will then correspond to a (non-existent) card that is also
   // just beyond the heap.
   if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
-    // end of region is not card aligned - incremement to cover
+    // end of region is not card aligned - increment to cover
     // all the cards spanned by the region.
     end_idx += 1;
   }
@@ -222,7 +222,7 @@
   return false;
 }
 
-// Unconditionally mark the given object, and unconditinally count
+// Unconditionally mark the given object, and unconditionally count
 // the object in the counting structures for worker id 0.
 // Should *not* be called from parallel code.
 inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -70,7 +70,7 @@
 
 inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
                                                           bool bot_updates) {
-  // First we have to tedo the allocation, assuming we're holding the
+  // First we have to redo the allocation, assuming we're holding the
   // appropriate lock, in case another thread changed the region while
   // we were waiting to get the lock.
   HeapWord* result = attempt_allocation(word_size, bot_updates);
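The comment fixed here describes a classic recheck-under-lock pattern: retry the lock-free fast path once the lock is held, because another thread may have installed a fresh region while this one was blocked. In generic form (all names hypothetical, not the G1AllocRegion API):

    #include <cstddef>

    void* attempt_allocation(size_t word_size);            // hypothetical fast path
    void* allocate_new_region_and_retry(size_t word_size); // hypothetical slow path

    // Caller holds the allocation lock. First repeat the fast path: if a
    // competing thread already replaced the region, its space is usable
    // and no new region is needed.
    void* attempt_allocation_locked_sketch(size_t word_size) {
      void* result = attempt_allocation(word_size);
      if (result != nullptr) {
        return result;
      }
      return allocate_new_region_and_retry(word_size);
    }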
--- a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -79,7 +79,7 @@
     assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
       err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
         mapping_granularity_in_bytes, end));
-    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
+    size_t num_target_elems = pointer_delta(end, bottom, mapping_granularity_in_bytes);
     idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
     address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
     initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
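The pointer_delta change replaces hand-rolled unit arithmetic with the standard helper: pointer_delta divides the byte distance between two addresses by an element size, avoiding the word-count-divided-by-byte-quotient expression it replaces. A simplified rendering of its semantics (not the HotSpot definition itself):

    #include <cstddef>
    #include <cstdint>

    // Roughly what HotSpot's pointer_delta computes: the byte distance
    // between two addresses divided by an element size.
    inline size_t pointer_delta_sketch(const void* left, const void* right,
                                       size_t element_size) {
      return ((uintptr_t)left - (uintptr_t)right) / element_size;
    }
    // Example: two addresses 64 KB apart with a 4 KB mapping granularity
    // give pointer_delta_sketch(hi, lo, 4096) == 16 target elements.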
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -448,7 +448,7 @@
 
   // Otherwise, find the block start using the table, but taking
   // care (cf block_start_unsafe() above) not to parse any objects/blocks
-  // on the cards themsleves.
+  // on the cards themselves.
   size_t index = _array->index_for(addr);
   assert(_array->address_for_index(index) == addr,
          "arg should be start of card");
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -169,7 +169,7 @@
 
     // We use the last address in hr as hr could be the
     // last region in the heap, in which case trying to find
-    // the card for hr->end() will be an OOB accesss to the
+    // the card for hr->end() will be an OOB access to the
     // card table.
     HeapWord* last = hr->end() - 1;
     assert(_g1h->g1_committed().contains(last),
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,8 +50,8 @@
 #include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "memory/gcLocker.inline.hpp"
-#include "memory/genOopClosures.inline.hpp"
 #include "memory/generationSpec.hpp"
+#include "memory/iterator.hpp"
 #include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
@@ -1575,8 +1575,6 @@
 void
 G1CollectedHeap::
 resize_if_necessary_after_full_collection(size_t word_size) {
-  assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
-
   // Include the current allocation, if any, and bytes that will be
   // pre-allocated to support collections, as "used".
   const size_t used_after_gc = used();
@@ -2268,7 +2266,7 @@
                                 // (for efficiency/performance)
                            false);
                                 // Setting next fields of discovered
-                                // lists requires a barrier.
+                                // lists does not require a barrier.
 }
 
 size_t G1CollectedHeap::capacity() const {
@@ -2996,7 +2994,17 @@
 }
 
 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
-  return HeapRegion::GrainBytes;
+  return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
+}
+
+size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
+  return young_list()->eden_used_bytes();
+}
+
+// For G1, TLABs should not contain humongous objects, so the maximum TLAB size
+// must be smaller than the humongous object limit.
+size_t G1CollectedHeap::max_tlab_size() const {
+  return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
 }
 
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
@@ -3008,11 +3016,11 @@
   // humongous objects.
 
   HeapRegion* hr = _mutator_alloc_region.get();
-  size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
+  size_t max_tlab = max_tlab_size() * wordSize;
   if (hr == NULL) {
-    return max_tlab_size;
+    return max_tlab;
   } else {
-    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
+    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
   }
 }
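The sizing rule introduced above, a TLAB strictly below the humongous threshold and aligned down to MinObjAlignment, can be checked with illustrative numbers; the region size and half-region humongous threshold below are assumptions for the example, not values taken from this change:

    #include <cstddef>

    // Illustrative only: 1 MB regions, 8-byte HeapWords, humongous
    // threshold at half a region, MinObjAlignment of one word.
    size_t max_tlab_words_sketch() {
      size_t grain_words     = (1024 * 1024) / 8; // 131072 words per region
      size_t humongous_words = grain_words / 2;   // 65536-word threshold
      // Subtract one and align down, so every TLAB stays strictly below
      // the humongous threshold (alignment by one word is a no-op here).
      return humongous_words - 1;                 // 65535 words
    }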
 
@@ -3077,11 +3085,7 @@
   return NULL; // keep some compilers happy
 }
 
-// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
-//       pass it as the perm_blk to SharedHeap::process_strong_roots.
-//       When process_strong_roots stop calling perm_blk->younger_refs_iterate
-//       we can change this closure to extend the simpler OopClosure.
-class VerifyRootsClosure: public OopsInGenClosure {
+class VerifyRootsClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
   VerifyOption     _vo;
@@ -3117,7 +3121,7 @@
   void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
+class G1VerifyCodeRootOopClosure: public OopClosure {
   G1CollectedHeap* _g1h;
   OopClosure* _root_cl;
   nmethod* _nm;
@@ -3396,14 +3400,12 @@
 
     // We apply the relevant closures to all the oops in the
     // system dictionary, the string table and the code cache.
-    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
+    const int so = SO_AllClasses | SO_Strings | SO_AllCodeCache;
 
     // Need cleared claim bits for the strong roots processing
     ClassLoaderDataGraph::clear_claimed_marks();
 
     process_strong_roots(true,      // activate StrongRootsScope
-                         false,     // we set "is scavenging" to false,
-                                    // so we don't reset the dirty cards.
                          ScanningOption(so),  // roots scanning options
                          &rootsCl,
                          &blobsCl,
@@ -3655,6 +3657,7 @@
   // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Fill TLAB's and such
+  accumulate_statistics_all_tlabs();
   ensure_parsability(true);
 
   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
@@ -3679,6 +3682,8 @@
                         "derived pointer present"));
   // always_do_update_barrier = true;
 
+  resize_all_tlabs();
+
   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
   Universe::update_heap_info_at_gc();
@@ -4651,8 +4656,8 @@
   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
   _mark_in_progress(_g1->mark_in_progress()) { }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
+template <G1Barrier barrier, bool do_mark_object>
+void G1ParCopyClosure<barrier, do_mark_object>::mark_object(oop obj) {
 #ifdef ASSERT
   HeapRegion* hr = _g1->heap_region_containing(obj);
   assert(hr != NULL, "sanity");
@@ -4663,8 +4668,8 @@
   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
 }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
+void G1ParCopyClosure<barrier, do_mark_object>
   ::mark_forwarded_object(oop from_obj, oop to_obj) {
 #ifdef ASSERT
   assert(from_obj->is_forwarded(), "from obj should be forwarded");
@@ -4687,8 +4692,8 @@
   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
+oop G1ParCopyClosure<barrier, do_mark_object>
   ::copy_to_survivor_space(oop old) {
   size_t word_sz = old->size();
   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
@@ -4784,13 +4789,11 @@
   }
 }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
 template <class T>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
+void G1ParCopyClosure<barrier, do_mark_object>
 ::do_oop_work(T* p) {
   oop obj = oopDesc::load_decode_heap_oop(p);
-  assert(barrier != G1BarrierRS || obj != NULL,
-         "Precondition: G1BarrierRS implies obj is non-NULL");
 
   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
 
@@ -4810,10 +4813,7 @@
       mark_forwarded_object(obj, forwardee);
     }
 
-    // When scanning the RS, we only care about objs in CS.
-    if (barrier == G1BarrierRS) {
-      _par_scan_state->update_rs(_from, p, _worker_id);
-    } else if (barrier == G1BarrierKlass) {
+    if (barrier == G1BarrierKlass) {
       do_klass_barrier(p, forwardee);
     }
   } else {
@@ -4828,14 +4828,10 @@
   if (barrier == G1BarrierEvac && obj != NULL) {
     _par_scan_state->update_rs(_from, p, _worker_id);
   }
-
-  if (do_gen_barrier && obj != NULL) {
-    par_do_barrier(p);
-  }
-}
-
-template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
-template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
+}
+
+template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
+template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
 
 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
   assert(has_partial_array_mask(p), "invariant");
@@ -5119,15 +5115,12 @@
 
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
 
-  assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
-  // Walk the code cache/strong code roots w/o buffering, because StarTask
-  // cannot handle unaligned oop locations.
-  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
+  CodeBlobToOopClosure scan_code_roots(&buf_scan_non_heap_roots, true /* do_marking */);
 
   process_strong_roots(false, // no scoping; this is parallel code
-                       is_scavenging, so,
+                       so,
                        &buf_scan_non_heap_roots,
-                       &eager_scan_code_roots,
+                       &scan_code_roots,
                        scan_klasses
                        );
 
@@ -5173,7 +5166,7 @@
   // the collection set.
   // Note all threads participate in this set of root tasks.
   double mark_strong_code_roots_ms = 0.0;
-  if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
+  if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
     double mark_strong_roots_start = os::elapsedTime();
     mark_strong_code_roots(worker_i);
     mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
@@ -5181,9 +5174,9 @@
   g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
 
   // Now scan the complement of the collection set.
-  if (scan_rs != NULL) {
-    g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
-  }
+  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
+  g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
+
   _process_strong_tasks->all_tasks_completed();
 }
 
@@ -5193,6 +5186,102 @@
   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
 }
 
+class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
+private:
+  BoolObjectClosure* _is_alive;
+  int _initial_string_table_size;
+  int _initial_symbol_table_size;
+
+  bool  _process_strings;
+  int _strings_processed;
+  int _strings_removed;
+
+  bool  _process_symbols;
+  int _symbols_processed;
+  int _symbols_removed;
+
+  bool _do_in_parallel;
+public:
+  G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
+    AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
+    _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
+    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
+    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
+
+    _initial_string_table_size = StringTable::the_table()->table_size();
+    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
+    if (process_strings) {
+      StringTable::clear_parallel_claimed_index();
+    }
+    if (process_symbols) {
+      SymbolTable::clear_parallel_claimed_index();
+    }
+  }
+
+  ~G1StringSymbolTableUnlinkTask() {
+    guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
+              err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
+                      StringTable::parallel_claimed_index(), _initial_string_table_size));
+    guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
+              err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
+                      SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
+  }
+
+  void work(uint worker_id) {
+    if (_do_in_parallel) {
+      int strings_processed = 0;
+      int strings_removed = 0;
+      int symbols_processed = 0;
+      int symbols_removed = 0;
+      if (_process_strings) {
+        StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
+        Atomic::add(strings_processed, &_strings_processed);
+        Atomic::add(strings_removed, &_strings_removed);
+      }
+      if (_process_symbols) {
+        SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
+        Atomic::add(symbols_processed, &_symbols_processed);
+        Atomic::add(symbols_removed, &_symbols_removed);
+      }
+    } else {
+      if (_process_strings) {
+        StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
+      }
+      if (_process_symbols) {
+        SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
+      }
+    }
+  }
+
+  size_t strings_processed() const { return (size_t)_strings_processed; }
+  size_t strings_removed()   const { return (size_t)_strings_removed; }
+
+  size_t symbols_processed() const { return (size_t)_symbols_processed; }
+  size_t symbols_removed()   const { return (size_t)_symbols_removed; }
+};
+
+void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
+                                                     bool process_strings, bool process_symbols) {
+  uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                   _g1h->workers()->active_workers() : 1);
+
+  G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    set_par_threads(n_workers);
+    workers()->run_task(&g1_unlink_task);
+    set_par_threads(0);
+  } else {
+    g1_unlink_task.work(0);
+  }
+  if (G1TraceStringSymbolTableScrubbing) {
+    gclog_or_tty->print_cr("Cleaned string and symbol table, "
+                           "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
+                           "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
+                           g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
+                           g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
+  }
+}
+
 // Weak Reference Processing support
 
 // An always "is_alive" closure that is used to preserve referents.
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -209,7 +209,7 @@
   friend class OldGCAllocRegion;
 
   // Closures used in implementation.
-  template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+  template <G1Barrier barrier, bool do_mark_object>
   friend class G1ParCopyClosure;
   friend class G1IsAliveClosure;
   friend class G1EvacuateFollowersClosure;
@@ -1373,7 +1373,7 @@
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
   // overpartition factor, currently 4).  Assumes that this will be called
-  // in parallel by ParallelGCThreads worker threads with discinct worker
+  // in parallel by ParallelGCThreads worker threads with distinct worker
   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
   // calls will use the same "claim_value", and that that claim value is
   // different from the claim_value of any heap region before the start of
@@ -1470,9 +1470,11 @@
   // Section on thread-local allocation buffers (TLABs)
   // See CollectedHeap for semantics.
 
-  virtual bool supports_tlab_allocation() const;
-  virtual size_t tlab_capacity(Thread* thr) const;
-  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+  bool supports_tlab_allocation() const;
+  size_t tlab_capacity(Thread* ignored) const;
+  size_t tlab_used(Thread* ignored) const;
+  size_t max_tlab_size() const;
+  size_t unsafe_max_tlab_alloc(Thread* ignored) const;
 
   // Can a compiler initialize a new object without store barriers?
   // This permission only extends from the creation of a new object
@@ -1518,7 +1520,7 @@
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
-    // are capped at the humongous thresold and we want to
+    // are capped at the humongous threshold and we want to
     // ensure that we don't try to allocate a TLAB as
     // humongous and that we don't allocate a humongous
     // object in a TLAB.
@@ -1557,7 +1559,7 @@
   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups
 
-  YoungList* young_list() { return _young_list; }
+  YoungList* young_list() const { return _young_list; }
 
   // debugging
   bool check_young_list_well_formed() {
@@ -1648,26 +1650,30 @@
 
   // Optimized nmethod scanning support routines
 
-  // Register the given nmethod with the G1 heap
+  // Register the given nmethod with the G1 heap.
   virtual void register_nmethod(nmethod* nm);
 
-  // Unregister the given nmethod from the G1 heap
+  // Unregister the given nmethod from the G1 heap.
   virtual void unregister_nmethod(nmethod* nm);
 
   // Migrate the nmethods in the code root lists of the regions
   // in the collection set to regions in to-space. In the event
   // of an evacuation failure, nmethods that reference objects
-  // that were not successfullly evacuated are not migrated.
+  // that were not successfully evacuated are not migrated.
   void migrate_strong_code_roots();
 
   // During an initial mark pause, mark all the code roots that
   // point into regions *not* in the collection set.
   void mark_strong_code_roots(uint worker_id);
 
-  // Rebuild the stong code root lists for each region
-  // after a full GC
+  // Rebuild the strong code root lists for each region
+  // after a full GC.
   void rebuild_strong_code_roots();
 
+  // Delete entries for dead interned strings and clean up unreferenced symbols
+  // in the symbol table, possibly in parallel.
+  void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
+
   // Verification
 
   // The following is just to alert the verification code
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -318,7 +318,7 @@
 
 void G1CollectorPolicy::initialize_alignments() {
   _space_alignment = HeapRegion::GrainBytes;
-  size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+  size_t card_table_alignment = GenRemSet::max_alignment_constraint();
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 }
@@ -1075,7 +1075,7 @@
   }
 
   _short_lived_surv_rate_group->start_adding_regions();
-  // do that for any other surv rate groupsx
+  // Do that for any other surv rate groups
 
   if (update_stats) {
     double cost_per_card_ms = 0.0;
@@ -1741,7 +1741,7 @@
   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
   _inc_cset_bytes_used_before += used_bytes;
 
-  // Cache the values we have added to the aggregated informtion
+  // Cache the values we have added to the aggregated information
   // in the heap region in case we have to remove this region from
   // the incremental collection set, or it is updated by the
   // rset sampling code
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -116,7 +116,7 @@
 // If only -XX:NewRatio is set we should use the specified ratio of the heap
 // as both min and max. This will be interpreted as "fixed" just like the
 // NewSize==MaxNewSize case above. But we will update the min and max
-// everytime the heap size changes.
+// every time the heap size changes.
 //
 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
@@ -523,9 +523,9 @@
   // synchronize updates to this field.
   size_t _inc_cset_recorded_rs_lengths;
 
-  // A concurrent refinement thread periodcially samples the young
+  // A concurrent refinement thread periodically samples the young
   // region RSets and needs to update _inc_cset_recorded_rs_lengths as
-  // the RSets grow. Instead of having to syncronize updates to that
+  // the RSets grow. Instead of having to synchronize updates to that
   // field we accumulate them in this field and add it to
   // _inc_cset_recorded_rs_lengths_diffs at the start of a GC.
   ssize_t _inc_cset_recorded_rs_lengths_diffs;
@@ -604,7 +604,7 @@
   // Calculate and return the maximum young list target length that
   // can fit into the pause time goal. The parameters are: rs_lengths
   // represent the prediction of how large the young RSet lengths will
-  // be, base_min_length is the alreay existing number of regions in
+  // be, base_min_length is the already existing number of regions in
   // the young list, min_length and max_length are the desired min and
   // max young list length according to the user's inputs.
   uint calculate_young_list_target_length(size_t rs_lengths,
@@ -820,6 +820,8 @@
     // do that for any other surv rate groups
   }
 
+  size_t young_list_target_length() const { return _young_list_target_length; }
+
   bool is_young_list_full() {
     uint young_list_length = _g1->young_list()->length();
     uint young_list_target_length = _young_list_target_length;
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -103,7 +103,7 @@
   // The data structure implemented is a circular queue.
   // Head "points" to the most recent addition, tail to the oldest one.
   // The array is of fixed size and I don't think we'll need more than
-  // two or three entries with the current behaviour of G1 pauses.
+  // two or three entries with the current behavior of G1 pauses.
   // If the array is full, an easy fix is to look for the pauses with
   // the shortest gap between them and consolidate them.
   // For now, we have taken the expedient alternative of forgetting
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -131,7 +131,6 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   sh->process_strong_roots(true,  // activate StrongRootsScope
-                           false, // not scavenging.
                            SharedHeap::SO_SystemClasses,
                            &GenMarkSweep::follow_root_closure,
                            &GenMarkSweep::follow_code_root_closure,
@@ -163,11 +162,8 @@
   // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
 
-  // Delete entries for dead interned strings.
-  StringTable::unlink(&GenMarkSweep::is_alive);
-
-  // Clean up unreferenced symbols in symbol table.
-  SymbolTable::unlink();
+  // Delete entries for dead interned strings and clean up unreferenced symbols in the symbol table.
+  G1CollectedHeap::heap()->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
@@ -180,7 +176,7 @@
     // any hash values from the mark word. These hash values are
     // used when verifying the dictionaries and so removing them
     // from the mark word can make verification of the dictionaries
-    // fail. At the end of the GC, the orginal mark word values
+    // fail. At the end of the GC, the original mark word values
     // (including hash values) are restored to the appropriate
     // objects.
     if (!VerifySilently) {
@@ -311,7 +307,6 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   sh->process_strong_roots(true,  // activate StrongRootsScope
-                           false, // not scavenging.
                            SharedHeap::SO_AllClasses,
                            &GenMarkSweep::adjust_pointer_closure,
                            NULL,  // do not touch code cache here
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -112,7 +112,7 @@
   // take_sample() only returns "used".  When sampling was used, there
   // were some anomalous values emitted which may have been the consequence
   // of not updating all values simultaneously (i.e., see the calculation done
-  // in eden_space_used(), is it possbile that the values used to
+  // in eden_space_used(), is it possible that the values used to
   // calculate either eden_used or survivor_used are being updated by
   // the collector when the sample is being done?).
   const bool sampled = false;
@@ -135,7 +135,7 @@
 
   //   Young collection set
   //  name "generation.0".  This is logically the young generation.
-  //  The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces.
+  //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
   // See  _old_collection_counters for additional counters
   _young_collection_counters = new G1YoungGenerationCounters(this, "young");
 
@@ -254,7 +254,7 @@
     eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
     eden_counters()->update_used(eden_space_used());
     // only the to survivor space (s1) is active, so we don't need to
-    // update the counteres for the from survivor space (s0)
+    // update the counters for the from survivor space (s0)
     to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
     to_counters()->update_used(survivor_space_used());
     old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -108,7 +108,7 @@
 // is that all the above sizes need to be recalculated when the old
 // gen changes capacity (after a GC or after a humongous allocation)
 // but only the eden occupancy changes when a new eden region is
-// allocated. So, in the latter case we have minimal recalcuation to
+// allocated. So, in the latter case we have minimal recalculation to
 // do which is important as we want to keep the eden region allocation
 // path as low-overhead as possible.
 
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
 
 // A class that scans oops in a given heap region (much as OopsInGenClosure
 // scans oops in a generation.)
-class OopsInHeapRegionClosure: public OopsInGenClosure {
+class OopsInHeapRegionClosure: public ExtendedOopClosure {
 protected:
   HeapRegion* _from;
 public:
@@ -86,13 +86,26 @@
 
 #define G1_PARTIAL_ARRAY_MASK 0x2
 
-template <class T> inline bool has_partial_array_mask(T* ref) {
+inline bool has_partial_array_mask(oop* ref) {
   return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
 }
 
-template <class T> inline T* set_partial_array_mask(T obj) {
+// We never encode partial array oops as narrowOop*, so return false immediately.
+// This allows the compiler to create optimized code when popping references from
+// the work queue.
+inline bool has_partial_array_mask(narrowOop* ref) {
+  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+  return false;
+}
+
+// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+// We always encode partial arrays as regular oops, to allow the
+// specialization of has_partial_array_mask() for narrowOops above.
+// This means that unintentional uses of this method with narrowOops are caught
+// by the compiler.
+inline oop* set_partial_array_mask(oop obj) {
   assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-  return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
 }
 
 template <class T> inline oop clear_partial_array_mask(T* ref) {
@@ -131,7 +144,7 @@
   template <class T> void do_klass_barrier(T* p, oop new_obj);
 };
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
 class G1ParCopyClosure : public G1ParCopyHelper {
   G1ParScanClosure _scanner;
   template <class T> void do_oop_work(T* p);
@@ -166,22 +179,16 @@
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<false, G1BarrierKlass, false> G1ParScanMetadataClosure;
+typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
 
 
-typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkClosure;
-typedef G1ParCopyClosure<false, G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
-
-// The following closure types are no longer used but are retained
-// for historical reasons:
-// typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
-// typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
+typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
 
 // The following closure type is defined in g1_specialized_oop_closures.hpp:
 //
-// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
 
 // We use a separate closure to handle references during evacuation
 // failure processing.
@@ -189,7 +196,7 @@
 // (since that closure no longer assumes that the references it
 // handles point into the collection set).
 
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
+typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public ExtendedOopClosure {
   G1CollectedHeap* _g1;
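
The partial-array mask manipulated above is a pointer-tagging trick: references
pushed on the work queue are at least 4-byte aligned, so bit 0x2 is always clear
in a genuine oop* and can be borrowed as a "this is a partial array chunk" flag.
A minimal standalone sketch of the idea using plain void* (the names and the
demo in main() are hypothetical, not the HotSpot functions):

#include <cassert>
#include <cstdint>

static const uintptr_t PARTIAL_ARRAY_MASK = 0x2;  // assumed tag bit, mirroring G1_PARTIAL_ARRAY_MASK

// Tag an aligned pointer; the assert guards against information loss.
inline void* set_mask(void* p) {
  assert(((uintptr_t)p & PARTIAL_ARRAY_MASK) == 0);
  return (void*)((uintptr_t)p | PARTIAL_ARRAY_MASK);
}

inline bool has_mask(void* p) {
  return ((uintptr_t)p & PARTIAL_ARRAY_MASK) != 0;
}

inline void* clear_mask(void* p) {
  return (void*)((uintptr_t)p & ~PARTIAL_ARRAY_MASK);
}

int main() {
  int x = 42;                          // 4-byte aligned, so the tag bit is free
  void* tagged = set_mask(&x);
  assert(has_mask(tagged));
  assert(clear_mask(tagged) == (void*)&x);
  return 0;
}
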
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -178,7 +178,7 @@
     // The _record_refs_into_cset flag is true during the RSet
     // updating part of an evacuation pause. It is false at all
     // other times:
-    //  * rebuilding the rembered sets after a full GC
+    //  * rebuilding the remembered sets after a full GC
     //  * during concurrent refinement.
     //  * updating the remembered sets of regions in the collection
     //    set in the event of an evacuation failure (when deferred
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/g1/bufferingOopClosure.hpp"
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
@@ -195,7 +194,7 @@
     HeapRegionRemSetIterator iter(hrrs);
     size_t card_index;
 
-    // We claim cards in block so as to recude the contention. The block size is determined by
+    // We claim cards in blocks so as to reduce contention. The block size is determined by
     // the G1RSetScanBlockSize parameter.
     size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
     for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
@@ -587,7 +586,7 @@
 
   // While we are processing RSet buffers during the collection, we
   // actually don't want to scan any cards on the collection set,
-  // since we don't want to update remebered sets with entries that
+  // since we don't want to update remembered sets with entries that
   // point into the collection set, given that live objects from the
   // collection set are about to move and such entries will be stale
   // very soon. This change also deals with a reliability issue which
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -71,6 +71,9 @@
   diagnostic(bool, G1TraceConcRefinement, false,                            \
           "Trace G1 concurrent refinement")                                 \
                                                                             \
+  experimental(bool, G1TraceStringSymbolTableScrubbing, false,              \
+          "Trace information string and symbol table scrubbing.")           \
+                                                                            \
   product(double, G1ConcMarkStepDurationMillis, 10.0,                       \
           "Target duration of individual concurrent marking steps "         \
           "in milliseconds.")                                               \
@@ -180,7 +183,7 @@
           "When true, record recent calls to rem set operations.")          \
                                                                             \
   develop(intx, G1MaxVerifyFailures, -1,                                    \
-          "The maximum number of verification failrues to print.  "         \
+          "The maximum number of verification failures to print.  "         \
           "-1 means print all.")                                            \
                                                                             \
   develop(bool, G1ScrubRemSets, true,                                       \
--- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -33,18 +33,17 @@
 // Forward declarations.
 enum G1Barrier {
   G1BarrierNone,
-  G1BarrierRS,
   G1BarrierEvac,
   G1BarrierKlass
 };
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+template<G1Barrier barrier, bool do_mark_object>
 class G1ParCopyClosure;
 
 class G1ParScanClosure;
 class G1ParPushHeapRSClosure;
 
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
 
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1027,7 +1027,7 @@
       }
     }
 
-    // Loook up end - 1
+    // Look up end - 1
     HeapWord* addr_4 = the_end - 1;
     HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
     if (b_start_4 != p) {
@@ -1111,7 +1111,7 @@
     // will be false, and it will pick up top() as the high water mark
     // of region. If it does so after _gc_time_stamp = ..., then it
     // will pick up the right saved_mark_word() as the high water mark
-    // of the region. Either way, the behaviour will be correct.
+    // of the region. Either way, the behavior will be correct.
     ContiguousSpace::set_saved_mark();
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -97,7 +97,7 @@
   HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
  public:
-  // Empty contructor, we'll initialize it with the initialize() method.
+  // Empty constructor, we'll initialize it with the initialize() method.
   HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
 
   void initialize(HeapWord* bottom, HeapWord* end);
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -71,7 +71,7 @@
   assert(_lock->owned_by_self(), "Required.");
 
   // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
-  // we acquire DirtyCardQ_CBL_mon inside enqeue_complete_buffer as they
+  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
   // have the same rank and we may get the "possible deadlock" message
   _lock->unlock();
 
@@ -151,7 +151,7 @@
 
       // The current PtrQ may be the shared dirty card queue and
       // may be being manipulated by more than one worker thread
-      // during a pause. Since the enqueuing of the completed
+      // during a pause. Since the enqueueing of the completed
       // buffer unlocks the Shared_DirtyCardQ_lock more than one
       // worker thread can 'race' on reading the shared queue attributes
       // (_buf and _index) and multiple threads can call into this
@@ -170,7 +170,7 @@
 
       locking_enqueue_completed_buffer(buf);  // enqueue completed buffer
 
-      // While the current thread was enqueuing the buffer another thread
+      // While the current thread was enqueueing the buffer another thread
       // may have allocated a new buffer and inserted it into this pointer
       // queue. If that happens then we just return so that the current
       // thread doesn't overwrite the buffer allocated by the other thread
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -91,7 +91,7 @@
       assert(new_index > 0, "we should not have already filled up the buffer");
       new_index -= oopSize;
       assert(new_index >= i,
-             "new_index should never be below i, as we alwaysr compact 'up'");
+             "new_index should never be below i, as we always compact 'up'");
       oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
       assert(new_p >= p, "the destination location should never be below "
              "the source as we always compact 'up'");
--- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -144,7 +144,7 @@
 
   // Attempts to ensure that the given card_index in the given region is in
   // the sparse table.  If successful (because the card was already
-  // present, or because it was successfullly added) returns "true".
+  // present, or because it was successfully added) returns "true".
   // Otherwise, returns "false" to indicate that the addition would
   // overflow the entry for the region.  The caller must transfer these
   // entries to a larger-capacity representation.
@@ -201,8 +201,7 @@
   bool has_next(size_t& card_index);
 };
 
-// Concurrent accesss to a SparsePRT must be serialized by some external
-// mutex.
+// Concurrent access to a SparsePRT must be serialized by some external mutex.
 
 class SparsePRTIter;
 class SparsePRTCleanupTask;
@@ -248,7 +247,7 @@
 
   // Attempts to ensure that the given card_index in the given region is in
   // the sparse table.  If successful (because the card was already
-  // present, or because it was successfullly added) returns "true".
+  // present, or because it was successfully added) returns "true".
   // Otherwise, returns "false" to indicate that the addition would
   // overflow the entry for the region.  The caller must transfer these
   // entries to a larger-capacity representation.
--- a/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -154,7 +154,7 @@
   // There used to be this guarantee there.
   // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
   // Code below forces this requirement.  In addition the desired eden
-  // size and disired survivor sizes are desired goals and may
+  // size and desired survivor sizes are desired goals and may
   // exceed the total generation size.
 
   assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),
--- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -213,7 +213,7 @@
       && sp->block_is_obj(first_block)      // first block is an object
       && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
            || oop(first_block)->is_typeArray())) {
-    // Find our least non-clean card, so that a left neighbour
+    // Find our least non-clean card, so that a left neighbor
     // does not scan an object straddling the mutual boundary
     // too far to the right, and attempt to scan a portion of
     // that object twice.
@@ -247,14 +247,14 @@
     } NOISY(else {
       tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL");
       // In the future, we could have this thread look for a non-NULL value to copy from its
-      // right neighbour (up to the end of the first object).
+      // right neighbor (up to the end of the first object).
       if (last_card_of_cur_chunk < last_card_of_first_obj) {
         tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n"
-                      "   might be efficient to get value from right neighbour?");
+                      "   might be efficient to get value from right neighbor?");
       }
     })
   } else {
-    // In this case we can help our neighbour by just asking them
+    // In this case we can help our neighbor by just asking them
     // to stop at our first card (even though it may not be dirty).
     NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");)
     assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -612,14 +612,13 @@
   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                       gch->rem_set()->klass_rem_set());
 
-  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
 
   par_scan_state.start_strong_roots();
   gch->gen_process_strong_roots(_gen->level(),
                                 true,  // Process younger gens, if any,
                                        // as strong roots.
                                 false, // no scope; this is parallel code
-                                true,  // is scavenging
                                 SharedHeap::ScanningOption(so),
                                 &par_scan_state.to_space_root_closure(),
                                 true,   // walk *all* scavengable nmethods
@@ -1071,7 +1070,7 @@
     size_policy->avg_survived()->sample(from()->used());
   }
 
-  // We need to use a monotonically non-deccreasing time in ms
+  // We need to use a monotonically non-decreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@@ -1403,7 +1402,7 @@
 #ifndef PRODUCT
 // It's OK to call this multi-threaded;  the worst thing
 // that can happen is that we'll get a bunch of closely
-// spaced simulated oveflows, but that's OK, in fact
+// spaced simulated overflows, but that's OK, in fact
 // probably good as it would exercise the overflow code
 // under contention.
 bool ParNewGeneration::should_simulate_overflow() {
--- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -118,8 +118,8 @@
 
 
 // Make checks on the current sizes of the generations and
-// the contraints on the sizes of the generations.  Push
-// up the boundary within the contraints.  A partial
+// the constraints on the sizes of the generations.  Push
+// up the boundary within the constraints.  A partial
 // push can occur.
 void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
   assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
--- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -69,7 +69,7 @@
   // the available space and attempt to move the boundary if more space
   // is needed.  The growth is not guaranteed to occur.
   void adjust_boundary_for_old_gen_needs(size_t desired_change_in_bytes);
-  // Similary for a growth of the young generation.
+  // Similarly for a growth of the young generation.
   void adjust_boundary_for_young_gen_needs(size_t eden_size, size_t survivor_size);
 
   // Return the total byte size of the reserved space
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -65,7 +65,7 @@
   }
 };
 
-// Checks all objects for the existance of some type of mark,
+// Checks all objects for the existence of some type of mark,
 // precise or imprecise, dirty or newgen.
 class CheckForUnmarkedObjects : public ObjectClosure {
  private:
@@ -84,7 +84,7 @@
   }
 
   // Card marks are not precise. The current system can leave us with
-  // a mismash of precise marks and beginning of object marks. This means
+  // a mismatch of precise marks and beginning of object marks. This means
   // we test for missing precise marks first. If any are found, we don't
   // fail unless the object head is also unmarked.
   virtual void do_object(oop obj) {
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -202,12 +202,12 @@
     list->print("list:");
   }
   if (list->is_empty()) {
-    // Enqueuing the empty list: nothing to do.
+    // Enqueueing the empty list: nothing to do.
     return;
   }
   uint list_length = list->length();
   if (is_empty()) {
-    // Enqueuing to empty list: just acquire elements.
+    // Enqueueing to empty list: just acquire elements.
     set_insert_end(list->insert_end());
     set_remove_end(list->remove_end());
     set_length(list_length);
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -303,7 +303,7 @@
 // load balancing (i.e., over partitioning).  The last task to be
 // executed by a GC thread in a job is a work stealing task.  A
 // GC  thread that gets a work stealing task continues to execute
-// that task until the job is done.  In the static number of GC theads
+// that task until the job is done.  In the static number of GC threads
 // case, tasks are added to a queue (FIFO).  The work stealing tasks are
 // the last to be added.  Once the tasks are added, the GC threads grab
 // a task and go.  A single thread can do all the non-work stealing tasks
--- a/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -139,11 +139,6 @@
       return true;
     }
   }
-  // No object starts in this slice; verify this using
-  // more traditional methods:  Note that no object can
-  // start before the start_addr.
-  assert(end_addr == start_addr ||
-         object_start(end_addr - 1) <= start_addr,
-         "Oops an object does start in this slice?");
+
   return false;
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -488,6 +488,10 @@
   return young_gen()->eden_space()->tlab_capacity(thr);
 }
 
+size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
+  return young_gen()->eden_space()->tlab_used(thr);
+}
+
 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
   return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
 }
@@ -673,7 +677,7 @@
 
 // Before delegating the resize to the young generation,
 // the reserved space for the young and old generations
-// may be changed to accomodate the desired resize.
+// may be changed to accommodate the desired resize.
 void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
     size_t survivor_size) {
   if (UseAdaptiveGCBoundary) {
@@ -690,7 +694,7 @@
 
 // Before delegating the resize to the old generation,
 // the reserved space for the young and old generations
-// may be changed to accomodate the desired resize.
+// may be changed to accommodate the desired resize.
 void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
   if (UseAdaptiveGCBoundary) {
     if (size_policy()->bytes_absorbed_from_eden() != 0) {
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -187,6 +187,7 @@
   bool supports_tlab_allocation() const { return true; }
 
   size_t tlab_capacity(Thread* thr) const;
+  size_t tlab_used(Thread* thr) const;
   size_t unsafe_max_tlab_alloc(Thread* thr) const;
 
   // Can a compiler initialize a new object without store barriers?
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -45,7 +45,7 @@
 // the do_it() method of a ThreadRootsMarkingTask is executed, it
 // starts marking from the thread's roots.
 //
-// The enqueuing of the MarkFromRootsTask and ThreadRootsMarkingTask
+// The enqueueing of the MarkFromRootsTask and ThreadRootsMarkingTask
 // do little more than create the task and put it on a queue.  The
 // queue is a GCTaskQueue and threads steal tasks from this GCTaskQueue.
 //
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
 #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
@@ -76,6 +77,38 @@
   _old_gen_policy_is_ready = false;
 }
 
+size_t PSAdaptiveSizePolicy::calculate_free_based_on_live(size_t live, uintx ratio_as_percentage) {
+  // We want to calculate how much free memory there can be based on the
+  // amount of live data currently in the old gen. Using the formula:
+  // ratio * (free + live) = free
+  // Some equation solving later we get:
+  // free = (live * ratio) / (1 - ratio)
+
+  const double ratio = ratio_as_percentage / 100.0;
+  const double ratio_inverse = 1.0 - ratio;
+  const double tmp = live * ratio;
+  size_t free = (size_t)(tmp / ratio_inverse);
+
+  return free;
+}
+
+size_t PSAdaptiveSizePolicy::calculated_old_free_size_in_bytes() const {
+  size_t free_size = (size_t)(_promo_size + avg_promoted()->padded_average());
+  size_t live = ParallelScavengeHeap::heap()->old_gen()->used_in_bytes();
+
+  if (MinHeapFreeRatio != 0) {
+    size_t min_free = calculate_free_based_on_live(live, MinHeapFreeRatio);
+    free_size = MAX2(free_size, min_free);
+  }
+
+  if (MaxHeapFreeRatio != 100) {
+    size_t max_free = calculate_free_based_on_live(live, MaxHeapFreeRatio);
+    free_size = MIN2(max_free, free_size);
+  }
+
+  return free_size;
+}
+
 void PSAdaptiveSizePolicy::major_collection_begin() {
   // Update the interval time
   _major_timer.stop();
@@ -482,7 +515,7 @@
   //   adjust down the total heap size.  Adjust down the larger of the
   //   generations.
 
-  // Add some checks for a threshhold for a change.  For example,
+  // Add some checks for a threshold for a change.  For example,
   // a change less than the necessary alignment is probably not worth
   // attempting.
 
@@ -1161,7 +1194,7 @@
     // We use the tenuring threshold to equalize the cost of major
     // and minor collections.
     // ThresholdTolerance is used to indicate how sensitive the
-    // tenuring threshold is to differences in cost betweent the
+    // tenuring threshold is to differences in cost between the
     // collection types.
 
     // Get the times of interest. This involves a little work, so
@@ -1292,3 +1325,18 @@
                           st,
                           PSScavenge::tenuring_threshold());
 }
+
+#ifndef PRODUCT
+
+void TestOldFreeSpaceCalculation_test() {
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 20) == 25, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 50) == 100, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 60) == 150, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 75) == 300, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 20) == 100, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 50) == 400, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 60) == 600, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 75) == 1200, "Calculation of free memory failed");
+}
+
+#endif /* !PRODUCT */
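
To see where calculate_free_based_on_live() gets its closed form: the free
ratios constrain free / (free + live), so the target satisfies
ratio * (free + live) = free. Expanding gives ratio * live = free * (1 - ratio),
hence free = (live * ratio) / (1 - ratio). For live = 400 and ratio = 60% this
is 400 * 0.6 / 0.4 = 600, matching the corresponding assertion in
TestOldFreeSpaceCalculation_test() above.
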
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -37,7 +37,7 @@
 //
 // It also computes an optimal tenuring threshold between the young
 // and old generations, so as to equalize the cost of collections
-// of those generations, as well as optimial survivor space sizes
+// of those generations, as well as optimal survivor space sizes
 // for the young generation.
 //
 // While this class is specifically intended for a generational system
@@ -113,7 +113,7 @@
   // Changing the generation sizing depends on the data that is
   // gathered about the effects of changes on the pause times and
   // throughput.  These variable count the number of data points
-  // gathered.  The policy may use these counters as a threshhold
+  // gathered.  The policy may use these counters as a threshold
   // for reliable data.
   julong _young_gen_change_for_major_pause_count;
 
@@ -240,7 +240,6 @@
   void major_collection_begin();
   void major_collection_end(size_t amount_live, GCCause::Cause gc_cause);
 
-  //
   void tenured_allocation(size_t size) {
     _avg_pretenured->sample(size);
   }
@@ -248,9 +247,9 @@
   // Accessors
   // NEEDS_CLEANUP   should use sizes.hpp
 
-  size_t calculated_old_free_size_in_bytes() const {
-    return (size_t)(_promo_size + avg_promoted()->padded_average());
-  }
+  static size_t calculate_free_based_on_live(size_t live, uintx ratio_as_percentage);
+
+  size_t calculated_old_free_size_in_bytes() const;
 
   size_t average_old_live_in_bytes() const {
     return (size_t) avg_old_live()->average();
--- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -195,7 +195,7 @@
 
   // Update all the counters that can be updated from the size policy.
   // This should be called after all policy changes have been made
-  // and reflected internall in the size policy.
+  // and reflected internally in the size policy.
   void update_counters_from_policy();
 
   // Update counters that can be updated from fields internal to the
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -661,7 +661,7 @@
 }
 
 jlong PSMarkSweep::millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong ret_val = now - _time_of_last_gc;
@@ -674,7 +674,7 @@
 }
 
 void PSMarkSweep::reset_millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -280,7 +280,7 @@
         "Should be true before post_resize()");
       MemRegion mangle_region(object_space()->end(), virtual_space_high);
       // Note that the object space has not yet been updated to
-      // coincede with the new underlying virtual space.
+      // coincide with the new underlying virtual space.
       SpaceMangler::mangle_region(mangle_region);
     }
     post_resize();
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -187,7 +187,7 @@
 
   void space_invariants() PRODUCT_RETURN;
 
-  // Performace Counter support
+  // Performance Counter support
   void update_counters();
 
   // Printing support
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -2176,7 +2176,7 @@
 
     heap->resize_all_tlabs();
 
-    // Resize the metaspace capactiy after a collection
+    // Resize the metaspace capacity after a collection
     MetaspaceGC::compute_new_size();
 
     if (TraceGen1Time) accumulated_time()->stop();
@@ -3285,7 +3285,7 @@
 }
 
 jlong PSParallelCompact::millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong ret_val = now - _time_of_last_gc;
@@ -3298,7 +3298,7 @@
 }
 
 void PSParallelCompact::reset_millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -877,7 +877,7 @@
 // The summary phase calculates the total live data to the left of each region
 // XXX.  Based on that total and the bottom of the space, it can calculate the
 // starting location of the live data in XXX.  The summary phase calculates for
-// each region XXX quantites such as
+// each region XXX quantities such as
 //
 //      - the amount of live data at the beginning of a region from an object
 //        entering the region.
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -78,7 +78,7 @@
   // Returns a subregion containing all objects in this space.
   MemRegion used_region()            { return MemRegion(bottom(), top()); }
 
-  // Boolean querries.
+  // Boolean queries.
   bool is_empty() const              { return used() == 0; }
   bool not_empty() const             { return used() > 0; }
   bool contains(const void* p) const { return _bottom <= p && p < _end; }
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -529,8 +529,19 @@
           counters->update_survivor_overflowed(_survivor_overflow);
         }
 
+        size_t max_young_size = young_gen->max_size();
+
+        // Deciding a free ratio in the young generation is tricky, so if
+        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying
+        // that the old generation size may have been limited because of them) we
+        // should then limit our young generation size using NewRatio to have it
+        // follow the old generation size.
+        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
+          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
+        }
+
         size_t survivor_limit =
-          size_policy->max_survivor_size(young_gen->max_size());
+          size_policy->max_survivor_size(max_young_size);
         _tenuring_threshold =
           size_policy->compute_survivor_space_size_and_threshold(
                                                            _survivor_overflow,
@@ -553,12 +564,11 @@
         // Do call at minor collections?
         // Don't check if the size_policy is ready at this
         // level.  Let the size_policy check that internally.
-        if (UseAdaptiveSizePolicy &&
-            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
+        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
             ((gc_cause != GCCause::_java_lang_system_gc) ||
               UseAdaptiveSizePolicyWithSystemGC)) {
 
-          // Calculate optimial free space amounts
+          // Calculate optimal free space amounts
           assert(young_gen->max_size() >
             young_gen->from_space()->capacity_in_bytes() +
             young_gen->to_space()->capacity_in_bytes(),
@@ -568,7 +578,7 @@
           size_t eden_live = young_gen->eden_space()->used_in_bytes();
           size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
           size_t max_old_gen_size = old_gen->max_gen_size();
-          size_t max_eden_size = young_gen->max_size() -
+          size_t max_eden_size = max_young_size -
             young_gen->from_space()->capacity_in_bytes() -
             young_gen->to_space()->capacity_in_bytes();
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -35,7 +35,7 @@
 class PSVirtualSpace : public CHeapObj<mtGC> {
   friend class VMStructs;
  protected:
-  // The space is committed/uncommited in chunks of size _alignment.  The
+  // The space is committed/uncommitted in chunks of size _alignment.  The
   // ReservedSpace passed to initialize() must be aligned to this value.
   const size_t _alignment;
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -136,7 +136,7 @@
     // generation - the less space committed, the smaller the survivor
     // space, possibly as small as an alignment. However, we are interested
     // in the case where the young generation is 100% committed, as this
-    // is the point where eden reachs its maximum size. At this point,
+    // is the point where eden reaches its maximum size. At this point,
     // the size of a survivor space is max_survivor_size.
     max_eden_size = size - 2 * max_survivor_size;
   }
@@ -288,7 +288,7 @@
   // There used to be this guarantee there.
   // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
   // Code below forces this requirement.  In addition the desired eden
-  // size and disired survivor sizes are desired goals and may
+  // size and desired survivor sizes are desired goals and may
   // exceed the total generation size.
 
   assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -127,7 +127,7 @@
   void adjust_pointers();
   void compact();
 
-  // Called during/after gc
+  // Called during/after GC
   void swap_spaces();
 
   // Resize generation using suggested free space size and survivor size
@@ -146,14 +146,14 @@
   size_t free_in_words() const;
 
   // The max this generation can grow to
-  size_t max_size() const            { return _reserved.byte_size(); }
+  size_t max_size() const { return _reserved.byte_size(); }
 
   // The max this generation can grow to if the boundary between
   // the generations are allowed to move.
   size_t gen_size_limit() const { return _max_gen_size; }
 
   bool is_maximal_no_gc() const {
-    return true;  // never expands except at a GC
+    return true;  // Never expands except at a GC
   }
 
   // Allocation
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -121,7 +121,7 @@
 
   // Choose a number of GC threads based on the current size
   // of the heap.  This may be complicated because the size of
-  // the heap depends on factors such as the thoughput goal.
+  // the heap depends on factors such as the throughput goal.
   // Still a large heap should be collected by more GC threads.
   active_workers_by_heap_size =
       MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
@@ -168,7 +168,7 @@
 
   if (TraceDynamicGCThreads) {
      gclog_or_tty->print_cr("GCTaskManager::calc_default_active_workers() : "
-       "active_workers(): %d  new_acitve_workers: %d  "
+       "active_workers(): %d  new_active_workers: %d  "
        "prev_active_workers: %d\n"
        " active_workers_by_JT: %d  active_workers_by_heap_size: %d",
        active_workers, new_active_workers, prev_active_workers,
@@ -445,7 +445,7 @@
   // into account (i.e., don't trigger if the amount of free
   // space has suddenly jumped up).  If the current is much
   // higher than the average, use the average since it represents
-  // the longer term behavor.
+  // the longer term behavior.
   const size_t live_in_eden =
     MIN2(eden_live, (size_t) avg_eden_live()->average());
   const size_t free_in_eden = max_eden_size > live_in_eden ?
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -74,7 +74,7 @@
   };
 
   // Goal for the fraction of the total time during which application
-  // threads run.
+  // threads run
   const double _throughput_goal;
 
   // Last calculated sizes, in bytes, and aligned
@@ -83,21 +83,21 @@
 
   size_t _survivor_size;    // calculated survivor size in bytes
 
-  // This is a hint for the heap:  we've detected that gc times
+  // This is a hint for the heap:  we've detected that GC times
   // are taking longer than GCTimeLimit allows.
   bool _gc_overhead_limit_exceeded;
   // Use for diagnostics only.  If UseGCOverheadLimit is false,
   // this variable is still set.
   bool _print_gc_overhead_limit_would_be_exceeded;
   // Count of consecutive GC that have exceeded the
-  // GC time limit criterion.
+  // GC time limit criterion
   uint _gc_overhead_limit_count;
   // This flag signals that GCTimeLimit is being exceeded
-  // but may not have done so for the required number of consequetive
-  // collections.
+  // but may not have done so for the required number of consecutive
+  // collections
 
   // Minor collection timers used to determine both
-  // pause and interval times for collections.
+  // pause and interval times for collections
   static elapsedTimer _minor_timer;
 
   // Major collection timers, used to determine both
@@ -120,7 +120,7 @@
   // Statistics for survivor space calculation for young generation
   AdaptivePaddedAverage*   _avg_survived;
 
-  // Objects that have been directly allocated in the old generation.
+  // Objects that have been directly allocated in the old generation
   AdaptivePaddedNoZeroDevAverage*   _avg_pretenured;
 
   // Variable for estimating the major and minor pause times.
@@ -142,33 +142,33 @@
   // for making ergonomic decisions.
   double _latest_minor_mutator_interval_seconds;
 
-  // Allowed difference between major and minor gc times, used
-  // for computing tenuring_threshold.
+  // Allowed difference between major and minor GC times, used
+  // for computing tenuring_threshold
   const double _threshold_tolerance_percent;
 
-  const double _gc_pause_goal_sec; // goal for maximum gc pause
+  const double _gc_pause_goal_sec; // Goal for maximum GC pause
 
   // Flag indicating that the adaptive policy is ready to use
   bool _young_gen_policy_is_ready;
 
-  // decrease/increase the young generation for minor pause time
+  // Decrease/increase the young generation for minor pause time
   int _change_young_gen_for_min_pauses;
 
-  // decrease/increase the old generation for major pause time
+  // Decrease/increase the old generation for major pause time
   int _change_old_gen_for_maj_pauses;
 
-  //   change old geneneration for throughput
+  //   change old generation for throughput
   int _change_old_gen_for_throughput;
 
   //   change young generation for throughput
   int _change_young_gen_for_throughput;
 
   // Flag indicating that the policy would
-  //   increase the tenuring threshold because of the total major gc cost
-  //   is greater than the total minor gc cost
+  //   increase the tenuring threshold because the total major GC cost
+  //   is greater than the total minor GC cost
   bool _increment_tenuring_threshold_for_gc_cost;
-  //   decrease the tenuring threshold because of the the total minor gc
-  //   cost is greater than the total major gc cost
+  //   decrease the tenuring threshold because the total minor GC
+  //   cost is greater than the total major GC cost
   bool _decrement_tenuring_threshold_for_gc_cost;
   //   decrease due to survivor size limit
   bool _decrement_tenuring_threshold_for_survivor_limit;
@@ -182,7 +182,7 @@
   // Changing the generation sizing depends on the data that is
   // gathered about the effects of changes on the pause times and
   // throughput.  These variable count the number of data points
-  // gathered.  The policy may use these counters as a threshhold
+  // gathered.  The policy may use these counters as a threshold
   // for reliable data.
   julong _young_gen_change_for_minor_throughput;
   julong _old_gen_change_for_major_throughput;
@@ -225,7 +225,7 @@
   // larger than 1.0 if just the sum of the minor cost and
   // the major cost is used.  Worse than that is the
   // fact that the minor cost and the major cost each
-  // tend toward 1.0 in the extreme of high gc costs.
+  // tend toward 1.0 in the extreme of high GC costs.
   // Limit the value of gc_cost to 1.0 so that the mutator
   // cost stays non-negative.
   virtual double gc_cost() const {
@@ -238,23 +238,23 @@
   virtual double time_since_major_gc() const;
 
   // Average interval between major collections to be used
-  // in calculating the decaying major gc cost.  An overestimate
+  // in calculating the decaying major GC cost.  An overestimate
   // of this time would be a conservative estimate because
   // this time is used to decide if the major GC cost
   // should be decayed (i.e., if the time since the last
-  // major gc is long compared to the time returned here,
+  // major GC is long compared to the time returned here,
   // then the major GC cost will be decayed).  See the
   // implementations for the specifics.
   virtual double major_gc_interval_average_for_decay() const {
     return _avg_major_interval->average();
   }
 
-  // Return the cost of the GC where the major gc cost
+  // Return the cost of the GC where the major GC cost
   // has been decayed based on the time since the last
   // major collection.
   double decaying_gc_cost() const;
 
-  // Decay the major gc cost.  Use this only for decisions on
+  // Decay the major GC cost.  Use this only for decisions on
   // whether to adjust, not to determine by how much to adjust.
   // This approximation is crude and may not be good enough for the
   // latter.
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -49,11 +49,11 @@
   // estimates.
   AdaptivePaddedAverage _demand_rate_estimate;
 
-  ssize_t     _desired;         // Demand stimate computed as described above
+  ssize_t     _desired;          // Demand estimate computed as described above
   ssize_t     _coal_desired;     // desired +/- small-percent for tuning coalescing
 
-  ssize_t     _surplus;         // count - (desired +/- small-percent),
-                                // used to tune splitting in best fit
+  ssize_t     _surplus;          // count - (desired +/- small-percent),
+                                 // used to tune splitting in best fit
   ssize_t     _bfr_surp;         // surplus at start of current sweep
   ssize_t     _prev_sweep;       // count from end of previous sweep
   ssize_t     _before_sweep;     // count from before current sweep
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -31,6 +31,7 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 
 // CopyrightVersion 1.2
 
@@ -54,7 +55,7 @@
 void ConcurrentGCThread::create_and_start() {
   if (os::create_thread(this, os::cgc_thread)) {
     // XXX: need to set this to low priority
-    // unless "agressive mode" set; priority
+    // unless "aggressive mode" set; priority
     // should be just less than that of VMThread.
     os::set_priority(this, NearMaxPriority);
     if (!_should_terminate && !DisableStartThread) {
@@ -206,7 +207,7 @@
     // exceptions anyway, check and abort if this fails.
     if (res == NULL || res->osthread() == NULL) {
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
+                                    os::native_thread_creation_failed_msg());
     }
     java_lang_Thread::set_thread(thread_oop(), res);
     java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
--- a/src/share/vm/gc_implementation/shared/gcUtil.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/gcUtil.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -159,7 +159,7 @@
 // that no calculation of the slope has yet been done.  Returning true
 // for a slope equal to 0 reflects the intuitive expectation of the
 // dependence on the slope.  Don't use the complement of these functions
-// since that untuitive expectation is not built into the complement.
+// since that intuitive expectation is not built into the complement.
 bool LinearLeastSquareFit::decrement_will_decrease() {
   return (_slope >= 0.00);
 }
--- a/src/share/vm/gc_implementation/shared/gcUtil.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -210,7 +210,7 @@
   double y(double x);
   double slope() { return _slope; }
   // Methods to decide if a change in the dependent variable will
-  // achive a desired goal.  Note that these methods are not
+  // achieve a desired goal.  Note that these methods are not
   // complementary and both are needed.
   bool decrement_will_decrease();
   bool increment_will_decrease();
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -72,7 +72,7 @@
 #endif  // NOT_PRODUCT
 
 // There may be unallocated holes in the middle chunks
-// that should be filled with dead objects to ensure parseability.
+// that should be filled with dead objects to ensure parsability.
 void MutableNUMASpace::ensure_parsability() {
   for (int i = 0; i < lgrp_spaces()->length(); i++) {
     LGRPSpace *ls = lgrp_spaces()->at(i);
@@ -173,6 +173,26 @@
   return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
 }
 
+size_t MutableNUMASpace::tlab_used(Thread *thr) const {
+  // Please see the comments for tlab_capacity().
+  guarantee(thr != NULL, "No thread");
+  int lgrp_id = thr->lgrp_id();
+  if (lgrp_id == -1) {
+    if (lgrp_spaces()->length() > 0) {
+      return used_in_bytes() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
+  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
+  if (i == -1) {
+    return 0;
+  }
+  return lgrp_spaces()->at(i)->space()->used_in_bytes();
+}
+
+
 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
   // Please see the comments for tlab_capacity().
   guarantee(thr != NULL, "No thread");
@@ -539,7 +559,7 @@
                                   bool clear_space,
                                   bool mangle_space,
                                   bool setup_pages) {
-  assert(clear_space, "Reallocation will destory data!");
+  assert(clear_space, "Reallocation will destroy data!");
   assert(lgrp_spaces()->length() > 0, "There should be at least one space");
 
   MemRegion old_region = region(), new_region;
@@ -880,8 +900,8 @@
 }
 
 void MutableNUMASpace::verify() {
-  // This can be called after setting an arbitary value to the space's top,
-  // so an object can cross the chunk boundary. We ensure the parsablity
+  // This can be called after setting an arbitrary value to the space's top,
+  // so an object can cross the chunk boundary. We ensure the parsability
   // of the space and just walk the objects in linear fashion.
   ensure_parsability();
   MutableSpace::verify();
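The new MutableNUMASpace::tlab_used() above falls back to a per-group average when the calling thread has no locality group yet. A standalone sketch of that fallback, using assumed stand-in types rather than the real LGRPSpace API:

#include <cstddef>
#include <vector>

struct Group { size_t used_bytes; };  // hypothetical stand-in for LGRPSpace

static size_t tlab_used_sketch(const std::vector<Group>& groups,
                               int lgrp_id, size_t total_used) {
  if (lgrp_id == -1) {                      // thread not bound to a group yet:
    return groups.empty() ? 0               // report the per-group average
                          : total_used / groups.size();
  }
  if ((size_t)lgrp_id >= groups.size()) {   // unknown group: nothing attributable
    return 0;
  }
  return groups[lgrp_id].used_bytes;        // the thread's own group's figure
}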
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -217,6 +217,7 @@
   using MutableSpace::capacity_in_words;
   virtual size_t capacity_in_words(Thread* thr) const;
   virtual size_t tlab_capacity(Thread* thr) const;
+  virtual size_t tlab_used(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 
   // Allocation (return NULL if full)
--- a/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -31,7 +31,7 @@
 
 // A MutableSpace is a subtype of ImmutableSpace that supports the
 // concept of allocation. This includes the concepts that a space may
-// be only partially full, and the querry methods that go with such
+// be only partially full, and the query methods that go with such
 // an assumption. MutableSpace is also responsible for minimizing the
 // page allocation time by having the memory pretouched (with
 // AlwaysPretouch) and for optimizing page placement on NUMA systems
@@ -111,7 +111,7 @@
 
   virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
 
-  // Boolean querries.
+  // Boolean queries.
   bool is_empty() const              { return used_in_words() == 0; }
   bool not_empty() const             { return used_in_words() > 0; }
   bool contains(const void* p) const { return _bottom <= p && p < _end; }
@@ -124,6 +124,7 @@
   virtual size_t used_in_words() const                    { return pointer_delta(top(), bottom()); }
   virtual size_t free_in_words() const                    { return pointer_delta(end(),    top()); }
   virtual size_t tlab_capacity(Thread* thr) const         { return capacity_in_bytes();            }
+  virtual size_t tlab_used(Thread* thr) const             { return used_in_bytes();                }
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const { return free_in_bytes();                }
 
   // Allocation (return NULL if full)
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -89,6 +89,10 @@
 // scavenge; it clears the sensor accumulators.
 void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
   assert(ResizePLAB, "Not set");
+
+  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
+         "PLAB clipping computation may be incorrect");
+
   if (_allocated == 0) {
     assert(_unused == 0,
            err_msg("Inconsistency in PLAB stats: "
@@ -152,7 +156,7 @@
 
 // The buffer comes with its own BOT, with a shared (obviously) underlying
 // BlockOffsetSharedArray. We manipulate this BOT in the normal way
-// as we would for any contiguous space. However, on accasion we
+// as we would for any contiguous space. However, on occasion we
 // need to do some buffer surgery at the extremities before we
 // start using the body of the buffer for allocations. Such surgery
 // (as explained elsewhere) is to prevent allocation on a card that
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -92,7 +92,7 @@
   }
 
   // The total (word) size of the buffer, including both allocated and
-  // unallocted space.
+  // unallocated space.
   size_t word_sz() { return _word_sz; }
 
   // Should only be done if we are about to reset with a new buffer of the
@@ -181,16 +181,7 @@
     _used(0),
     _desired_plab_sz(desired_plab_sz_),
     _filter(wt)
-  {
-    size_t min_sz = min_size();
-    size_t max_sz = max_size();
-    size_t aligned_min_sz = align_object_size(min_sz);
-    size_t aligned_max_sz = align_object_size(max_sz);
-    assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz &&
-           min_sz <= max_sz,
-           "PLAB clipping computation in adjust_desired_plab_sz()"
-           " may be incorrect");
-  }
+  { }
 
   static const size_t min_size() {
     return ParGCAllocBuffer::min_size();
--- a/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -75,7 +75,7 @@
 
   // High water mark for allocations.  Typically, the space above
   // this point have been mangle previously and don't need to be
-  // touched again.  Space belows this point has been allocated
+  // touched again.  Space below this point has been allocated
   // and remangling is needed between the current top and this
   // high water mark.
   HeapWord* _top_for_allocations;
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -56,6 +56,7 @@
 #else /* USDT2 */
   HOTSPOT_GC_BEGIN(
                    full);
+  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 #endif /* USDT2 */
 }
 
@@ -64,8 +65,8 @@
   HS_DTRACE_PROBE(hotspot, gc__end);
   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 #else /* USDT2 */
-  HOTSPOT_GC_END(
-);
+  HOTSPOT_GC_END();
+  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 #endif /* USDT2 */
 }
 
@@ -82,7 +83,7 @@
 
 // Allocations may fail in several threads at about the same time,
 // resulting in multiple gc requests.  We only want to do one of them.
-// In case a GC locker is active and the need for a GC is already signalled,
+// In case a GC locker is active and the need for a GC is already signaled,
 // we want to skip this GC attempt altogether, without doing a futile
 // safepoint operation.
 bool VM_GC_Operation::skip_operation() const {
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -320,6 +320,21 @@
   assert(thread->deferred_card_mark().is_empty(), "invariant");
 }
 
+size_t CollectedHeap::max_tlab_size() const {
+  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
+  // This restriction could be removed by enabling filling with multiple arrays.
+  // If we compute that in the reasonable way as
+  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
+  // we'll overflow on the multiply, so we do the divide first.
+  // We actually lose a little by dividing first,
+  // but that just makes the TLAB somewhat smaller than the biggest array,
+  // which is fine, since we'll be able to fill that.
+  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
+              sizeof(jint) *
+              ((juint) max_jint / (size_t) HeapWordSize);
+  return align_size_down(max_int_size, MinObjAlignment);
+}
+
 // Helper for ReduceInitialCardMarks. For performance,
 // compiled code may elide card-marks for initializing stores
 // to a newly allocated object along the fast-path. We
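The divide-before-multiply trick in max_tlab_size() above can be checked in isolation. A self-contained sketch; the word and header sizes are illustrative assumptions:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t heap_word_size = 8;   // assumption: 64-bit HeapWordSize
  const size_t header_words   = 4;   // hypothetical int-array header, in words
  const uint32_t max_int      = 0x7fffffff;

  // Dividing first keeps the intermediate value in range even on 32-bit
  // builds; the result is only slightly smaller than the exact maximum,
  // which is fine since the TLAB can still be filled.
  size_t max_int_size_words =
      header_words + sizeof(int32_t) * ((size_t)max_int / heap_word_size);
  printf("largest int-array TLAB, in words: %zu\n", max_int_size_words);
  return 0;
}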
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -394,14 +394,16 @@
   // the following methods:
   // Returns "true" iff the heap supports thread-local allocation buffers.
   // The default is "no".
-  virtual bool supports_tlab_allocation() const {
-    return false;
-  }
+  virtual bool supports_tlab_allocation() const = 0;
+
   // The amount of space available for thread-local allocation buffers.
-  virtual size_t tlab_capacity(Thread *thr) const {
-    guarantee(false, "thread-local allocation buffers not supported");
-    return 0;
-  }
+  virtual size_t tlab_capacity(Thread *thr) const = 0;
+
+  // The amount of used space for thread-local allocation buffers for the given thread.
+  virtual size_t tlab_used(Thread *thr) const = 0;
+
+  virtual size_t max_tlab_size() const;
+
   // An estimate of the maximum allocation that could be performed
   // for thread-local allocation buffers without triggering any
   // collection or expansion activity.
--- a/src/share/vm/gc_interface/gcCause.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/gc_interface/gcCause.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -31,7 +31,7 @@
 // This class exposes implementation details of the various
 // collector(s), and we need to be very careful with it. If
 // use of this class grows, we should split it into public
-// and implemenation-private "causes".
+// and implementation-private "causes".
 //
 
 class GCCause : public AllStatic {
--- a/src/share/vm/interpreter/linkResolver.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/interpreter/linkResolver.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -564,16 +564,7 @@
     }
   }
 
-  // 5. check if method is concrete
-  if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
-    ResourceMark rm(THREAD);
-    THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
-              Method::name_and_sig_as_C_string(resolved_klass(),
-                                                      method_name,
-                                                      method_signature));
-  }
-
-  // 6. access checks, access checking may be turned off when calling from within the VM.
+  // 5. access checks, access checking may be turned off when calling from within the VM.
   if (check_access) {
     assert(current_klass.not_null() , "current_klass should not be null");
 
@@ -649,16 +640,6 @@
     }
   }
 
-  if (nostatics && resolved_method->is_static()) {
-    ResourceMark rm(THREAD);
-    char buf[200];
-    jio_snprintf(buf, sizeof(buf), "Expected instance not static method %s", Method::name_and_sig_as_C_string(resolved_klass(),
-                                                      resolved_method->name(),
-                                                      resolved_method->signature()));
-    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
-  }
-
-
   if (check_access) {
     // JDK8 adds non-public interface methods, and the accessibility check requirement
     assert(current_klass.not_null() , "current_klass should not be null");
@@ -702,6 +683,15 @@
     }
   }
 
+  if (nostatics && resolved_method->is_static()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "Expected instance not static method %s",
+                 Method::name_and_sig_as_C_string(resolved_klass(),
+                 resolved_method->name(), resolved_method->signature()));
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
   if (TraceItables && Verbose) {
     ResourceMark rm(THREAD);
     tty->print("invokeinterface resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
--- a/src/share/vm/interpreter/rewriter.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/interpreter/rewriter.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -250,8 +250,8 @@
     // We will reverse the bytecode rewriting _after_ adjusting them.
     // Adjust the cache index by offset to the invokedynamic entries in the
     // cpCache plus the delta if the invokedynamic bytecodes were adjusted.
-    cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
-    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
+    int adjustment = cp_cache_delta() + _first_iteration_cp_cache_limit;
+    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index - adjustment);
     assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
     // zero out 4 bytes
     Bytes::put_Java_u4(p, 0);
@@ -453,18 +453,7 @@
   return method;
 }
 
-void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
-  ResourceMark rm(THREAD);
-  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
-  // (That's all, folks.)
-}
-
-
-Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
-  : _klass(klass),
-    _pool(cpool),
-    _methods(methods)
-{
+void Rewriter::rewrite_bytecodes(TRAPS) {
   assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
 
   // determine index maps for Method* rewriting
@@ -508,6 +497,29 @@
   // May have to fix invokedynamic bytecodes if invokestatic/InterfaceMethodref
   // entries had to be added.
   patch_invokedynamic_bytecodes();
+}
+
+void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
+  ResourceMark rm(THREAD);
+  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
+  // (That's all, folks.)
+}
+
+
+Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
+  : _klass(klass),
+    _pool(cpool),
+    _methods(methods)
+{
+
+  // Rewrite bytecodes - an exception here exits the constructor.
+  rewrite_bytecodes(CHECK);
+
+  // Stress restoring bytecodes
+  if (StressRewriter) {
+    restore_bytecodes();
+    rewrite_bytecodes(CHECK);
+  }
 
   // allocate constant pool cache, now that we've seen all the bytecodes
   make_constant_pool_cache(THREAD);
@@ -523,6 +535,7 @@
   // so methods with jsrs in custom class lists aren't attempted to be
   // rewritten in the RO section of the shared archive.
   // Relocated bytecodes don't have to be restored, only the cp cache entries
+  int len = _methods->length();
   for (int i = len-1; i >= 0; i--) {
     methodHandle m(THREAD, _methods->at(i));
 
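The refactored constructor above runs rewrite_bytecodes() and, under the new StressRewriter option, restores and rewrites again before building the constant pool cache. That only pays off if restore is an exact inverse of rewrite. A toy standalone check of that property, using a hypothetical byte transform rather than real bytecode rewriting:

#include <cassert>
#include <vector>

static void rewrite(std::vector<unsigned char>& code) { for (auto& b : code) b ^= 0x42; }
static void restore(std::vector<unsigned char>& code) { for (auto& b : code) b ^= 0x42; }

int main() {
  std::vector<unsigned char> code = {0x10, 0x2a, 0xb1};  // arbitrary bytes
  const std::vector<unsigned char> original = code;

  rewrite(code);
  const std::vector<unsigned char> first_pass = code;

  restore(code);
  assert(code == original);    // restore must undo rewrite exactly

  rewrite(code);
  assert(code == first_pass);  // the second pass reproduces the first
  return 0;
}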
--- a/src/share/vm/interpreter/rewriter.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/interpreter/rewriter.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -199,6 +199,9 @@
 
   void patch_invokedynamic_bytecodes();
 
+  // Do all the work.
+  void rewrite_bytecodes(TRAPS);
+
   // Revert bytecodes in case of an exception.
   void restore_bytecodes();
 
--- a/src/share/vm/memory/allocation.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/allocation.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -579,8 +579,8 @@
   bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
   bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
   bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
-  ResourceObj(); // default construtor
-  ResourceObj(const ResourceObj& r); // default copy construtor
+  ResourceObj(); // default constructor
+  ResourceObj(const ResourceObj& r); // default copy constructor
   ResourceObj& operator=(const ResourceObj& r); // default copy assignment
   ~ResourceObj();
 #endif // ASSERT
--- a/src/share/vm/memory/barrierSet.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/barrierSet.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -124,7 +124,7 @@
   virtual bool has_read_region_opt() = 0;
   virtual bool has_write_region_opt() = 0;
 
-  // These operations should assert false unless the correponding operation
+  // These operations should assert false unless the corresponding operation
   // above returns true.  Otherwise, they should perform an appropriate
   // barrier for an array whose elements are all in the given memory region.
   virtual void read_ref_array(MemRegion mr) = 0;
@@ -165,7 +165,7 @@
   // normally reserve space for such tables, and commit parts of the table
   // "covering" parts of the heap that are committed.  The constructor is
   // passed the maximum number of independently committable subregions to
-  // be covered, and the "resize_covoered_region" function allows the
+  // be covered, and the "resize_covered_region" function allows the
   // sub-parts of the heap to inform the barrier set of changes of their
   // sizes.
   BarrierSet(int max_covered_regions) :
--- a/src/share/vm/memory/binaryTreeDictionary.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -56,7 +56,7 @@
 template <class Chunk_t, template <class> class FreeList_t>
 void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
   TreeChunk<Chunk_t, FreeList_t>* nextTC = (TreeChunk<Chunk_t, FreeList_t>*)next();
-  if (prev() != NULL) { // interior list node shouldn'r have tree fields
+  if (prev() != NULL) { // interior list node shouldn't have tree fields
     guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL &&
               embedded_list()->right()  == NULL, "should be clear");
   }
@@ -247,7 +247,7 @@
     prevFC->link_after(nextTC);
   }
 
-  // Below this point the embeded TreeList<Chunk_t, FreeList_t> being used for the
+  // Below this point the embedded TreeList<Chunk_t, FreeList_t> being used for the
   // tree node may have changed. Don't use "this"
   // TreeList<Chunk_t, FreeList_t>*.
   // chunk should still be a free chunk (bit set in _prev)
@@ -703,7 +703,7 @@
     // The only use of this method would not pass the root of the
     // tree (as indicated by the assertion above that the tree list
     // has a parent) but the specification does not explicitly exclude the
-    // passing of the root so accomodate it.
+    // passing of the root so accommodate it.
     set_root(NULL);
   }
   debug_only(
@@ -1352,7 +1352,7 @@
 template <class Chunk_t, template <class> class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree() const {
   guarantee(root() == NULL || total_free_blocks() == 0 ||
-    total_size() != 0, "_total_size should't be 0?");
+    total_size() != 0, "_total_size shouldn't be 0?");
   guarantee(root() == NULL || root()->parent() == NULL, "_root shouldn't have parent");
   verify_tree_helper(root());
 }
--- a/src/share/vm/memory/binaryTreeDictionary.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/binaryTreeDictionary.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -322,7 +322,7 @@
   void       set_tree_hints(void);
   // Reset statistics for all the lists in the tree.
   void       clear_tree_census(void);
-  // Print the statistcis for all the lists in the tree.  Also may
+  // Print the statistics for all the lists in the tree.  Also may
   // print out summaries.
   void       print_dict_census(void) const;
   void       print_free_lists(outputStream* st) const;
--- a/src/share/vm/memory/blockOffsetTable.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/blockOffsetTable.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -590,7 +590,7 @@
 
   // Otherwise, find the block start using the table, but taking
   // care (cf block_start_unsafe() above) not to parse any objects/blocks
-  // on the cards themsleves.
+  // on the cards themselves.
   size_t index = _array->index_for(addr);
   assert(_array->address_for_index(index) == addr,
          "arg should be start of card");
--- a/src/share/vm/memory/blockOffsetTable.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/blockOffsetTable.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -424,7 +424,7 @@
     BlockOffsetArray(array, mr, false),
     _unallocated_block(_bottom) { }
 
-  // accessor
+  // Accessor
   HeapWord* unallocated_block() const {
     assert(BlockOffsetArrayUseUnallocatedBlock,
            "_unallocated_block is not being maintained");
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -54,8 +54,8 @@
 size_t CardTableModRefBS::compute_byte_map_size()
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
-                                        "unitialized, check declaration order");
-  assert(_page_size != 0, "unitialized, check declaration order");
+                                        "uninitialized, check declaration order");
+  assert(_page_size != 0, "uninitialized, check declaration order");
   const size_t granularity = os::vm_allocation_granularity();
   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
 }
@@ -98,7 +98,7 @@
                                   "card marking array");
   }
 
-  // The assember store_check code will do an unsigned shift of the oop,
+  // The assembler store_check code will do an unsigned shift of the oop,
   // then add it to byte_map_base, i.e.
   //
   //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
@@ -243,7 +243,7 @@
   if (new_region.word_size() != old_region.word_size()) {
     // Commit new or uncommit old pages, if necessary.
     MemRegion cur_committed = _committed[ind];
-    // Extend the end of this _commited region
+    // Extend the end of this _committed region
     // to cover the end of any lower _committed regions.
     // This forms overlapping regions, but never interior regions.
     HeapWord* const max_prev_end = largest_prev_committed_end(ind);
@@ -448,7 +448,7 @@
     // off parallelism is used, then active_workers can be used in
     // place of n_par_threads.
     //  This is an example of a path where n_par_threads is
-    // set to 0 to turn off parallism.
+    // set to 0 to turn off parallelism.
     //  [7] CardTableModRefBS::non_clean_card_iterate()
     //  [8] CardTableRS::younger_refs_in_space_iterate()
     //  [9] Generation::younger_refs_in_space_iterate()
--- a/src/share/vm/memory/cardTableRS.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/cardTableRS.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -590,7 +590,7 @@
       // Then, the case analysis above reveals that, in the worst case,
       // any such stale card will be scanned unnecessarily at most twice.
       //
-      // It is nonethelss advisable to try and get rid of some of this
+      // It is nonetheless advisable to try and get rid of some of this
       // redundant work in a subsequent (low priority) re-design of
       // the card-scanning code, if only to simplify the underlying
       // state machine analysis/proof. ysr 1/28/2002. XXX
--- a/src/share/vm/memory/cardTableRS.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/cardTableRS.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -105,8 +105,6 @@
   ~CardTableRS();
 
   // *** GenRemSet functions.
-  GenRemSet::Name rs_kind() { return GenRemSet::CardTable; }
-
   CardTableRS* as_CardTableRS() { return this; }
 
   CardTableModRefBS* ct_bs() { return _ct_bs; }
--- a/src/share/vm/memory/collectorPolicy.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/collectorPolicy.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -45,7 +45,7 @@
 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
 #endif // INCLUDE_ALL_GCS
 
-// CollectorPolicy methods.
+// CollectorPolicy methods
 
 CollectorPolicy::CollectorPolicy() :
     _space_alignment(0),
@@ -178,17 +178,14 @@
   // byte entry and the os page size is 4096, the maximum heap size should
   // be 512*4096 = 2MB aligned.
 
-  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
-  // is supported.
-  // Requirements of any new remembered set implementations must be added here.
-  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+  size_t alignment = GenRemSet::max_alignment_constraint();
 
   // Parallel GC does its own alignment of the generations to avoid requiring a
   // large page (256M on some platforms) for the permanent generation.  The
   // other collectors should also be updated to do their own alignment and then
   // this use of lcm() should be removed.
   if (UseLargePages && !UseParallelGC) {
-      // in presence of large pages we have to make sure that our
+      // In presence of large pages we have to make sure that our
       // alignment is large page aware
       alignment = lcm(os::large_page_size(), alignment);
   }
@@ -196,7 +193,7 @@
   return alignment;
 }
 
-// GenCollectorPolicy methods.
+// GenCollectorPolicy methods
 
 GenCollectorPolicy::GenCollectorPolicy() :
     _min_gen0_size(0),
@@ -378,10 +375,10 @@
     _initial_heap_byte_size = InitialHeapSize;
   }
 
-  // adjust max heap size if necessary
+  // Adjust NewSize and OldSize or MaxHeapSize to match each other
   if (NewSize + OldSize > MaxHeapSize) {
     if (_max_heap_size_cmdline) {
-      // somebody set a maximum heap size with the intention that we should not
+      // Somebody has set a maximum heap size with the intention that we should not
       // exceed it. Adjust New/OldSize as necessary.
       uintx calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
@@ -442,32 +439,32 @@
   // minimum gen0 sizes.
 
   if (_max_heap_byte_size == _min_heap_byte_size) {
-    // The maximum and minimum heap sizes are the same so
-    // the generations minimum and initial must be the
-    // same as its maximum.
+    // The maximum and minimum heap sizes are the same so the generations
+    // minimum and initial must be the same as its maximum.
     _min_gen0_size = max_new_size;
     _initial_gen0_size = max_new_size;
     _max_gen0_size = max_new_size;
   } else {
     size_t desired_new_size = 0;
-    if (!FLAG_IS_DEFAULT(NewSize)) {
-      // If NewSize is set ergonomically (for example by cms), it
-      // would make sense to use it.  If it is used, also use it
-      // to set the initial size.  Although there is no reason
-      // the minimum size and the initial size have to be the same,
-      // the current implementation gets into trouble during the calculation
-      // of the tenured generation sizes if they are different.
-      // Note that this makes the initial size and the minimum size
-      // generally small compared to the NewRatio calculation.
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      // If NewSize is set on the command line, we must use it as
+      // the initial size and it also makes sense to use it as the
+      // lower limit.
       _min_gen0_size = NewSize;
       desired_new_size = NewSize;
       max_new_size = MAX2(max_new_size, NewSize);
+    } else if (FLAG_IS_ERGO(NewSize)) {
+      // If NewSize is set ergonomically, we should use it as a lower
+      // limit, but use NewRatio to calculate the initial size.
+      _min_gen0_size = NewSize;
+      desired_new_size =
+        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
+      max_new_size = MAX2(max_new_size, NewSize);
     } else {
       // For the case where NewSize is the default, use NewRatio
       // to size the minimum and initial generation sizes.
       // Use the default NewSize as the floor for these values.  If
-      // NewRatio is overly large, the resulting sizes can be too
-      // small.
+      // NewRatio is overly large, the resulting sizes can be too small.
       _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
       desired_new_size =
         MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
@@ -486,8 +483,7 @@
     _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
 
     // At this point all three sizes have been checked against the
-    // maximum sizes but have not been checked for consistency
-    // among the three.
+    // maximum sizes but have not been checked for consistency among the three.
 
     // Final check min <= initial <= max
     _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
@@ -495,7 +491,7 @@
     _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
   }
 
-  // Write back to flags if necessary
+  // Write back to flags if necessary.
   if (NewSize != _initial_gen0_size) {
     FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
   }
@@ -541,7 +537,7 @@
 }
 
 // Minimum sizes of the generations may be different than
-// the initial sizes.  An inconsistently is permitted here
+// the initial sizes.  An inconsistency is permitted here
 // in the total size that can be specified explicitly by
 // command line specification of OldSize and NewSize and
 // also a command line specification of -Xms.  Issue a warning
@@ -553,12 +549,12 @@
   // At this point the minimum, initial and maximum sizes
   // of the overall heap and of gen0 have been determined.
   // The maximum gen1 size can be determined from the maximum gen0
-  // and maximum heap size since no explicit flags exits
+  // and maximum heap size since no explicit flags exist
   // for setting the gen1 maximum.
   _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment);
 
   // If no explicit command line flag has been set for the
-  // gen1 size, use what is left for gen1.
+  // gen1 size, use what is left for gen1
   if (!FLAG_IS_CMDLINE(OldSize)) {
     // The user has not specified any value but the ergonomics
     // may have chosen a value (which may or may not be consistent
@@ -570,14 +566,14 @@
     // _max_gen1_size has already been made consistent above
     FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
   } else {
-    // It's been explicitly set on the command line.  Use the
+    // OldSize has been explicitly set on the command line. Use the
     // OldSize and then determine the consequences.
     _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
     _initial_gen1_size = OldSize;
 
     // If the user has explicitly set an OldSize that is inconsistent
     // with other command line flags, issue a warning.
-    // The generation minimums and the overall heap mimimum should
+    // The generation minimums and the overall heap minimum should
     // be within one generation alignment.
     if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
       warning("Inconsistency between minimum heap size and minimum "
@@ -599,7 +595,7 @@
               _min_gen0_size, _initial_gen0_size, _max_gen0_size);
       }
     }
-    // Initial size
+    // The same as above for the old gen initial size.
     if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                           _initial_heap_byte_size)) {
       if (PrintGCDetails && Verbose) {
@@ -609,10 +605,10 @@
       }
     }
   }
-  // Enforce the maximum gen1 size.
+
   _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
 
-  // Check that min gen1 <= initial gen1 <= max gen1
+  // Make sure that min gen1 <= initial gen1 <= max gen1.
   _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
   _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
 
@@ -653,10 +649,9 @@
 
   HeapWord* result = NULL;
 
-  // Loop until the allocation is satisified,
-  // or unsatisfied after GC.
+  // Loop until the allocation is satisfied, or unsatisfied after GC.
   for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
-    HandleMark hm; // discard any handles allocated in each iteration
+    HandleMark hm; // Discard any handles allocated in each iteration.
 
     // First allocation attempt is lock-free.
     Generation *gen0 = gch->get_gen(0);
@@ -669,7 +664,7 @@
         return result;
       }
     }
-    unsigned int gc_count_before;  // read inside the Heap_lock locked region
+    unsigned int gc_count_before;  // Read inside the Heap_lock locked region.
     {
       MutexLocker ml(Heap_lock);
       if (PrintGC && Verbose) {
@@ -688,19 +683,19 @@
 
       if (GC_locker::is_active_and_needs_gc()) {
         if (is_tlab) {
-          return NULL;  // Caller will retry allocating individual object
+          return NULL;  // Caller will retry allocating individual object.
         }
         if (!gch->is_maximal_no_gc()) {
-          // Try and expand heap to satisfy request
+          // Try and expand heap to satisfy request.
           result = expand_heap_and_allocate(size, is_tlab);
-          // result could be null if we are out of space
+          // Result could be null if we are out of space.
           if (result != NULL) {
             return result;
           }
         }
 
         if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
-          return NULL; // we didn't get to do a GC and we didn't get any memory
+          return NULL; // We didn't get to do a GC and we didn't get any memory.
         }
 
         // If this thread is not in a jni critical section, we stall
@@ -735,7 +730,7 @@
       result = op.result();
       if (op.gc_locked()) {
          assert(result == NULL, "must be NULL if gc_locked() is true");
-         continue;  // retry and/or stall as necessary
+         continue;  // Retry and/or stall as necessary.
       }
 
       // Allocation has failed and a collection
@@ -796,7 +791,7 @@
     if (!gch->is_maximal_no_gc()) {
       result = expand_heap_and_allocate(size, is_tlab);
     }
-    return result;   // could be null if we are out of space
+    return result;   // Could be null if we are out of space.
   } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
     // Do an incremental collection.
     gch->do_collection(false            /* full */,
@@ -918,10 +913,8 @@
                                        GCCause::_metadata_GC_threshold);
     VMThread::execute(&op);
 
-    // If GC was locked out, try again.  Check
-    // before checking success because the prologue
-    // could have succeeded and the GC still have
-    // been locked out.
+    // If GC was locked out, try again. Check before checking success because the
+    // prologue could have succeeded and the GC still have been locked out.
     if (op.gc_locked()) {
       continue;
     }
@@ -982,10 +975,117 @@
 }
 
 void MarkSweepPolicy::initialize_gc_policy_counters() {
-  // initialize the policy counters - 2 collectors, 3 generations
+  // Initialize the policy counters - 2 collectors, 3 generations.
   if (UseParNewGC) {
     _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
   } else {
     _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
   }
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+// Testing that the NewSize flag is handled correctly is hard because it
+// depends on so many other configurable variables. This test only tries to
+// verify that there are some basic rules for NewSize honored by the policies.
+class TestGenCollectorPolicy {
+public:
+  static void test() {
+    size_t flag_value;
+
+    save_flags();
+
+    // Set some limits that make the math simple.
+    FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
+    FLAG_SET_ERGO(uintx, InitialHeapSize, 120 * M);
+    Arguments::set_min_heap_size(40 * M);
+
+    // If NewSize is set on the command line, it should be used
+    // for both min and initial young size if less than min heap.
+    flag_value = 20 * M;
+    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+    verify_min(flag_value);
+    verify_initial(flag_value);
+
+    // If NewSize is set on the command line, but is larger than the min
+    // heap size, it should only be used for initial young size.
+    flag_value = 80 * M;
+    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+    verify_initial(flag_value);
+
+    // If NewSize has been ergonomically set, the collector policy
+    // should use it for min but calculate the initial young size
+    // using NewRatio.
+    flag_value = 20 * M;
+    FLAG_SET_ERGO(uintx, NewSize, flag_value);
+    verify_min(flag_value);
+    verify_scaled_initial(InitialHeapSize);
+
+    restore_flags();
+
+  }
+
+  static void verify_min(size_t expected) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    assert(msp.min_gen0_size() <= expected, err_msg("%zu > %zu", msp.min_gen0_size(), expected));
+  }
+
+  static void verify_initial(size_t expected) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
+  }
+
+  static void verify_scaled_initial(size_t initial_heap_size) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size);
+    assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
+    assert(FLAG_IS_ERGO(NewSize) && NewSize == expected,
+        err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize));
+  }
+
+private:
+  static size_t original_InitialHeapSize;
+  static size_t original_MaxHeapSize;
+  static size_t original_MaxNewSize;
+  static size_t original_MinHeapDeltaBytes;
+  static size_t original_NewSize;
+  static size_t original_OldSize;
+
+  static void save_flags() {
+    original_InitialHeapSize   = InitialHeapSize;
+    original_MaxHeapSize       = MaxHeapSize;
+    original_MaxNewSize        = MaxNewSize;
+    original_MinHeapDeltaBytes = MinHeapDeltaBytes;
+    original_NewSize           = NewSize;
+    original_OldSize           = OldSize;
+  }
+
+  static void restore_flags() {
+    InitialHeapSize   = original_InitialHeapSize;
+    MaxHeapSize       = original_MaxHeapSize;
+    MaxNewSize        = original_MaxNewSize;
+    MinHeapDeltaBytes = original_MinHeapDeltaBytes;
+    NewSize           = original_NewSize;
+    OldSize           = original_OldSize;
+  }
+};
+
+size_t TestGenCollectorPolicy::original_InitialHeapSize   = 0;
+size_t TestGenCollectorPolicy::original_MaxHeapSize       = 0;
+size_t TestGenCollectorPolicy::original_MaxNewSize        = 0;
+size_t TestGenCollectorPolicy::original_MinHeapDeltaBytes = 0;
+size_t TestGenCollectorPolicy::original_NewSize           = 0;
+size_t TestGenCollectorPolicy::original_OldSize           = 0;
+
+void TestNewSize_test() {
+  TestGenCollectorPolicy::test();
+}
+
+#endif
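With the unit test's setup above (MaxHeapSize 180M, InitialHeapSize 120M, minimum heap 40M), the two NewSize paths give different initial young sizes. A worked sketch of the arithmetic, assuming the default NewRatio of 2 and ignoring alignment rounding:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  const size_t initial_heap = 120 * M;
  const unsigned new_ratio  = 2;  // assumption: default NewRatio

  // -XX:NewSize=20m on the command line: min == initial young == 20 MB.
  printf("cmdline: min = initial = %zu MB\n", (20 * M) / M);

  // Ergonomic NewSize of 20m: min young stays 20 MB, but the initial young
  // size comes from NewRatio, roughly initial_heap / (NewRatio + 1).
  printf("ergo:    min = 20 MB, initial ~ %zu MB\n",
         initial_heap / (new_ratio + 1) / M);
  return 0;
}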
--- a/src/share/vm/memory/collectorPolicy.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/collectorPolicy.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -76,10 +76,10 @@
   size_t _heap_alignment;
 
   // Needed to keep information if MaxHeapSize was set on the command line
-  // when the flag value is aligned etc by ergonomics
+  // when the flag value is aligned etc by ergonomics.
   bool _max_heap_size_cmdline;
 
-  // The sizing of the heap are controlled by a sizing policy.
+  // The sizing of the heap is controlled by a sizing policy.
   AdaptiveSizePolicy* _size_policy;
 
   // Set to true when policy wants soft refs cleared.
@@ -102,7 +102,7 @@
     initialize_size_info();
   }
 
-  // Return maximum heap alignment that may be imposed by the policy
+  // Return maximum heap alignment that may be imposed by the policy.
   static size_t compute_heap_alignment();
 
   size_t space_alignment()        { return _space_alignment; }
@@ -180,7 +180,7 @@
                                                        size_t size,
                                                        Metaspace::MetadataType mdtype);
 
-  // Performace Counter support
+  // Performance Counter support
   GCPolicyCounters* counters()     { return _gc_policy_counters; }
 
   // Create the jstat counters for the GC policy.  By default, policy's
@@ -220,6 +220,7 @@
 };
 
 class GenCollectorPolicy : public CollectorPolicy {
+friend class TestGenCollectorPolicy;
  protected:
   size_t _min_gen0_size;
   size_t _initial_gen0_size;
@@ -231,9 +232,8 @@
 
   GenerationSpec **_generations;
 
-  // Return true if an allocation should be attempted in the older
-  // generation if it fails in the younger generation.  Return
-  // false, otherwise.
+  // Return true if an allocation should be attempted in the older generation
+  // if it fails in the younger generation.  Return false, otherwise.
   virtual bool should_try_older_generation_allocation(size_t word_size) const;
 
   void initialize_flags();
@@ -245,7 +245,7 @@
   // Try to allocate space by expanding the heap.
   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 
-  // Compute max heap alignment
+  // Compute max heap alignment.
   size_t compute_max_alignment();
 
  // Scale the base_size by NewRatio according to
@@ -253,7 +253,7 @@
  // and align by min_alignment()
  size_t scale_by_NewRatio_aligned(size_t base_size);
 
- // Bound the value by the given maximum minus the min_alignment
+ // Bound the value by the given maximum minus the min_alignment.
  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
 
  public:
--- a/src/share/vm/memory/defNewGeneration.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/defNewGeneration.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -61,7 +61,6 @@
 DefNewGeneration::KeepAliveClosure::
 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
   GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
-  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
   _rs = (CardTableRS*)rs;
 }
 
@@ -619,13 +618,12 @@
   assert(gch->no_allocs_since_save_marks(0),
          "save marks have not been newly set.");
 
-  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
 
   gch->gen_process_strong_roots(_level,
                                 true,  // Process younger gens, if any,
                                        // as strong roots.
                                 true,  // activate StrongRootsScope
-                                true,  // is scavenging
                                 SharedHeap::ScanningOption(so),
                                 &fsc_with_no_gc_barrier,
                                 true,   // walk *all* scavengable nmethods
@@ -1086,6 +1084,10 @@
   return eden()->capacity();
 }
 
+size_t DefNewGeneration::tlab_used() const {
+  return eden()->used();
+}
+
 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
   return unsafe_max_alloc_nogc();
 }
--- a/src/share/vm/memory/defNewGeneration.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/defNewGeneration.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -239,6 +239,7 @@
   // Thread-local allocation buffers
   bool supports_tlab_allocation() const { return true; }
   size_t tlab_capacity() const;
+  size_t tlab_used() const;
   size_t unsafe_max_tlab_alloc() const;
 
   // Grow the generation by the specified number of bytes.
--- a/src/share/vm/memory/genCollectedHeap.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -126,7 +126,7 @@
                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   // It is important to do this in a way such that concurrent readers can't
-  // temporarily think somethings in the heap.  (Seen this happen in asserts.)
+  // temporarily think something is in the heap.  (Seen this happen in asserts.)
   _reserved.set_word_size(0);
   _reserved.set_start((HeapWord*)heap_rs.base());
   size_t actual_heap_size = heap_rs.size();
@@ -592,7 +592,6 @@
 gen_process_strong_roots(int level,
                          bool younger_gens_as_roots,
                          bool activate_scope,
-                         bool is_scavenging,
                          SharedHeap::ScanningOption so,
                          OopsInGenClosure* not_older_gens,
                          bool do_code_roots,
@@ -601,12 +600,12 @@
   // General strong roots.
 
   if (!do_code_roots) {
-    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
+    SharedHeap::process_strong_roots(activate_scope, so,
                                      not_older_gens, NULL, klass_closure);
   } else {
     bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
     CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
-    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
+    SharedHeap::process_strong_roots(activate_scope, so,
                                      not_older_gens, &code_roots, klass_closure);
   }
 
@@ -933,6 +932,16 @@
   return result;
 }
 
+size_t GenCollectedHeap::tlab_used(Thread* thr) const {
+  size_t result = 0;
+  for (int i = 0; i < _n_gens; i += 1) {
+    if (_gens[i]->supports_tlab_allocation()) {
+      result += _gens[i]->tlab_used();
+    }
+  }
+  return result;
+}
+
 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
   size_t result = 0;
   for (int i = 0; i < _n_gens; i += 1) {
@@ -1263,7 +1272,7 @@
 };
 
 jlong GenCollectedHeap::millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   GenTimeOfLastGCClosure tolgc_cl(now);
--- a/src/share/vm/memory/genCollectedHeap.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -248,6 +248,7 @@
   // Section on TLAB's.
   virtual bool supports_tlab_allocation() const;
   virtual size_t tlab_capacity(Thread* thr) const;
+  virtual size_t tlab_used(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
   virtual HeapWord* allocate_new_tlab(size_t size);
 
@@ -315,7 +316,7 @@
   }
 
   // Update the gc statistics for each generation.
-  // "level" is the level of the lastest collection
+  // "level" is the level of the latest collection.
   void update_gc_stats(int current_level, bool full) {
     for (int i = 0; i < _n_gens; i++) {
       _gens[i]->update_gc_stats(current_level, full);
@@ -411,7 +412,6 @@
                                 // The remaining arguments are in an order
                                 // consistent with SharedHeap::process_strong_roots:
                                 bool activate_scope,
-                                bool is_scavenging,
                                 SharedHeap::ScanningOption so,
                                 OopsInGenClosure* not_older_gens,
                                 bool do_code_roots,
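
The new tlab_used(Thread*) entry point mirrors tlab_capacity: each generation that supports TLAB allocation reports its own used bytes (for DefNewGeneration this is simply eden()->used(), per the hunk above), and GenCollectedHeap sums them. A minimal self-contained sketch of the pattern, using hypothetical stand-ins for the Generation hierarchy:

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-ins for the Generation hierarchy in the hunks above.
    struct Generation {
      virtual bool   supports_tlab_allocation() const { return false; }
      virtual size_t tlab_used() const                { return 0; }
      virtual ~Generation() {}
    };

    struct EdenLikeGeneration : Generation {
      size_t eden_used;
      explicit EdenLikeGeneration(size_t used) : eden_used(used) {}
      bool   supports_tlab_allocation() const { return true; }
      size_t tlab_used() const                { return eden_used; } // cf. eden()->used()
    };

    // Mirrors GenCollectedHeap::tlab_used: sum over TLAB-capable generations.
    static size_t heap_tlab_used(const std::vector<Generation*>& gens) {
      size_t result = 0;
      for (size_t i = 0; i < gens.size(); i++) {
        if (gens[i]->supports_tlab_allocation()) {
          result += gens[i]->tlab_used();
        }
      }
      return result;
    }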
--- a/src/share/vm/memory/genMarkSweep.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/genMarkSweep.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -148,8 +148,8 @@
   Universe::update_heap_info_at_gc();
 
   // Update time of last gc for all generations we collected
-  // (which curently is all the generations in the heap).
-  // We need to use a monotonically non-deccreasing time in ms
+  // (which currently is all the generations in the heap).
+  // We need to use a monotonically non-decreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@@ -210,7 +210,6 @@
   gch->gen_process_strong_roots(level,
                                 false, // Younger gens are not roots.
                                 true,  // activate StrongRootsScope
-                                false, // not scavenging
                                 SharedHeap::SO_SystemClasses,
                                 &follow_root_closure,
                                 true,   // walk code active on stacks
@@ -296,7 +295,6 @@
   gch->gen_process_strong_roots(level,
                                 false, // Younger gens are not roots.
                                 true,  // activate StrongRootsScope
-                                false, // not scavenging
                                 SharedHeap::SO_AllClasses,
                                 &adjust_pointer_closure,
                                 false, // do not walk code
--- a/src/share/vm/memory/genOopClosures.inline.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/genOopClosures.inline.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -45,7 +45,6 @@
   // Barrier set for the heap, must be set after heap is initialized
   if (_rs == NULL) {
     GenRemSet* rs = SharedHeap::heap()->rem_set();
-    assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind");
     _rs = (CardTableRS*)rs;
   }
 }
--- a/src/share/vm/memory/genRemSet.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/genRemSet.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -31,8 +31,7 @@
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
 
-uintx GenRemSet::max_alignment_constraint(Name nm) {
-  assert(nm == GenRemSet::CardTable, "Unrecognized GenRemSet type.");
+uintx GenRemSet::max_alignment_constraint() {
   return CardTableRS::ct_max_alignment_constraint();
 }
 
--- a/src/share/vm/memory/genRemSet.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/genRemSet.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -27,7 +27,7 @@
 
 #include "oops/oop.hpp"
 
-// A GenRemSet provides ways of iterating over pointers accross generations.
+// A GenRemSet provides ways of iterating over pointers across generations.
 // (This is especially useful for older-to-younger.)
 
 class Generation;
@@ -53,19 +53,12 @@
   KlassRemSet _klass_rem_set;
 
 public:
-  enum Name {
-    CardTable,
-    Other
-  };
-
   GenRemSet(BarrierSet * bs) : _bs(bs) {}
   GenRemSet() : _bs(NULL) {}
 
-  virtual Name rs_kind() = 0;
-
   // These are for dynamic downcasts.  Unfortunately that it names the
   // possible subtypes (but not that they are subtypes!)  Return NULL if
-  // the cast is invalide.
+  // the cast is invalid.
   virtual CardTableRS* as_CardTableRS() { return NULL; }
 
   // Return the barrier set associated with "this."
@@ -106,10 +99,9 @@
   // within the heap, this function tells whether they are met.
   virtual bool is_aligned(HeapWord* addr) = 0;
 
-  // If the RS (or BS) imposes an aligment constraint on maximum heap size.
-  // (This must be static, and dispatch on "nm", because it is called
-  // before an RS is created.)
-  static uintx max_alignment_constraint(Name nm);
+  // Returns any alignment constraint that the remembered set imposes upon the
+  // heap.
+  static uintx max_alignment_constraint();
 
   virtual void verify() = 0;
 
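With the Name enum gone, GenRemSet::max_alignment_constraint() no longer dispatches on a remembered-set kind: the card table is the only remaining kind, so the static call can be made before any remembered set exists. A hedged sketch of a policy-level caller; compute_max_alignment and os_vm_page_size here are assumed shapes for illustration, not code from this changeset.

    #include <cstddef>

    // Assumed stand-ins: the card table's alignment requirement and the
    // OS page size.  The values are illustrative only.
    static size_t ct_max_alignment_constraint() { return 512 * 4096; }
    static size_t os_vm_page_size()             { return 4096; }

    // Previously this took a GenRemSet::Name and asserted it was CardTable;
    // now the card table constraint is returned unconditionally.
    static size_t max_alignment_constraint() {
      return ct_max_alignment_constraint();
    }

    // Sketch of a caller computing heap alignment as the larger of the
    // remembered-set constraint and the page size.
    static size_t compute_max_alignment() {
      size_t alignment = max_alignment_constraint();
      if (os_vm_page_size() > alignment) {
        alignment = os_vm_page_size();
      }
      return alignment;
    }
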
--- a/src/share/vm/memory/generation.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/generation.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -289,7 +289,7 @@
 
   // These functions return the addresses of the fields that define the
   // boundaries of the contiguous allocation area.  (These fields should be
-  // physicall near to one another.)
+  // physically near to one another.)
   virtual HeapWord** top_addr() const { return NULL; }
   virtual HeapWord** end_addr() const { return NULL; }
 
@@ -299,6 +299,10 @@
     guarantee(false, "Generation doesn't support thread local allocation buffers");
     return 0;
   }
+  virtual size_t tlab_used() const {
+    guarantee(false, "Generation doesn't support thread local allocation buffers");
+    return 0;
+  }
   virtual size_t unsafe_max_tlab_alloc() const {
     guarantee(false, "Generation doesn't support thread local allocation buffers");
     return 0;
@@ -485,7 +489,7 @@
   // General signature...
   virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
   // ...and specializations for de-virtualization.  (The general
-  // implemention of the _nv versions call the virtual version.
+  // implementation of the _nv versions call the virtual version.
   // Note that the _nv suffix is not really semantically necessary,
   // but it avoids some not-so-useful warnings on Solaris.)
 #define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)             \
--- a/src/share/vm/memory/heap.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/heap.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -183,7 +183,7 @@
   size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 
-  // First check if we can satify request from freelist
+  // First check if we can satisfy request from freelist
   debug_only(verify());
   HeapBlock* block = search_freelist(number_of_segments, is_critical);
   debug_only(if (VerifyCodeCacheOften) verify());
@@ -372,7 +372,7 @@
   }
 
   // Scan for right place to put into list. List
-  // is sorted by increasing addresseses
+  // is sorted by increasing addresses
   FreeBlock* prev = NULL;
   FreeBlock* cur  = _freelist;
   while(cur != NULL && cur < b) {
--- a/src/share/vm/memory/heap.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/heap.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -127,8 +127,8 @@
   // Heap extents
   bool  reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
   void  release();                               // releases all allocated memory
-  bool  expand_by(size_t size);                  // expands commited memory by size
-  void  shrink_by(size_t size);                  // shrinks commited memory by size
+  bool  expand_by(size_t size);                  // expands committed memory by size
+  void  shrink_by(size_t size);                  // shrinks committed memory by size
   void  clear();                                 // clears all heap contents
 
   // Memory allocation
--- a/src/share/vm/memory/heapInspection.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/heapInspection.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -347,7 +347,7 @@
 
 #endif // INCLUDE_SERVICES
 
-// These declarations are needed since teh declaration of KlassInfoTable and
+// These declarations are needed since the declaration of KlassInfoTable and
 // KlassInfoClosure are guarded by #if INLCUDE_SERVICES
 class KlassInfoTable;
 class KlassInfoClosure;
--- a/src/share/vm/memory/metaspace.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/metaspace.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -746,7 +746,7 @@
   assert_lock_strong(SpaceManager::expand_lock());
   _container_count++;
   assert(_container_count == container_count_slow(),
-         err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                  " container_count_slow() " SIZE_FORMAT,
                  _container_count, container_count_slow()));
 }
@@ -759,7 +759,7 @@
 #ifdef ASSERT
 void VirtualSpaceNode::verify_container_count() {
   assert(_container_count == container_count_slow(),
-    err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+    err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
             " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
 }
 #endif
@@ -2399,7 +2399,7 @@
 
 void SpaceManager::verify() {
   // If there are blocks in the dictionary, then
-  // verfication of chunks does not work since
+  // verification of chunks does not work since
   // being in the dictionary alters a chunk.
   if (block_freelists()->total_size() == 0) {
     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
@@ -2868,7 +2868,7 @@
     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
     // If compressed class space fits in lower 32G, we don't need a base.
     if (higher_address <= (address)klass_encoding_max) {
-      lower_base = 0; // effectively lower base is zero.
+      lower_base = 0; // Effectively lower base is zero.
     }
   }
 
--- a/src/share/vm/memory/metaspaceShared.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/metaspaceShared.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -487,7 +487,7 @@
   NOT_PRODUCT(SystemDictionary::verify();)
 
   // Copy the the symbol table, and the system dictionary to the shared
-  // space in usable form.  Copy the hastable
+  // space in usable form.  Copy the hashtable
   // buckets first [read-write], then copy the linked lists of entries
   // [read-only].
 
@@ -953,7 +953,7 @@
 
   // The following data in the shared misc data region are the linked
   // list elements (HashtableEntry objects) for the symbol table, string
-  // table, and shared dictionary.  The heap objects refered to by the
+  // table, and shared dictionary.  The heap objects referred to by the
   // symbol table, string table, and shared dictionary are permanent and
   // unmovable.  Since new entries added to the string and symbol tables
   // are always added at the beginning of the linked lists, THESE LINKED
--- a/src/share/vm/memory/modRefBarrierSet.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/modRefBarrierSet.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -72,7 +72,7 @@
   bool has_read_region_opt() { return false; }
 
 
-  // These operations should assert false unless the correponding operation
+  // These operations should assert false unless the corresponding operation
   // above returns true.
   void read_ref_array(MemRegion mr) {
     assert(false, "can't call");
--- a/src/share/vm/memory/referenceProcessor.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/referenceProcessor.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -45,7 +45,7 @@
 }
 
 void ReferenceProcessor::init_statics() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 
@@ -62,7 +62,7 @@
   }
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
-            "Unrecongnized RefDiscoveryPolicy");
+            "Unrecognized RefDiscoveryPolicy");
   _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
 }
 
@@ -95,11 +95,11 @@
                                        uint      mt_discovery_degree,
                                        bool      atomic_discovery,
                                        BoolObjectClosure* is_alive_non_header,
-                                       bool      discovered_list_needs_barrier)  :
+                                       bool      discovered_list_needs_post_barrier)  :
   _discovering_refs(false),
   _enqueuing_is_done(false),
   _is_alive_non_header(is_alive_non_header),
-  _discovered_list_needs_barrier(discovered_list_needs_barrier),
+  _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier),
   _processing_is_mt(mt_processing),
   _next_id(0)
 {
@@ -152,7 +152,7 @@
   // Update (advance) the soft ref master clock field. This must be done
   // after processing the soft ref list.
 
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
@@ -168,7 +168,7 @@
   // javaTimeNanos(), which is guaranteed to be monotonically
   // non-decreasing provided the underlying platform provides such
   // a time source (and it is bug free).
-  // In product mode, however, protect ourselves from non-monotonicty.
+  // In product mode, however, protect ourselves from non-monotonicity.
   if (now > _soft_ref_timestamp_clock) {
     _soft_ref_timestamp_clock = now;
     java_lang_ref_SoftReference::set_clock(now);
@@ -349,7 +349,7 @@
 
   oop obj = NULL;
   oop next_d = refs_list.head();
-  if (pending_list_uses_discovered_field()) { // New behaviour
+  if (pending_list_uses_discovered_field()) { // New behavior
     // Walk down the list, self-looping the next field
     // so that the References are not considered active.
     while (obj != next_d) {
@@ -366,7 +366,7 @@
       // Post-barrier not needed when looping to self.
       java_lang_ref_Reference::set_next_raw(obj, obj);
       if (next_d == obj) {  // obj is last
-        // Swap refs_list into pendling_list_addr and
+        // Swap refs_list into pending_list_addr and
         // set obj's discovered to what we read from pending_list_addr.
         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
         // Need post-barrier on pending_list_addr above;
@@ -376,7 +376,7 @@
         oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
       }
     }
-  } else { // Old behaviour
+  } else { // Old behavior
     // Walk down the list, copying the discovered field into
     // the next field and clearing the discovered field.
     while (obj != next_d) {
@@ -390,7 +390,7 @@
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "The reference should not be enqueued");
       if (next_d == obj) {  // obj is last
-        // Swap refs_list into pendling_list_addr and
+        // Swap refs_list into pending_list_addr and
         // set obj's next to what we read from pending_list_addr.
         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
         // Need oop_check on pending_list_addr above;
@@ -490,13 +490,13 @@
   } else {
     new_next = _next;
   }
-
-  if (UseCompressedOops) {
-    // Remove Reference object from list.
-    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
-  } else {
-    // Remove Reference object from list.
-    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
+  // Remove Reference object from discovered list. Note that G1 does not need a
+  // pre-barrier here because we know the Reference has already been found/marked;
+  // that is how it ended up in the discovered list in the first place.
+  oop_store_raw(_prev_next, new_next);
+  if (_discovered_list_needs_post_barrier && _prev_next != _refs_list.adr_head()) {
+    // Needs post-barrier and this is not the list head (which is not on the heap)
+    oopDesc::bs()->write_ref_field(_prev_next, new_next);
   }
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
@@ -544,7 +544,7 @@
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   assert(policy != NULL, "Must have a non-NULL policy");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   // Decide which softly reachable refs should be kept alive.
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
@@ -584,7 +584,7 @@
                              BoolObjectClosure* is_alive,
                              OopClosure*        keep_alive) {
   assert(discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
@@ -621,7 +621,7 @@
                                                   OopClosure*        keep_alive,
                                                   VoidClosure*       complete_gc) {
   assert(!discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
@@ -664,7 +664,7 @@
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   ResourceMark rm;
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.update_discovered();
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
@@ -782,8 +782,8 @@
 
 void ReferenceProcessor::set_discovered(oop ref, oop value) {
   java_lang_ref_Reference::set_discovered_raw(ref, value);
-  if (_discovered_list_needs_barrier) {
-    oopDesc::bs()->write_ref_field(ref, value);
+  if (_discovered_list_needs_post_barrier) {
+    oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
   }
 }
 
@@ -980,7 +980,7 @@
 
 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
   assert(!discovery_is_atomic(), "Else why call this method?");
-  DiscoveredListIterator iter(refs_list, NULL, NULL);
+  DiscoveredListIterator iter(refs_list, NULL, NULL, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());
@@ -1076,7 +1076,7 @@
   // elided this out for G1, but left in the test for some future
   // collector that might have need for a pre-barrier here, e.g.:-
   // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
-  assert(!_discovered_list_needs_barrier || UseG1GC,
+  assert(!_discovered_list_needs_post_barrier || UseG1GC,
          "Need to check non-G1 collector: "
          "may need a pre-write-barrier for CAS from NULL below");
   oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
@@ -1087,7 +1087,7 @@
     // is necessary.
     refs_list.set_head(obj);
     refs_list.inc_length(1);
-    if (_discovered_list_needs_barrier) {
+    if (_discovered_list_needs_post_barrier) {
       oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
 
@@ -1240,7 +1240,7 @@
   if (_discovery_is_mt) {
     add_to_discovered_list_mt(*list, obj, discovered_addr);
   } else {
-    // If "_discovered_list_needs_barrier", we do write barriers when
+    // If "_discovered_list_needs_post_barrier", we do write barriers when
     // updating the discovered reference list.  Otherwise, we do a raw store
     // here: the field will be visited later when processing the discovered
     // references.
@@ -1252,10 +1252,10 @@
     // pre-value, we can safely elide the pre-barrier here for the case of G1.
     // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
     assert(discovered == NULL, "control point invariant");
-    assert(!_discovered_list_needs_barrier || UseG1GC,
+    assert(!_discovered_list_needs_post_barrier || UseG1GC,
            "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
     oop_store_raw(discovered_addr, next_discovered);
-    if (_discovered_list_needs_barrier) {
+    if (_discovered_list_needs_post_barrier) {
       oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
     list->set_head(obj);
@@ -1341,7 +1341,7 @@
 // whose referents are still alive, whose referents are NULL or which
 // are not active (have a non-NULL next field). NOTE: When we are
 // thus precleaning the ref lists (which happens single-threaded today),
-// we do not disable refs discovery to honour the correct semantics of
+// we do not disable refs discovery to honor the correct semantics of
 // java.lang.Reference. As a result, we need to be careful below
 // that ref removal steps interleave safely with ref discovery steps
 // (in this thread).
@@ -1351,7 +1351,7 @@
                                                 OopClosure*        keep_alive,
                                                 VoidClosure*       complete_gc,
                                                 YieldClosure*      yield) {
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop obj = iter.obj();
--- a/src/share/vm/memory/referenceProcessor.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/referenceProcessor.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -99,6 +99,7 @@
   oop                _referent;
   OopClosure*        _keep_alive;
   BoolObjectClosure* _is_alive;
+  bool               _discovered_list_needs_post_barrier;
 
   DEBUG_ONLY(
   oop                _first_seen; // cyclic linked list check
@@ -112,7 +113,8 @@
 public:
   inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                 OopClosure*        keep_alive,
-                                BoolObjectClosure* is_alive):
+                                BoolObjectClosure* is_alive,
+                                bool               discovered_list_needs_post_barrier = false):
     _refs_list(refs_list),
     _prev_next(refs_list.adr_head()),
     _prev(NULL),
@@ -126,7 +128,8 @@
 #endif
     _next(NULL),
     _keep_alive(keep_alive),
-    _is_alive(is_alive)
+    _is_alive(is_alive),
+    _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
 { }
 
   // End Of List.
@@ -228,12 +231,12 @@
   bool        _discovery_is_mt;         // true if reference discovery is MT.
 
   // If true, setting "next" field of a discovered refs list requires
-  // write barrier(s).  (Must be true if used in a collector in which
+  // a post write barrier.  (Must be true if used in a collector in which
   // elements of a discovered list may be moved during discovery: for
   // example, a collector like Garbage-First that moves objects during a
   // long-term concurrent marking phase that does weak reference
   // discovery.)
-  bool        _discovered_list_needs_barrier;
+  bool        _discovered_list_needs_post_barrier;
 
   bool        _enqueuing_is_done;       // true if all weak references enqueued
   bool        _processing_is_mt;        // true during phases when
@@ -380,8 +383,8 @@
 
  protected:
   // Set the 'discovered' field of the given reference to
-  // the given value - emitting barriers depending upon
-  // the value of _discovered_list_needs_barrier.
+  // the given value - emitting post barriers depending upon
+  // the value of _discovered_list_needs_post_barrier.
   void set_discovered(oop ref, oop value);
 
   // "Preclean" the given discovered reference list
@@ -425,7 +428,7 @@
                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                      bool atomic_discovery = true,
                      BoolObjectClosure* is_alive_non_header = NULL,
-                     bool discovered_list_needs_barrier = false);
+                     bool discovered_list_needs_post_barrier = false);
 
   // RefDiscoveryPolicy values
   enum DiscoveryPolicy {
@@ -474,7 +477,7 @@
   bool processing_is_mt() const { return _processing_is_mt; }
   void set_mt_processing(bool mt) { _processing_is_mt = mt; }
 
-  // whether all enqueuing of weak references is complete
+  // whether all enqueueing of weak references is complete
   bool enqueuing_is_done()  { return _enqueuing_is_done; }
   void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }
 
--- a/src/share/vm/memory/resourceArea.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/resourceArea.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -196,7 +196,7 @@
 // leveraging existing data structures if we simply create a way to manage this one
 // special need for a ResourceMark. If ResourceMark simply inherited from CHeapObj
 // then existing ResourceMarks would work fine since no one use new to allocate them
-// and they would be stack allocated. This leaves open the possibilty of accidental
+// and they would be stack allocated. This leaves open the possibility of accidental
 // misuse so we simple duplicate the ResourceMark functionality here.
 
 class DeoptResourceMark: public CHeapObj<mtInternal> {
--- a/src/share/vm/memory/sharedHeap.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/sharedHeap.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -137,7 +137,6 @@
 }
 
 void SharedHeap::process_strong_roots(bool activate_scope,
-                                      bool is_scavenging,
                                       ScanningOption so,
                                       OopClosure* roots,
                                       CodeBlobClosure* code_roots,
@@ -157,9 +156,11 @@
   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
     JNIHandles::oops_do(roots);
 
+  CLDToOopClosure roots_from_clds(roots);
+  // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
+  // CLDs which are strongly reachable from the thread stacks.
+  CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
   // All threads execute this; the individual threads are task groups.
-  CLDToOopClosure roots_from_clds(roots);
-  CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
   if (CollectedHeap::use_parallel_gc_threads()) {
     Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
   } else {
@@ -187,9 +188,9 @@
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
     if (so & SO_AllClasses) {
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
+      ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
     } else if (so & SO_SystemClasses) {
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
+      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
     }
   }
 
@@ -204,17 +205,18 @@
   }
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
-    if (so & SO_CodeCache) {
+    if (so & SO_ScavengeCodeCache) {
       assert(code_roots != NULL, "must supply closure for code cache");
 
-      if (is_scavenging) {
-        // We only visit parts of the CodeCache when scavenging.
-        CodeCache::scavenge_root_nmethods_do(code_roots);
-      } else {
-        // CMSCollector uses this to do intermediate-strength collections.
-        // We scan the entire code cache, since CodeCache::do_unloading is not called.
-        CodeCache::blobs_do(code_roots);
-      }
+      // We only visit parts of the CodeCache when scavenging.
+      CodeCache::scavenge_root_nmethods_do(code_roots);
+    }
+    if (so & SO_AllCodeCache) {
+      assert(code_roots != NULL, "must supply closure for code cache");
+
+      // CMSCollector uses this to do intermediate-strength collections.
+      // We scan the entire code cache, since CodeCache::do_unloading is not called.
+      CodeCache::blobs_do(code_roots);
     }
     // Verify that the code cache contents are not subject to
     // movement by a scavenging collection.
--- a/src/share/vm/memory/sharedHeap.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/sharedHeap.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -92,7 +92,7 @@
 //  0 is a "special" value in set_n_threads() which translates to
 //  setting _n_threads to 1.
 //
-//  Some code uses _n_terminiation to decide if work should be done in
+//  Some code uses _n_termination to decide if work should be done in
 //  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
 //  is an example of such code.  Look for variable "is_par" for other
 //  examples.
@@ -221,7 +221,8 @@
     SO_AllClasses          = 0x1,
     SO_SystemClasses       = 0x2,
     SO_Strings             = 0x4,
-    SO_CodeCache           = 0x8
+    SO_AllCodeCache        = 0x8,
+    SO_ScavengeCodeCache   = 0x10
   };
 
   FlexibleWorkGang* workers() const { return _workers; }
@@ -232,9 +233,9 @@
   // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   // "SO_SystemClasses" to all the "system" classes and loaders;
   // "SO_Strings" applies the closure to all entries in StringTable;
-  // "SO_CodeCache" applies the closure to all elements of the CodeCache.
+  // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
+  // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
   void process_strong_roots(bool activate_scope,
-                            bool is_scavenging,
                             ScanningOption so,
                             OopClosure* roots,
                             CodeBlobClosure* code_roots,
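
Splitting SO_CodeCache into SO_ScavengeCodeCache and SO_AllCodeCache folds the old is_scavenging flag into the ScanningOption bitmask itself: a scavenge passes SO_ScavengeCodeCache to walk only the scavenge-root nmethod list, while CMS-style intermediate collections pass SO_AllCodeCache to walk every blob. A minimal sketch of building and testing such a mask; the enum values are copied from the hunk above, but the walk callbacks are hypothetical stand-ins:

    #include <cstdio>

    // Values copied from the ScanningOption hunk above.
    enum ScanningOption {
      SO_AllClasses        = 0x1,
      SO_SystemClasses     = 0x2,
      SO_Strings           = 0x4,
      SO_AllCodeCache      = 0x8,
      SO_ScavengeCodeCache = 0x10
    };

    // Hypothetical stand-ins for the two code-cache walks.
    static void scavenge_root_nmethods_do() { std::puts("scavenge roots only"); }
    static void all_blobs_do()              { std::puts("entire code cache"); }

    static void process_code_cache_roots(int so) {
      if (so & SO_ScavengeCodeCache) {
        scavenge_root_nmethods_do();   // only parts of the CodeCache
      }
      if (so & SO_AllCodeCache) {
        all_blobs_do();                // e.g. CMS intermediate collections
      }
    }

    int main() {
      // A young collection, as in the defNewGeneration.cpp hunk above:
      process_code_cache_roots(SO_AllClasses | SO_Strings | SO_ScavengeCodeCache);
      return 0;
    }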
--- a/src/share/vm/memory/space.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/space.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -112,7 +112,7 @@
   // cards are processed. For instance, CMS must remember mutator updates
   // (i.e. dirty cards) so as to re-scan mutated objects.
   // Such work can be piggy-backed here on dirty card scanning, so as to make
-  // it slightly more efficient than doing a complete non-detructive pre-scan
+  // it slightly more efficient than doing a complete non-destructive pre-scan
   // of the card table.
   MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
   if (pCl != NULL) {
@@ -324,8 +324,8 @@
 }
 
 void OffsetTableContigSpace::set_end(HeapWord* new_end) {
-  // Space should not advertize an increase in size
-  // until after the underlying offest table has been enlarged.
+  // Space should not advertise an increase in size
+  // until after the underlying offset table has been enlarged.
   _offsets.resize(pointer_delta(new_end, bottom()));
   Space::set_end(new_end);
 }
@@ -729,7 +729,7 @@
   object_iterate_from(bm, blk);
 }
 
-// For a continguous space object_iterate() and safe_object_iterate()
+// For a ContiguousSpace, object_iterate() and safe_object_iterate()
 // are the same.
 void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
--- a/src/share/vm/memory/space.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/space.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -59,7 +59,7 @@
 
 // Here's the Space hierarchy:
 //
-// - Space               -- an asbtract base class describing a heap area
+// - Space               -- an abstract base class describing a heap area
 //   - CompactibleSpace  -- a space supporting compaction
 //     - CompactibleFreeListSpace -- (used for CMS generation)
 //     - ContiguousSpace -- a compactible space in which all free space
@@ -162,7 +162,7 @@
   // (that is, if the space is contiguous), then this region must contain only
   // such objects: the memregion will be from the bottom of the region to the
   // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
-  // the space must distiguish between objects in the region allocated before
+  // the space must distinguish between objects in the region allocated before
   // and after the call to save marks.
   virtual MemRegion used_region_at_save_marks() const {
     return MemRegion(bottom(), saved_mark_word());
@@ -193,7 +193,7 @@
 
   // Returns true iff the given the space contains the
   // given address as part of an allocated object. For
-  // ceratin kinds of spaces, this might be a potentially
+  // certain kinds of spaces, this might be a potentially
   // expensive operation. To prevent performance problems
   // on account of its inadvertent use in product jvm's,
   // we restrict its use to assertion checks only.
@@ -247,13 +247,13 @@
   // Return an address indicating the extent of the iteration in the
   // event that the iteration had to return because of finding an
   // uninitialized object in the space, or if the closure "cl"
-  // signalled early termination.
+  // signaled early termination.
   virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
   virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                              ObjectClosureCareful* cl);
 
   // Create and return a new dirty card to oop closure. Can be
-  // overriden to return the appropriate type of closure
+  // overridden to return the appropriate type of closure
   // depending on the type of space in which the closure will
   // operate. ResourceArea allocated.
   virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
@@ -477,13 +477,13 @@
   // be one, since compaction must succeed -- we go to the first space of
   // the previous generation if necessary, updating "cp"), reset compact_top
   // and then forward.  In either case, returns the new value of "compact_top".
-  // If the forwarding crosses "cp->threshold", invokes the "cross_threhold"
+  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
   // function of the then-current compaction space, and updates "cp->threshold
   // accordingly".
   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                     HeapWord* compact_top);
 
-  // Return a size with adjusments as required of the space.
+  // Return a size with adjustments as required of the space.
   virtual size_t adjust_object_size_v(size_t size) const { return size; }
 
 protected:
@@ -503,7 +503,7 @@
 
   // Requires "allowed_deadspace_words > 0", that "q" is the start of a
   // free block of the given "word_len", and that "q", were it an object,
-  // would not move if forwared.  If the size allows, fill the free
+  // would not move if forwarded.  If the size allows, fill the free
   // block with an object, to prevent excessive compaction.  Returns "true"
   // iff the free region was made deadspace, and modifies
   // "allowed_deadspace_words" to reflect the number of available deadspace
--- a/src/share/vm/memory/tenuredGeneration.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/tenuredGeneration.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -135,7 +135,7 @@
                     free());
     }
   }
-  // If we had to expand to accomodate promotions from younger generations
+  // If we had to expand to accommodate promotions from younger generations
   if (!result && _capacity_at_prologue < capacity()) {
     result = true;
     if (PrintGC && Verbose) {
--- a/src/share/vm/memory/threadLocalAllocBuffer.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/threadLocalAllocBuffer.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -34,6 +34,7 @@
 // Thread-Local Edens support
 
 // static member initialization
+size_t           ThreadLocalAllocBuffer::_max_size       = 0;
 unsigned         ThreadLocalAllocBuffer::_target_refills = 0;
 GlobalTLABStats* ThreadLocalAllocBuffer::_global_stats   = NULL;
 
@@ -45,7 +46,7 @@
 void ThreadLocalAllocBuffer::accumulate_statistics_before_gc() {
   global_stats()->initialize();
 
-  for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
     thread->tlab().accumulate_statistics();
     thread->tlab().initialize_statistics();
   }
@@ -60,28 +61,32 @@
 }
 
 void ThreadLocalAllocBuffer::accumulate_statistics() {
-  size_t capacity = Universe::heap()->tlab_capacity(myThread()) / HeapWordSize;
-  size_t unused   = Universe::heap()->unsafe_max_tlab_alloc(myThread()) / HeapWordSize;
-  size_t used     = capacity - unused;
-
-  // Update allocation history if a reasonable amount of eden was allocated.
-  bool update_allocation_history = used > 0.5 * capacity;
+  Thread* thread = myThread();
+  size_t capacity = Universe::heap()->tlab_capacity(thread);
+  size_t used     = Universe::heap()->tlab_used(thread);
 
   _gc_waste += (unsigned)remaining();
+  size_t total_allocated = thread->allocated_bytes();
+  size_t allocated_since_last_gc = total_allocated - _allocated_before_last_gc;
+  _allocated_before_last_gc = total_allocated;
 
   if (PrintTLAB && (_number_of_refills > 0 || Verbose)) {
     print_stats("gc");
   }
 
   if (_number_of_refills > 0) {
+    // Update allocation history if a reasonable amount of eden was allocated.
+    bool update_allocation_history = used > 0.5 * capacity;
 
     if (update_allocation_history) {
       // Average the fraction of eden allocated in a tlab by this
       // thread for use in the next resize operation.
       // _gc_waste is not subtracted because it's included in
       // "used".
-      size_t allocation = _number_of_refills * desired_size();
-      double alloc_frac = allocation / (double) used;
+      // The result can be larger than 1.0 due to direct-to-old allocations.
+      // These allocations should ideally not be counted, but since it is not
+      // possible to filter them out here we just cap the fraction at 1.0.
+      double alloc_frac = MIN2(1.0, (double) allocated_since_last_gc / used);
       _allocation_fraction.sample(alloc_frac);
     }
     global_stats()->update_allocating_threads();
@@ -126,33 +131,32 @@
 }
 
 void ThreadLocalAllocBuffer::resize_all_tlabs() {
-  for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
-    thread->tlab().resize();
+  if (ResizeTLAB) {
+    for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+      thread->tlab().resize();
+    }
   }
 }
 
 void ThreadLocalAllocBuffer::resize() {
+  // Compute the next tlab size using the expected allocation amount.
+  assert(ResizeTLAB, "Should not call this otherwise");
+  size_t alloc = (size_t)(_allocation_fraction.average() *
+                          (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize));
+  size_t new_size = alloc / _target_refills;
 
-  if (ResizeTLAB) {
-    // Compute the next tlab size using expected allocation amount
-    size_t alloc = (size_t)(_allocation_fraction.average() *
-                            (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize));
-    size_t new_size = alloc / _target_refills;
-
-    new_size = MIN2(MAX2(new_size, min_size()), max_size());
-
-    size_t aligned_new_size = align_object_size(new_size);
+  new_size = MIN2(MAX2(new_size, min_size()), max_size());
 
-    if (PrintTLAB && Verbose) {
-      gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
-                          " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n",
-                          myThread(), myThread()->osthread()->thread_id(),
-                          _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
-    }
-    set_desired_size(aligned_new_size);
+  size_t aligned_new_size = align_object_size(new_size);
 
-    set_refill_waste_limit(initial_refill_waste_limit());
+  if (PrintTLAB && Verbose) {
+    gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
+                        " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n",
+                        myThread(), myThread()->osthread()->thread_id(),
+                        _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
   }
+  set_desired_size(aligned_new_size);
+  set_refill_waste_limit(initial_refill_waste_limit());
 }
 
 void ThreadLocalAllocBuffer::initialize_statistics() {
@@ -248,31 +252,13 @@
   return init_sz;
 }
 
-const size_t ThreadLocalAllocBuffer::max_size() {
-
-  // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
-  // This restriction could be removed by enabling filling with multiple arrays.
-  // If we compute that the reasonable way as
-  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
-  // we'll overflow on the multiply, so we do the divide first.
-  // We actually lose a little by dividing first,
-  // but that just makes the TLAB  somewhat smaller than the biggest array,
-  // which is fine, since we'll be able to fill that.
-
-  size_t unaligned_max_size = typeArrayOopDesc::header_size(T_INT) +
-                              sizeof(jint) *
-                              ((juint) max_jint / (size_t) HeapWordSize);
-  return align_size_down(unaligned_max_size, MinObjAlignment);
-}
-
 void ThreadLocalAllocBuffer::print_stats(const char* tag) {
   Thread* thrd = myThread();
   size_t waste = _gc_waste + _slow_refill_waste + _fast_refill_waste;
   size_t alloc = _number_of_refills * _desired_size;
   double waste_percent = alloc == 0 ? 0.0 :
                       100.0 * waste / alloc;
-  size_t tlab_used  = Universe::heap()->tlab_capacity(thrd) -
-                      Universe::heap()->unsafe_max_tlab_alloc(thrd);
+  size_t tlab_used  = Universe::heap()->tlab_used(thrd);
   gclog_or_tty->print("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
                       " desired_size: " SIZE_FORMAT "KB"
                       " slow allocs: %d  refill waste: " SIZE_FORMAT "B"
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -45,7 +45,9 @@
   HeapWord* _end;                                // allocation end (excluding alignment_reserve)
   size_t    _desired_size;                       // desired size   (including alignment_reserve)
   size_t    _refill_waste_limit;                 // hold onto tlab if free() is larger than this
+  size_t    _allocated_before_last_gc;           // total bytes allocated up until the last gc
 
+  static size_t   _max_size;                     // maximum size of any TLAB
   static unsigned _target_refills;               // expected number of refills between GCs
 
   unsigned  _number_of_refills;
@@ -99,12 +101,13 @@
   static GlobalTLABStats* global_stats() { return _global_stats; }
 
 public:
-  ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight) {
+  ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0) {
     // do nothing.  tlabs must be inited by initialize() calls
   }
 
   static const size_t min_size()                 { return align_object_size(MinTLABSize / HeapWordSize); }
-  static const size_t max_size();
+  static const size_t max_size()                 { assert(_max_size != 0, "max_size not set up"); return _max_size; }
+  static void set_max_size(size_t max_size)      { _max_size = max_size; }
 
   HeapWord* start() const                        { return _start; }
   HeapWord* end() const                          { return _end; }
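
The reworked TLAB statistics derive each thread's eden fraction from bytes actually allocated since the last GC rather than from _number_of_refills * desired_size, capping at 1.0 because direct-to-old allocations inflate the thread's byte counter but not eden's "used". A small sketch of the sampling and resize arithmetic; the heap queries are assumed stand-ins with illustrative values:

    #include <algorithm>
    #include <cstddef>

    // Assumed stand-ins for Universe::heap()->tlab_capacity(thread) and
    // Universe::heap()->tlab_used(thread), in bytes.
    static size_t tlab_capacity_bytes() { return 64u * 1024 * 1024; }
    static size_t tlab_used_bytes()     { return 48u * 1024 * 1024; }

    // Mirrors accumulate_statistics(): fraction of eden this thread
    // allocated since the last GC, capped at 1.0.
    static double sample_alloc_fraction(size_t allocated_since_last_gc) {
      size_t used = tlab_used_bytes();
      return std::min(1.0, (double)allocated_since_last_gc / used);
    }

    // Mirrors resize(): expected allocation split across the target number
    // of refills, then clamped to [min_size, max_size].
    static size_t next_tlab_size(double avg_alloc_fraction,
                                 unsigned target_refills,
                                 size_t min_size, size_t max_size) {
      size_t alloc    = (size_t)(avg_alloc_fraction * tlab_capacity_bytes());
      size_t new_size = alloc / target_refills;
      return std::min(std::max(new_size, min_size), max_size);
    }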
--- a/src/share/vm/memory/universe.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/memory/universe.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -816,6 +816,8 @@
     Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
   }
 
+  ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
+
   jint status = Universe::heap()->initialize();
   if (status != JNI_OK) {
     return status;
@@ -1141,7 +1143,7 @@
       SystemDictionary::ProtectionDomain_klass(), m);;
   }
 
-  // The folowing is initializing converter functions for serialization in
+  // The following is initializing converter functions for serialization in
   // JVM.cpp. If we clean up the StrictMath code above we may want to find
   // a better solution for this as well.
   initialize_converter_functions();
@@ -1183,7 +1185,7 @@
   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
 
   // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
+  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
   KlassDepChange changes(dependee);
@@ -1204,7 +1206,7 @@
   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
 
   // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
+  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
   CallSiteDepChange changes(call_site(), method_handle());
@@ -1235,7 +1237,7 @@
   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
 
   // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
+  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
   // Compute the dependent nmethods
--- a/src/share/vm/oops/constantPool.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/oops/constantPool.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,9 @@
 void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
   MetadataFactory::free_metadata(loader_data, cache());
   set_cache(NULL);
+  MetadataFactory::free_array<u2>(loader_data, reference_map());
+  set_reference_map(NULL);
+
   MetadataFactory::free_array<jushort>(loader_data, operands());
   set_operands(NULL);
 
@@ -1874,7 +1877,6 @@
 // Printing
 
 void ConstantPool::print_on(outputStream* st) const {
-  EXCEPTION_MARK;
   assert(is_constantPool(), "must be constantPool");
   st->print_cr(internal_name());
   if (flags() != 0) {
--- a/src/share/vm/oops/instanceKlass.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/oops/instanceKlass.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -554,6 +554,7 @@
     if (hk == NULL) {
       return NULL;
     } else {
+      assert(*hk != NULL, "host klass should always be set if the address is not null");
       return *hk;
     }
   }
--- a/src/share/vm/oops/metadata.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/oops/metadata.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
   int identity_hash()                { return (int)(uintptr_t)this; }
 
   // Rehashing support for tables containing pointers to this
-  unsigned int new_hash(jint seed)   { ShouldNotReachHere();  return 0; }
+  unsigned int new_hash(juint seed)   { ShouldNotReachHere();  return 0; }
 
   virtual bool is_klass()              const volatile { return false; }
   virtual bool is_method()             const volatile { return false; }
--- a/src/share/vm/oops/method.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/oops/method.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -38,13 +38,11 @@
 #include "utilities/accessFlags.hpp"
 #include "utilities/growableArray.hpp"
 
-// A Method* represents a Java method.
+// A Method represents a Java method.
 //
 // Memory layout (each line represents a word). Note that most applications load thousands of methods,
 // so keeping the size of this structure small has a big impact on footprint.
 //
-// We put all oops and method_size first for better gc cache locality.
-//
 // The actual bytecodes are inlined after the end of the Method struct.
 //
 // There are bits in the access_flags telling whether inlined tables are present.
@@ -64,17 +62,17 @@
 // | header                                               |
 // | klass                                                |
 // |------------------------------------------------------|
-// | ConstMethod*                   (oop)                 |
+// | ConstMethod*                   (metadata)            |
 // |------------------------------------------------------|
-// | methodData                     (oop)                 |
-// | methodCounters                                       |
+// | MethodData*                    (metadata)            |
+// | MethodCounters                                       |
 // |------------------------------------------------------|
 // | access_flags                                         |
 // | vtable_index                                         |
 // |------------------------------------------------------|
 // | result_index (C++ interpreter only)                  |
 // |------------------------------------------------------|
-// | method_size             |   intrinsic_id|   flags    |
+// | method_size             | intrinsic_id  |   flags    |
 // |------------------------------------------------------|
 // | code                           (pointer)             |
 // | i2i                            (pointer)             |
--- a/src/share/vm/oops/oop.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/oops/oop.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,7 +102,7 @@
 }
 
 // When String table needs to rehash
-unsigned int oopDesc::new_hash(jint seed) {
+unsigned int oopDesc::new_hash(juint seed) {
   EXCEPTION_MARK;
   ResourceMark rm;
   int length;
--- a/src/share/vm/oops/oop.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/oops/oop.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -362,7 +362,7 @@
   intptr_t slow_identity_hash();
 
   // Alternate hashing code if string table is rehashed
-  unsigned int new_hash(jint seed);
+  unsigned int new_hash(juint seed);
 
   // marks are forwarded to stack when object is locked
   bool     has_displaced_mark() const;
--- a/src/share/vm/oops/symbol.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/oops/symbol.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -207,7 +207,7 @@
 }
 
 // Alternate hashing for unbalanced symbol tables.
-unsigned int Symbol::new_hash(jint seed) {
+unsigned int Symbol::new_hash(juint seed) {
   ResourceMark rm;
   // Use alternate hashing algorithm on this symbol.
   return AltHashing::murmur3_32(seed, (const jbyte*)as_C_string(), utf8_length());
--- a/src/share/vm/oops/symbol.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/oops/symbol.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -154,7 +154,7 @@
   int identity_hash()       { return _identity_hash; }
 
   // For symbol table alternate hashing
-  unsigned int new_hash(jint seed);
+  unsigned int new_hash(juint seed);
 
   // Reference counting.  See comments above this class for when to use.
   int refcount() const      { return _refcount; }
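
The new_hash signature change from jint to juint keeps the rehash seed unsigned end-to-end, matching the unsigned arithmetic in AltHashing::murmur3_32 and avoiding implementation-defined right shifts that a negative signed seed could otherwise introduce. A tiny standard-C++ illustration (not HotSpot code) of why the unsigned type is preferable in the mixing steps:

    #include <cstdint>
    #include <cstdio>

    // Right-shifting a negative signed value is implementation-defined;
    // with an unsigned seed every mixing step is well defined.
    static uint32_t mix(uint32_t seed) {
      seed ^= seed >> 16;    // well-defined logical shift
      seed *= 0x85ebca6bu;   // well-defined modular multiply
      return seed;
    }

    int main() {
      int32_t  signed_seed   = -1;
      uint32_t unsigned_seed = (uint32_t)signed_seed;  // explicit, lossless conversion
      std::printf("%u\n", mix(unsigned_seed));
      return 0;
    }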
--- a/src/share/vm/opto/runtime.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/opto/runtime.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -571,8 +571,7 @@
 const TypeFunc *OptoRuntime::uncommon_trap_Type() {
   // create input type (domain)
   const Type **fields = TypeTuple::fields(1);
-  // Symbol* name of class to be loaded
-  fields[TypeFunc::Parms+0] = TypeInt::INT;
+  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
 
   // create result type (range)
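
The corrected comment says the single INT parameter carries both the deoptimization reason and the action. One common way to encode two small enums in one int is bit-packing; the field widths below are assumptions for this sketch, not HotSpot's actual trap_request layout:

    #include <cassert>

    const int reason_bits = 8;  // assumed width, for illustration only

    inline int make_trap_request(int reason, int action) {
      assert(reason >= 0 && reason < (1 << reason_bits));
      return (action << reason_bits) | reason;
    }
    inline int trap_request_reason(int request) { return request & ((1 << reason_bits) - 1); }
    inline int trap_request_action(int request) { return request >> reason_bits; }
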
--- a/src/share/vm/prims/jni.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/prims/jni.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -433,8 +433,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, FindClass__entry, env, name);
 #else /* USDT2 */
-  HOTSPOT_JNI_FINDCLASS_ENTRY(
-                              env, (char *)name);
+  HOTSPOT_JNI_FINDCLASS_ENTRY(env, (char *)name);
 #endif /* USDT2 */
 
   jclass result = NULL;
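
Most of the jni.cpp churn below is mechanical: multi-line USDT2 probe invocations are collapsed onto one line each. The idiom tolerates that because a probe macro can expand to a no-op when tracing support is compiled out, keeping every call site a single unconditional line. A stand-in sketch (the macro name is illustrative, not a generated HOTSPOT_JNI_* macro):

    #include <cstdio>

    #ifdef HAVE_TRACING
      #define PROBE_FINDCLASS_ENTRY(name) std::printf("probe: FindClass(%s)\n", (name))
    #else
      #define PROBE_FINDCLASS_ENTRY(name) ((void)0)  // compiles to nothing
    #endif

    void on_find_class(const char* name) {
      PROBE_FINDCLASS_ENTRY(name);  // one-line probe, as in the hunks below
      (void)name;                   // silence unused warning in the no-probe build
    }
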
@@ -511,8 +510,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, FromReflectedMethod__entry, env, method);
 #else /* USDT2 */
-  HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY(
-                                        env, method);
+  HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY(env, method);
 #endif /* USDT2 */
   jmethodID ret = NULL;
   DT_RETURN_MARK(FromReflectedMethod, jmethodID, (const jmethodID&)ret);
@@ -552,8 +550,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, FromReflectedField__entry, env, field);
 #else /* USDT2 */
-  HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY(
-                                       env, field);
+  HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY(env, field);
 #endif /* USDT2 */
   jfieldID ret = NULL;
   DT_RETURN_MARK(FromReflectedField, jfieldID, (const jfieldID&)ret);
@@ -601,8 +598,7 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, ToReflectedMethod__entry, env, cls, method_id, isStatic);
 #else /* USDT2 */
-  HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY(
-                                      env, cls, (uintptr_t) method_id, isStatic);
+  HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY(env, cls, (uintptr_t) method_id, isStatic);
 #endif /* USDT2 */
   jobject ret = NULL;
   DT_RETURN_MARK(ToReflectedMethod, jobject, (const jobject&)ret);
@@ -631,8 +627,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, GetSuperclass__entry, env, sub);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSUPERCLASS_ENTRY(
-                                  env, sub);
+  HOTSPOT_JNI_GETSUPERCLASS_ENTRY(env, sub);
 #endif /* USDT2 */
   jclass obj = NULL;
   DT_RETURN_MARK(GetSuperclass, jclass, (const jclass&)obj);
@@ -665,8 +660,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, IsAssignableFrom__entry, env, sub, super);
 #else /* USDT2 */
-  HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY(
-                                     env, sub, super);
+  HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY(env, sub, super);
 #endif /* USDT2 */
   oop sub_mirror   = JNIHandles::resolve_non_null(sub);
   oop super_mirror = JNIHandles::resolve_non_null(super);
@@ -676,8 +670,7 @@
 #ifndef USDT2
     DTRACE_PROBE1(hotspot_jni, IsAssignableFrom__return, ret);
 #else /* USDT2 */
-    HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(
-                                        ret);
+    HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(ret);
 #endif /* USDT2 */
     return ret;
   }
@@ -689,8 +682,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, IsAssignableFrom__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(
-                                      ret);
+  HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -707,8 +699,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, Throw__entry, env, obj);
 #else /* USDT2 */
-  HOTSPOT_JNI_THROW_ENTRY(
- env, obj);
+  HOTSPOT_JNI_THROW_ENTRY(env, obj);
 #endif /* USDT2 */
   jint ret = JNI_OK;
   DT_RETURN_MARK(Throw, jint, (const jint&)ret);
@@ -729,8 +720,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, ThrowNew__entry, env, clazz, message);
 #else /* USDT2 */
-  HOTSPOT_JNI_THROWNEW_ENTRY(
-                             env, clazz, (char *) message);
+  HOTSPOT_JNI_THROWNEW_ENTRY(env, clazz, (char *) message);
 #endif /* USDT2 */
   jint ret = JNI_OK;
   DT_RETURN_MARK(ThrowNew, jint, (const jint&)ret);
@@ -763,8 +753,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, ExceptionOccurred__entry, env);
 #else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY(
-                                      env);
+  HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY(env);
 #endif /* USDT2 */
   jni_check_async_exceptions(thread);
   oop exception = thread->pending_exception();
@@ -772,8 +761,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, ExceptionOccurred__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN(
-                                       ret);
+  HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -784,8 +772,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, ExceptionDescribe__entry, env);
 #else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY(
-                                      env);
+  HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY(env);
 #endif /* USDT2 */
   if (thread->has_pending_exception()) {
     Handle ex(thread, thread->pending_exception());
@@ -825,8 +812,7 @@
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, ExceptionDescribe__return);
 #else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN(
-                                       );
+  HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -836,8 +822,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, ExceptionClear__entry, env);
 #else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY(
-                                   env);
+  HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY(env);
 #endif /* USDT2 */
 
   // The jni code might be using this API to clear java thrown exception.
@@ -850,8 +835,7 @@
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, ExceptionClear__return);
 #else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN(
-                                    );
+  HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -861,8 +845,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, FatalError__entry, env, msg);
 #else /* USDT2 */
-  HOTSPOT_JNI_FATALERROR_ENTRY(
-                               env, (char *) msg);
+  HOTSPOT_JNI_FATALERROR_ENTRY(env, (char *) msg);
 #endif /* USDT2 */
   tty->print_cr("FATAL ERROR in native method: %s", msg);
   thread->print_stack();
@@ -875,16 +858,14 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, PushLocalFrame__entry, env, capacity);
 #else /* USDT2 */
-  HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY(
-                                   env, capacity);
+  HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY(env, capacity);
 #endif /* USDT2 */
   //%note jni_11
   if (capacity < 0 || capacity > MAX_REASONABLE_LOCAL_CAPACITY) {
 #ifndef USDT2
     DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, JNI_ERR);
 #else /* USDT2 */
-    HOTSPOT_JNI_PUSHLOCALFRAME_RETURN(
-                                      (uint32_t)JNI_ERR);
+    HOTSPOT_JNI_PUSHLOCALFRAME_RETURN((uint32_t)JNI_ERR);
 #endif /* USDT2 */
     return JNI_ERR;
   }
@@ -897,8 +878,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_PUSHLOCALFRAME_RETURN(
-                                    ret);
+  HOTSPOT_JNI_PUSHLOCALFRAME_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -909,8 +889,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, PopLocalFrame__entry, env, result);
 #else /* USDT2 */
-  HOTSPOT_JNI_POPLOCALFRAME_ENTRY(
-                                  env, result);
+  HOTSPOT_JNI_POPLOCALFRAME_ENTRY(env, result);
 #endif /* USDT2 */
   //%note jni_11
   Handle result_handle(thread, JNIHandles::resolve(result));
@@ -929,8 +908,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, PopLocalFrame__return, result);
 #else /* USDT2 */
-  HOTSPOT_JNI_POPLOCALFRAME_RETURN(
-                                   result);
+  HOTSPOT_JNI_POPLOCALFRAME_RETURN(result);
 #endif /* USDT2 */
   return result;
 JNI_END
@@ -941,16 +919,14 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, NewGlobalRef__entry, env, ref);
 #else /* USDT2 */
-  HOTSPOT_JNI_NEWGLOBALREF_ENTRY(
-                                 env, ref);
+  HOTSPOT_JNI_NEWGLOBALREF_ENTRY(env, ref);
 #endif /* USDT2 */
   Handle ref_handle(thread, JNIHandles::resolve(ref));
   jobject ret = JNIHandles::make_global(ref_handle);
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, NewGlobalRef__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_NEWGLOBALREF_RETURN(
-                                  ret);
+  HOTSPOT_JNI_NEWGLOBALREF_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -961,15 +937,13 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, DeleteGlobalRef__entry, env, ref);
 #else /* USDT2 */
-  HOTSPOT_JNI_DELETEGLOBALREF_ENTRY(
-                                    env, ref);
+  HOTSPOT_JNI_DELETEGLOBALREF_ENTRY(env, ref);
 #endif /* USDT2 */
   JNIHandles::destroy_global(ref);
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, DeleteGlobalRef__return);
 #else /* USDT2 */
-  HOTSPOT_JNI_DELETEGLOBALREF_RETURN(
-                                     );
+  HOTSPOT_JNI_DELETEGLOBALREF_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -978,15 +952,13 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, DeleteLocalRef__entry, env, obj);
 #else /* USDT2 */
-  HOTSPOT_JNI_DELETELOCALREF_ENTRY(
-                                   env, obj);
+  HOTSPOT_JNI_DELETELOCALREF_ENTRY(env, obj);
 #endif /* USDT2 */
   JNIHandles::destroy_local(obj);
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, DeleteLocalRef__return);
 #else /* USDT2 */
-  HOTSPOT_JNI_DELETELOCALREF_RETURN(
-                                    );
+  HOTSPOT_JNI_DELETELOCALREF_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -995,8 +967,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, IsSameObject__entry, env, r1, r2);
 #else /* USDT2 */
-  HOTSPOT_JNI_ISSAMEOBJECT_ENTRY(
-                                 env, r1, r2);
+  HOTSPOT_JNI_ISSAMEOBJECT_ENTRY(env, r1, r2);
 #endif /* USDT2 */
   oop a = JNIHandles::resolve(r1);
   oop b = JNIHandles::resolve(r2);
@@ -1004,8 +975,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, IsSameObject__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_ISSAMEOBJECT_RETURN(
-                                  ret);
+  HOTSPOT_JNI_ISSAMEOBJECT_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -1016,15 +986,13 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, NewLocalRef__entry, env, ref);
 #else /* USDT2 */
-  HOTSPOT_JNI_NEWLOCALREF_ENTRY(
-                                env, ref);
+  HOTSPOT_JNI_NEWLOCALREF_ENTRY(env, ref);
 #endif /* USDT2 */
   jobject ret = JNIHandles::make_local(env, JNIHandles::resolve(ref));
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, NewLocalRef__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_NEWLOCALREF_RETURN(
-                                 ret);
+  HOTSPOT_JNI_NEWLOCALREF_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -1034,8 +1002,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, EnsureLocalCapacity__entry, env, capacity);
 #else /* USDT2 */
-  HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY(
-                                        env, capacity);
+  HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY(env, capacity);
 #endif /* USDT2 */
   jint ret;
   if (capacity >= 0 && capacity <= MAX_REASONABLE_LOCAL_CAPACITY) {
@@ -1046,8 +1013,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, EnsureLocalCapacity__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN(
-                                         ret);
+  HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -1058,8 +1024,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, GetObjectRefType__entry, env, obj);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY(
-                                     env, obj);
+  HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY(env, obj);
 #endif /* USDT2 */
   jobjectRefType ret;
   if (JNIHandles::is_local_handle(thread, obj) ||
@@ -1074,8 +1039,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetObjectRefType__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN(
-                                      (void *) ret);
+  HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN((void *) ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -1356,9 +1320,13 @@
       // interface call
       KlassHandle h_holder(THREAD, holder);
 
-      int itbl_index = m->itable_index();
-      Klass* k = h_recv->klass();
-      selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
+      if (call_type == JNI_VIRTUAL) {
+        int itbl_index = m->itable_index();
+        Klass* k = h_recv->klass();
+        selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
+      } else {
+        selected_method = m;
+      }
     }
   }
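
This hunk fixes interface-method invocation through JNI: only a virtual call should select the target through the receiver's itable; a CallNonvirtual variant must invoke exactly the method that was resolved. A stand-in sketch of that dispatch decision (types are illustrative, not HotSpot's Method/InstanceKlass):

    struct Method { const char* name; };

    struct Receiver {
      Method* itable[8];
      Method* method_at_itable(int index) { return itable[index]; }
    };

    enum CallType { CALL_VIRTUAL, CALL_NONVIRTUAL };

    Method* select_method(CallType call_type, Receiver* recv, Method* resolved, int itbl_index) {
      if (call_type == CALL_VIRTUAL) {
        return recv->method_at_itable(itbl_index);  // runtime selection via itable
      }
      return resolved;  // nonvirtual: bypass dynamic dispatch entirely
    }
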
 
@@ -1391,6 +1359,10 @@
 
 static instanceOop alloc_object(jclass clazz, TRAPS) {
   KlassHandle k(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz)));
+  if (k == NULL) {
+    ResourceMark rm(THREAD);
+    THROW_(vmSymbols::java_lang_InstantiationException(), NULL);
+  }
   k()->check_valid_for_instantiation(false, CHECK_NULL);
   InstanceKlass::cast(k())->initialize(CHECK_NULL);
   instanceOop ih = InstanceKlass::cast(k())->allocate_instance(THREAD);
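
The added NULL check covers a jclass that denotes a primitive type: the mirror for, say, int.class has no Klass behind it, so AllocObject must raise InstantiationException rather than dereference NULL. A stand-in sketch of the guard (resolver and types are hypothetical):

    #include <stdexcept>

    struct Klass { /* stand-in for the VM metadata class */ };

    // Hypothetical resolver: primitive mirrors yield no Klass.
    Klass* as_klass_or_null(bool is_primitive_mirror) {
      static Klass k;
      return is_primitive_mirror ? nullptr : &k;
    }

    Klass* checked_alloc_klass(bool is_primitive_mirror) {
      Klass* k = as_klass_or_null(is_primitive_mirror);
      if (k == nullptr) {
        // stand-in for THROW_(vmSymbols::java_lang_InstantiationException(), NULL)
        throw std::runtime_error("java.lang.InstantiationException");
      }
      return k;
    }
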
@@ -1410,8 +1382,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, AllocObject__entry, env, clazz);
 #else /* USDT2 */
-  HOTSPOT_JNI_ALLOCOBJECT_ENTRY(
-                                env, clazz);
+  HOTSPOT_JNI_ALLOCOBJECT_ENTRY(env, clazz);
 #endif /* USDT2 */
   jobject ret = NULL;
   DT_RETURN_MARK(AllocObject, jobject, (const jobject&)ret);
@@ -1433,8 +1404,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, NewObjectA__entry, env, clazz, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_NEWOBJECTA_ENTRY(
-                               env, clazz, (uintptr_t) methodID);
+  HOTSPOT_JNI_NEWOBJECTA_ENTRY(env, clazz, (uintptr_t) methodID);
 #endif /* USDT2 */
   jobject obj = NULL;
   DT_RETURN_MARK(NewObjectA, jobject, (const jobject)obj);
@@ -1459,8 +1429,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, NewObjectV__entry, env, clazz, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_NEWOBJECTV_ENTRY(
-                               env, clazz, (uintptr_t) methodID);
+  HOTSPOT_JNI_NEWOBJECTV_ENTRY(env, clazz, (uintptr_t) methodID);
 #endif /* USDT2 */
   jobject obj = NULL;
   DT_RETURN_MARK(NewObjectV, jobject, (const jobject&)obj);
@@ -1485,8 +1454,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, NewObject__entry, env, clazz, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_NEWOBJECT_ENTRY(
-                              env, clazz, (uintptr_t) methodID);
+  HOTSPOT_JNI_NEWOBJECT_ENTRY(env, clazz, (uintptr_t) methodID);
 #endif /* USDT2 */
   jobject obj = NULL;
   DT_RETURN_MARK(NewObject, jobject, (const jobject&)obj);
@@ -1508,8 +1476,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, GetObjectClass__entry, env, obj);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTCLASS_ENTRY(
-                                   env, obj);
+  HOTSPOT_JNI_GETOBJECTCLASS_ENTRY(env, obj);
 #endif /* USDT2 */
   Klass* k = JNIHandles::resolve_non_null(obj)->klass();
   jclass ret =
@@ -1517,8 +1484,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetObjectClass__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTCLASS_RETURN(
-                                    ret);
+  HOTSPOT_JNI_GETOBJECTCLASS_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -1528,8 +1494,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, IsInstanceOf__entry, env, obj, clazz);
 #else /* USDT2 */
-  HOTSPOT_JNI_ISINSTANCEOF_ENTRY(
-                                 env, obj, clazz);
+  HOTSPOT_JNI_ISINSTANCEOF_ENTRY(env, obj, clazz);
 #endif /* USDT2 */
   jboolean ret = JNI_TRUE;
   if (obj != NULL) {
@@ -1543,8 +1508,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, IsInstanceOf__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_ISINSTANCEOF_RETURN(
-                                  ret);
+  HOTSPOT_JNI_ISINSTANCEOF_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -1608,15 +1572,13 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, GetMethodID__entry, env, clazz, name, sig);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETMETHODID_ENTRY(
-                                env, clazz, (char *) name, (char *) sig);
+  HOTSPOT_JNI_GETMETHODID_ENTRY(env, clazz, (char *) name, (char *) sig);
 #endif /* USDT2 */
   jmethodID ret = get_method_id(env, clazz, name, sig, false, thread);
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetMethodID__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETMETHODID_RETURN(
-                                 (uintptr_t) ret);
+  HOTSPOT_JNI_GETMETHODID_RETURN((uintptr_t) ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -1628,15 +1590,13 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, GetStaticMethodID__entry, env, clazz, name, sig);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICMETHODID_ENTRY(
-                                      env, (char *) clazz, (char *) name, (char *)sig);
+  HOTSPOT_JNI_GETSTATICMETHODID_ENTRY(env, (char *) clazz, (char *) name, (char *)sig);
 #endif /* USDT2 */
   jmethodID ret = get_method_id(env, clazz, name, sig, true, thread);
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStaticMethodID__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICMETHODID_RETURN(
-                                       (uintptr_t) ret);
+  HOTSPOT_JNI_GETSTATICMETHODID_RETURN((uintptr_t) ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -1896,8 +1856,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, CallVoidMethod__entry, env, obj, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY(
-                                   env, obj, (uintptr_t) methodID);
+  HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY(env, obj, (uintptr_t) methodID);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(CallVoidMethod);
 
@@ -1915,8 +1874,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, CallVoidMethodV__entry, env, obj, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY(
-                                    env, obj, (uintptr_t) methodID);
+  HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY(env, obj, (uintptr_t) methodID);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(CallVoidMethodV);
 
@@ -1931,8 +1889,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, CallVoidMethodA__entry, env, obj, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY(
-                                    env, obj, (uintptr_t) methodID);
+  HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY(env, obj, (uintptr_t) methodID);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(CallVoidMethodA);
 
@@ -2194,8 +2151,7 @@
   DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethod__entry,
                env, obj, cls, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY(
-               env, obj, cls, (uintptr_t) methodID);
+  HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY(env, obj, cls, (uintptr_t) methodID);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(CallNonvirtualVoidMethod);
 
@@ -2496,8 +2452,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethod__entry, env, cls, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY(
-                                         env, cls, (uintptr_t) methodID);
+  HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY(env, cls, (uintptr_t) methodID);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(CallStaticVoidMethod);
 
@@ -2515,8 +2470,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethodV__entry, env, cls, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY(
-                                          env, cls, (uintptr_t) methodID);
+  HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY(env, cls, (uintptr_t) methodID);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(CallStaticVoidMethodV);
 
@@ -2531,8 +2485,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethodA__entry, env, cls, methodID);
 #else /* USDT2 */
-  HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY(
-                                          env, cls, (uintptr_t) methodID);
+  HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY(env, cls, (uintptr_t) methodID);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(CallStaticVoidMethodA);
 
@@ -2560,8 +2513,7 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, GetFieldID__entry, env, clazz, name, sig);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETFIELDID_ENTRY(
-                               env, clazz, (char *) name, (char *) sig);
+  HOTSPOT_JNI_GETFIELDID_ENTRY(env, clazz, (char *) name, (char *) sig);
 #endif /* USDT2 */
   jfieldID ret = 0;
   DT_RETURN_MARK(GetFieldID, jfieldID, (const jfieldID&)ret);
@@ -2597,8 +2549,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, GetObjectField__entry, env, obj, fieldID);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTFIELD_ENTRY(
-                                   env, obj, (uintptr_t) fieldID);
+  HOTSPOT_JNI_GETOBJECTFIELD_ENTRY(env, obj, (uintptr_t) fieldID);
 #endif /* USDT2 */
   oop o = JNIHandles::resolve_non_null(obj);
   Klass* k = o->klass();
@@ -2632,8 +2583,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetObjectField__return, ret);
 #else /* USDT2 */
-HOTSPOT_JNI_GETOBJECTFIELD_RETURN(
-                                  ret);
+HOTSPOT_JNI_GETOBJECTFIELD_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -2758,8 +2708,7 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, SetObjectField__entry, env, obj, fieldID, value);
 #else /* USDT2 */
-  HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(
-                                   env, obj, (uintptr_t) fieldID, value);
+  HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(env, obj, (uintptr_t) fieldID, value);
 #endif /* USDT2 */
   oop o = JNIHandles::resolve_non_null(obj);
   Klass* k = o->klass();
@@ -2776,8 +2725,7 @@
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, SetObjectField__return);
 #else /* USDT2 */
-  HOTSPOT_JNI_SETOBJECTFIELD_RETURN(
-);
+  HOTSPOT_JNI_SETOBJECTFIELD_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -2880,8 +2828,7 @@
   DTRACE_PROBE4(hotspot_jni, ToReflectedField__entry,
                 env, cls, fieldID, isStatic);
 #else /* USDT2 */
-  HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY(
-                env, cls, (uintptr_t) fieldID, isStatic);
+  HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY(env, cls, (uintptr_t) fieldID, isStatic);
 #endif /* USDT2 */
   jobject ret = NULL;
   DT_RETURN_MARK(ToReflectedField, jobject, (const jobject&)ret);
@@ -2925,8 +2872,7 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, GetStaticFieldID__entry, env, clazz, name, sig);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICFIELDID_ENTRY(
-                                     env, clazz, (char *) name, (char *) sig);
+  HOTSPOT_JNI_GETSTATICFIELDID_ENTRY(env, clazz, (char *) name, (char *) sig);
 #endif /* USDT2 */
   jfieldID ret = NULL;
   DT_RETURN_MARK(GetStaticFieldID, jfieldID, (const jfieldID&)ret);
@@ -2966,8 +2912,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, GetStaticObjectField__entry, env, clazz, fieldID);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY(
-                                         env, clazz, (uintptr_t) fieldID);
+  HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY(env, clazz, (uintptr_t) fieldID);
 #endif /* USDT2 */
 #if INCLUDE_JNI_CHECK
   DEBUG_ONLY(Klass* param_k = jniCheck::validate_class(thread, clazz);)
@@ -2983,8 +2928,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStaticObjectField__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN(
-                                          ret);
+  HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -3069,8 +3013,7 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, SetStaticObjectField__entry, env, clazz, fieldID, value);
 #else /* USDT2 */
- HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY(
-                                        env, clazz, (uintptr_t) fieldID, value);
+ HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value);
 #endif /* USDT2 */
   JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID);
   assert(id->is_static_field_id(), "invalid static field id");
@@ -3085,8 +3028,7 @@
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, SetStaticObjectField__return);
 #else /* USDT2 */
-  HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN(
-                                          );
+  HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -3189,8 +3131,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, NewString__entry, env, unicodeChars, len);
 #else /* USDT2 */
- HOTSPOT_JNI_NEWSTRING_ENTRY(
-                             env, (uint16_t *) unicodeChars, len);
+ HOTSPOT_JNI_NEWSTRING_ENTRY(env, (uint16_t *) unicodeChars, len);
 #endif /* USDT2 */
   jstring ret = NULL;
   DT_RETURN_MARK(NewString, jstring, (const jstring&)ret);
@@ -3205,8 +3146,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, GetStringLength__entry, env, string);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(
-                                    env, string);
+  HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(env, string);
 #endif /* USDT2 */
   jsize ret = 0;
   oop s = JNIHandles::resolve_non_null(string);
@@ -3216,8 +3156,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringLength__return, ret);
 #else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGLENGTH_RETURN(
-                                    ret);
+ HOTSPOT_JNI_GETSTRINGLENGTH_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -3229,8 +3168,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, GetStringChars__entry, env, string, isCopy);
 #else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(
-                                  env, string, (uintptr_t *) isCopy);
+ HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(env, string, (uintptr_t *) isCopy);
 #endif /* USDT2 */
   jchar* buf = NULL;
   oop s = JNIHandles::resolve_non_null(string);
@@ -3254,8 +3192,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringChars__return, buf);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSTRINGCHARS_RETURN(
-                                    buf);
+  HOTSPOT_JNI_GETSTRINGCHARS_RETURN(buf);
 #endif /* USDT2 */
   return buf;
 JNI_END
@@ -3266,8 +3203,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, ReleaseStringChars__entry, env, str, chars);
 #else /* USDT2 */
-  HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY(
-                                       env, str, (uint16_t *) chars);
+  HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY(env, str, (uint16_t *) chars);
 #endif /* USDT2 */
   //%note jni_6
   if (chars != NULL) {
@@ -3278,8 +3214,7 @@
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, ReleaseStringChars__return);
 #else /* USDT2 */
-  HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN(
-);
+  HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -3298,8 +3233,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, NewStringUTF__entry, env, bytes);
 #else /* USDT2 */
-  HOTSPOT_JNI_NEWSTRINGUTF_ENTRY(
-                                 env, (char *) bytes);
+  HOTSPOT_JNI_NEWSTRINGUTF_ENTRY(env, (char *) bytes);
 #endif /* USDT2 */
   jstring ret;
   DT_RETURN_MARK(NewStringUTF, jstring, (const jstring&)ret);
@@ -3315,8 +3249,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, GetStringUTFLength__entry, env, string);
 #else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(
-                                      env, string);
+ HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(env, string);
 #endif /* USDT2 */
   jsize ret = 0;
   oop java_string = JNIHandles::resolve_non_null(string);
@@ -3326,8 +3259,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringUTFLength__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN(
-                                        ret);
+  HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -3338,8 +3270,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, GetStringUTFChars__entry, env, string, isCopy);
 #else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(
-                                     env, string, (uintptr_t *) isCopy);
+ HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(env, string, (uintptr_t *) isCopy);
 #endif /* USDT2 */
   char* result = NULL;
   oop java_string = JNIHandles::resolve_non_null(string);
@@ -3357,8 +3288,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringUTFChars__return, result);
 #else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN(
-                                      result);
+ HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN(result);
 #endif /* USDT2 */
   return result;
 JNI_END
@@ -3369,8 +3299,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, ReleaseStringUTFChars__entry, env, str, chars);
 #else /* USDT2 */
- HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY(
-                                         env, str, (char *) chars);
+ HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY(env, str, (char *) chars);
 #endif /* USDT2 */
   if (chars != NULL) {
     FreeHeap((char*) chars);
@@ -3378,8 +3307,7 @@
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, ReleaseStringUTFChars__return);
 #else /* USDT2 */
-HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN(
-);
+HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -3389,8 +3317,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, GetArrayLength__entry, env, array);
 #else /* USDT2 */
- HOTSPOT_JNI_GETARRAYLENGTH_ENTRY(
-                                  env, array);
+ HOTSPOT_JNI_GETARRAYLENGTH_ENTRY(env, array);
 #endif /* USDT2 */
   arrayOop a = arrayOop(JNIHandles::resolve_non_null(array));
   assert(a->is_array(), "must be array");
@@ -3398,8 +3325,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetArrayLength__return, ret);
 #else /* USDT2 */
- HOTSPOT_JNI_GETARRAYLENGTH_RETURN(
-                                   ret);
+ HOTSPOT_JNI_GETARRAYLENGTH_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -3421,8 +3347,7 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, NewObjectArray__entry, env, length, elementClass, initialElement);
 #else /* USDT2 */
- HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY(
-                                  env, length, elementClass, initialElement);
+ HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY(env, length, elementClass, initialElement);
 #endif /* USDT2 */
   jobjectArray ret = NULL;
   DT_RETURN_MARK(NewObjectArray, jobjectArray, (const jobjectArray&)ret);
@@ -3453,8 +3378,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, GetObjectArrayElement__entry, env, array, index);
 #else /* USDT2 */
- HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY(
-                                         env, array, index);
+ HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY(env, array, index);
 #endif /* USDT2 */
   jobject ret = NULL;
   DT_RETURN_MARK(GetObjectArrayElement, jobject, (const jobject&)ret);
@@ -3481,8 +3405,7 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, SetObjectArrayElement__entry, env, array, index, value);
 #else /* USDT2 */
- HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY(
-                                         env, array, index, value);
+ HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY(env, array, index, value);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(SetObjectArrayElement);
 
@@ -4034,8 +3957,7 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, RegisterNatives__entry, env, clazz, methods, nMethods);
 #else /* USDT2 */
-  HOTSPOT_JNI_REGISTERNATIVES_ENTRY(
-                                    env, clazz, (void *) methods, nMethods);
+  HOTSPOT_JNI_REGISTERNATIVES_ENTRY(env, clazz, (void *) methods, nMethods);
 #endif /* USDT2 */
   jint ret = 0;
   DT_RETURN_MARK(RegisterNatives, jint, (const jint&)ret);
@@ -4077,8 +3999,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, UnregisterNatives__entry, env, clazz);
 #else /* USDT2 */
- HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY(
-                                     env, clazz);
+ HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY(env, clazz);
 #endif /* USDT2 */
   Klass* k   = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz));
   //%note jni_2
@@ -4094,8 +4015,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, UnregisterNatives__return, 0);
 #else /* USDT2 */
- HOTSPOT_JNI_UNREGISTERNATIVES_RETURN(
-                                      0);
+ HOTSPOT_JNI_UNREGISTERNATIVES_RETURN(0);
 #endif /* USDT2 */
   return 0;
 JNI_END
@@ -4115,8 +4035,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, MonitorEnter__entry, env, jobj);
 #else /* USDT2 */
- HOTSPOT_JNI_MONITORENTER_ENTRY(
-                                env, jobj);
+ HOTSPOT_JNI_MONITORENTER_ENTRY(env, jobj);
 #endif /* USDT2 */
   jint ret = JNI_ERR;
   DT_RETURN_MARK(MonitorEnter, jint, (const jint&)ret);
@@ -4143,8 +4062,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, MonitorExit__entry, env, jobj);
 #else /* USDT2 */
- HOTSPOT_JNI_MONITOREXIT_ENTRY(
-                               env, jobj);
+ HOTSPOT_JNI_MONITOREXIT_ENTRY(env, jobj);
 #endif /* USDT2 */
   jint ret = JNI_ERR;
   DT_RETURN_MARK(MonitorExit, jint, (const jint&)ret);
@@ -4177,8 +4095,7 @@
 #ifndef USDT2
   DTRACE_PROBE5(hotspot_jni, GetStringRegion__entry, env, string, start, len, buf);
 #else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGREGION_ENTRY(
-                                   env, string, start, len, buf);
+ HOTSPOT_JNI_GETSTRINGREGION_ENTRY(env, string, start, len, buf);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(GetStringRegion);
   oop s = JNIHandles::resolve_non_null(string);
@@ -4206,8 +4123,7 @@
 #ifndef USDT2
   DTRACE_PROBE5(hotspot_jni, GetStringUTFRegion__entry, env, string, start, len, buf);
 #else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY(
-                                      env, string, start, len, buf);
+ HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY(env, string, start, len, buf);
 #endif /* USDT2 */
   DT_VOID_RETURN_MARK(GetStringUTFRegion);
   oop s = JNIHandles::resolve_non_null(string);
@@ -4237,8 +4153,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, GetPrimitiveArrayCritical__entry, env, array, isCopy);
 #else /* USDT2 */
- HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(
-                                             env, array, (uintptr_t *) isCopy);
+ HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(env, array, (uintptr_t *) isCopy);
 #endif /* USDT2 */
   GC_locker::lock_critical(thread);
   if (isCopy != NULL) {
@@ -4256,8 +4171,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetPrimitiveArrayCritical__return, ret);
 #else /* USDT2 */
- HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN(
-                                              ret);
+ HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -4268,16 +4182,14 @@
 #ifndef USDT2
   DTRACE_PROBE4(hotspot_jni, ReleasePrimitiveArrayCritical__entry, env, array, carray, mode);
 #else /* USDT2 */
-  HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(
-                                                  env, array, carray, mode);
+  HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode);
 #endif /* USDT2 */
   // The array, carray and mode arguments are ignored
   GC_locker::unlock_critical(thread);
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, ReleasePrimitiveArrayCritical__return);
 #else /* USDT2 */
-HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN(
-);
+HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -4287,8 +4199,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, GetStringCritical__entry, env, string, isCopy);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(
-                                      env, string, (uintptr_t *) isCopy);
+  HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy);
 #endif /* USDT2 */
   GC_locker::lock_critical(thread);
   if (isCopy != NULL) {
@@ -4307,8 +4218,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringCritical__return, ret);
 #else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN(
-                                      (uint16_t *) ret);
+ HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN((uint16_t *) ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -4319,16 +4229,14 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, ReleaseStringCritical__entry, env, str, chars);
 #else /* USDT2 */
-  HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY(
-                                          env, str, (uint16_t *) chars);
+  HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY(env, str, (uint16_t *) chars);
 #endif /* USDT2 */
   // The str and chars arguments are ignored
   GC_locker::unlock_critical(thread);
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, ReleaseStringCritical__return);
 #else /* USDT2 */
-HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN(
-);
+HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -4338,16 +4246,14 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, NewWeakGlobalRef__entry, env, ref);
 #else /* USDT2 */
- HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY(
-                                    env, ref);
+ HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY(env, ref);
 #endif /* USDT2 */
   Handle ref_handle(thread, JNIHandles::resolve(ref));
   jweak ret = JNIHandles::make_weak_global(ref_handle);
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, NewWeakGlobalRef__return, ret);
 #else /* USDT2 */
- HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN(
-                                     ret);
+ HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -4358,15 +4264,13 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, DeleteWeakGlobalRef__entry, env, ref);
 #else /* USDT2 */
-  HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY(
-                                        env, ref);
+  HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY(env, ref);
 #endif /* USDT2 */
   JNIHandles::destroy_weak_global(ref);
 #ifndef USDT2
   DTRACE_PROBE(hotspot_jni, DeleteWeakGlobalRef__return);
 #else /* USDT2 */
-  HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN(
-                                         );
+  HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN();
 #endif /* USDT2 */
 JNI_END
 
@@ -4376,16 +4280,14 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, ExceptionCheck__entry, env);
 #else /* USDT2 */
- HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY(
-                                  env);
+ HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY(env);
 #endif /* USDT2 */
   jni_check_async_exceptions(thread);
   jboolean ret = (thread->has_pending_exception()) ? JNI_TRUE : JNI_FALSE;
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, ExceptionCheck__return, ret);
 #else /* USDT2 */
- HOTSPOT_JNI_EXCEPTIONCHECK_RETURN(
-                                   ret);
+ HOTSPOT_JNI_EXCEPTIONCHECK_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 JNI_END
@@ -4481,8 +4383,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, NewDirectByteBuffer__entry, env, address, capacity);
 #else /* USDT2 */
- HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY(
-                                       env, address, capacity);
+ HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY(env, address, capacity);
 #endif /* USDT2 */
 
   if (!directBufferSupportInitializeEnded) {
@@ -4490,8 +4391,7 @@
 #ifndef USDT2
       DTRACE_PROBE1(hotspot_jni, NewDirectByteBuffer__return, NULL);
 #else /* USDT2 */
-      HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(
-                                             NULL);
+      HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(NULL);
 #endif /* USDT2 */
       return NULL;
     }
@@ -4506,8 +4406,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, NewDirectByteBuffer__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(
-                                         ret);
+  HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 }
@@ -4528,8 +4427,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, GetDirectBufferAddress__entry, env, buf);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY(
-                                           env, buf);
+  HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY(env, buf);
 #endif /* USDT2 */
   void* ret = NULL;
   DT_RETURN_MARK(GetDirectBufferAddress, void*, (const void*&)ret);
@@ -4564,8 +4462,7 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, GetDirectBufferCapacity__entry, env, buf);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY(
-                                            env, buf);
+  HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY(env, buf);
 #endif /* USDT2 */
   jlong ret = -1;
   DT_RETURN_MARK(GetDirectBufferCapacity, jlong, (const jlong&)ret);
@@ -4596,14 +4493,12 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetVersion__entry, env);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETVERSION_ENTRY(
-                               env);
+  HOTSPOT_JNI_GETVERSION_ENTRY(env);
 #endif /* USDT2 */
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetVersion__return, CurrentVersion);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETVERSION_RETURN(
-                                CurrentVersion);
+  HOTSPOT_JNI_GETVERSION_RETURN(CurrentVersion);
 #endif /* USDT2 */
   return CurrentVersion;
 JNI_END
@@ -4615,15 +4510,13 @@
 #ifndef USDT2
   DTRACE_PROBE2(hotspot_jni, GetJavaVM__entry, env, vm);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETJAVAVM_ENTRY(
-                              env, (void **) vm);
+  HOTSPOT_JNI_GETJAVAVM_ENTRY(env, (void **) vm);
 #endif /* USDT2 */
   *vm  = (JavaVM *)(&main_vm);
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetJavaVM__return, JNI_OK);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETJAVAVM_RETURN(
-                               JNI_OK);
+  HOTSPOT_JNI_GETJAVAVM_RETURN(JNI_OK);
 #endif /* USDT2 */
   return JNI_OK;
 JNI_END
@@ -5014,8 +4907,7 @@
 #ifndef USDT2
   HS_DTRACE_PROBE1(hotspot_jni, GetDefaultJavaVMInitArgs__entry, args_);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY(
-                                             args_);
+  HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY(args_);
 #endif /* USDT2 */
   JDK1_1InitArgs *args = (JDK1_1InitArgs *)args_;
   jint ret = JNI_ERR;
@@ -5061,8 +4953,11 @@
 void TestMetaspaceAux_test();
 void TestMetachunk_test();
 void TestVirtualSpaceNode_test();
+void TestOldFreeSpaceCalculation_test();
+void TestNewSize_test();
 #if INCLUDE_ALL_GCS
 void TestG1BiasedArray_test();
+void TestBufferingOopClosure_test();
 #endif
 
 void execute_internal_vm_tests() {
@@ -5081,12 +4976,15 @@
     run_unit_test(QuickSort::test_quick_sort());
     run_unit_test(AltHashing::test_alt_hash());
     run_unit_test(test_loggc_filename());
+    run_unit_test(TestOldFreeSpaceCalculation_test());
+    run_unit_test(TestNewSize_test());
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
 #if INCLUDE_ALL_GCS
     run_unit_test(TestG1BiasedArray_test());
     run_unit_test(HeapRegionRemSet::test_prt());
+    run_unit_test(TestBufferingOopClosure_test());
 #endif
     tty->print_cr("All internal VM tests passed");
   }
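
The newly registered tests run through the run_unit_test wrapper, whose idiom is to stringize the call so the log names each test before executing it. A standalone sketch of that shape (printf standing in for tty):

    #include <cstdio>

    #define run_unit_test_demo(call)               \
      do {                                         \
        std::printf("Running test: %s\n", #call);  \
        call;                                      \
      } while (0)

    static void TestExample_test() { /* assertions live here */ }

    int main() {
      run_unit_test_demo(TestExample_test());
      return 0;
    }
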
@@ -5108,8 +5006,7 @@
 #ifndef USDT2
   HS_DTRACE_PROBE3(hotspot_jni, CreateJavaVM__entry, vm, penv, args);
 #else /* USDT2 */
-  HOTSPOT_JNI_CREATEJAVAVM_ENTRY(
-                                 (void **) vm, penv, args);
+  HOTSPOT_JNI_CREATEJAVAVM_ENTRY((void **) vm, penv, args);
 #endif /* USDT2 */
 
   jint result = JNI_ERR;
@@ -5166,6 +5063,7 @@
   result = Threads::create_vm((JavaVMInitArgs*) args, &can_try_again);
   if (result == JNI_OK) {
     JavaThread *thread = JavaThread::current();
+    assert(!thread->has_pending_exception(), "should have returned not OK");
     /* thread is thread_in_vm here */
     *vm = (JavaVM *)(&main_vm);
     *(JNIEnv**)penv = thread->jni_environment();
@@ -5202,6 +5100,19 @@
     // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving.
     ThreadStateTransition::transition_and_fence(thread, _thread_in_vm, _thread_in_native);
   } else {
+    // If create_vm exits because of a pending exception, exit with that
+    // exception.  In the future when we figure out how to reclaim memory,
+    // we may be able to exit with JNI_ERR and allow the calling application
+    // to continue.
+    if (Universe::is_fully_initialized()) {
+      // otherwise no pending exception possible - VM will already have aborted
+      JavaThread* THREAD = JavaThread::current();
+      if (HAS_PENDING_EXCEPTION) {
+        HandleMark hm;
+        vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+      }
+    }
+
     if (can_try_again) {
       // reset safe_to_recreate_vm to 1 so that retrial would be possible
       safe_to_recreate_vm = 1;
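
Restating the new failure path: once the Universe is fully initialized a pending Java exception becomes possible, and the VM reports it via vm_exit_during_initialization instead of returning JNI_ERR; earlier failures fall through to the retry bookkeeping. A stand-in control-flow sketch (the predicates and exit call are placeholders for the HotSpot macros and runtime calls):

    #include <cstdlib>

    static bool universe_fully_initialized() { return true; }   // placeholder
    static bool has_pending_exception()      { return false; }  // placeholder
    static void exit_reporting_pending_exception() { std::exit(1); }  // does not return

    void on_create_vm_failure(bool can_try_again, int* safe_to_recreate_vm) {
      if (universe_fully_initialized() && has_pending_exception()) {
        exit_reporting_pending_exception();  // exit carrying the Java exception
      }
      if (can_try_again) {
        *safe_to_recreate_vm = 1;            // allow a later retry
      }
    }
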
@@ -5231,8 +5142,7 @@
   HS_DTRACE_PROBE3(hotspot_jni, GetCreatedJavaVMs__entry, \
     vm_buf, bufLen, numVMs);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY(
-                                      (void **) vm_buf, bufLen, (uintptr_t *) numVMs);
+  HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY((void **) vm_buf, bufLen, (uintptr_t *) numVMs);
 #endif /* USDT2 */
   if (vm_created) {
     if (numVMs != NULL) *numVMs = 1;
@@ -5243,8 +5153,7 @@
 #ifndef USDT2
   HS_DTRACE_PROBE1(hotspot_jni, GetCreatedJavaVMs__return, JNI_OK);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN(
-                                    JNI_OK);
+  HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN(JNI_OK);
 #endif /* USDT2 */
   return JNI_OK;
 }
@@ -5262,8 +5171,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, DestroyJavaVM__entry, vm);
 #else /* USDT2 */
-  HOTSPOT_JNI_DESTROYJAVAVM_ENTRY(
-                                  vm);
+  HOTSPOT_JNI_DESTROYJAVAVM_ENTRY(vm);
 #endif /* USDT2 */
   jint res = JNI_ERR;
   DT_RETURN_MARK(DestroyJavaVM, jint, (const jint&)res);
@@ -5419,15 +5327,13 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, AttachCurrentThread__entry, vm, penv, _args);
 #else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY(
-                                        vm, penv, _args);
+  HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY(vm, penv, _args);
 #endif /* USDT2 */
   if (!vm_created) {
 #ifndef USDT2
     DTRACE_PROBE1(hotspot_jni, AttachCurrentThread__return, JNI_ERR);
 #else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN(
-                                         (uint32_t) JNI_ERR);
+  HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN((uint32_t) JNI_ERR);
 #endif /* USDT2 */
     return JNI_ERR;
   }
@@ -5437,8 +5343,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, AttachCurrentThread__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN(
-                                         ret);
+  HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 }
@@ -5448,8 +5353,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__entry, vm);
 #else /* USDT2 */
-  HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY(
-                                        vm);
+  HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY(vm);
 #endif /* USDT2 */
   VM_Exit::block_if_vm_exited();
 
@@ -5460,8 +5364,7 @@
 #ifndef USDT2
     DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__return, JNI_OK);
 #else /* USDT2 */
-  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(
-                                         JNI_OK);
+  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
 #endif /* USDT2 */
     return JNI_OK;
   }
@@ -5471,8 +5374,7 @@
 #ifndef USDT2
     DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__return, JNI_ERR);
 #else /* USDT2 */
-  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(
-                                         (uint32_t) JNI_ERR);
+  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN((uint32_t) JNI_ERR);
 #endif /* USDT2 */
     // Can't detach a thread that's running java, that can't work.
     return JNI_ERR;
@@ -5497,8 +5399,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__return, JNI_OK);
 #else /* USDT2 */
-  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(
-                                         JNI_OK);
+  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
 #endif /* USDT2 */
   return JNI_OK;
 }
@@ -5514,8 +5415,7 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, GetEnv__entry, vm, penv, version);
 #else /* USDT2 */
-  HOTSPOT_JNI_GETENV_ENTRY(
-                           vm, penv, version);
+  HOTSPOT_JNI_GETENV_ENTRY(vm, penv, version);
 #endif /* USDT2 */
   jint ret = JNI_ERR;
   DT_RETURN_MARK(GetEnv, jint, (const jint&)ret);
@@ -5573,15 +5473,13 @@
 #ifndef USDT2
   DTRACE_PROBE3(hotspot_jni, AttachCurrentThreadAsDaemon__entry, vm, penv, _args);
 #else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY(
-                                                vm, penv, _args);
+  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY(vm, penv, _args);
 #endif /* USDT2 */
   if (!vm_created) {
 #ifndef USDT2
     DTRACE_PROBE1(hotspot_jni, AttachCurrentThreadAsDaemon__return, JNI_ERR);
 #else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN(
-                                                 (uint32_t) JNI_ERR);
+  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN((uint32_t) JNI_ERR);
 #endif /* USDT2 */
     return JNI_ERR;
   }
@@ -5591,8 +5489,7 @@
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, AttachCurrentThreadAsDaemon__return, ret);
 #else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN(
-                                                 ret);
+  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN(ret);
 #endif /* USDT2 */
   return ret;
 }
--- a/src/share/vm/prims/jvm.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/prims/jvm.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -521,6 +521,12 @@
   JavaThreadInObjectWaitState jtiows(thread, ms != 0);
   if (JvmtiExport::should_post_monitor_wait()) {
     JvmtiExport::post_monitor_wait((JavaThread *)THREAD, (oop)obj(), ms);
+
+    // The current thread already owns the monitor and it has not yet
+    // been added to the wait queue so the current thread cannot be
+    // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
+    // event handler cannot accidentally consume an unpark() meant for
+    // the ParkEvent associated with this ObjectMonitor.
   }
   ObjectSynchronizer::wait(obj, ms, CHECK);
 JVM_END
@@ -2878,10 +2884,10 @@
     if (JvmtiExport::should_post_resource_exhausted()) {
       JvmtiExport::post_resource_exhausted(
         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_THREADS,
-        "unable to create new native thread");
+        os::native_thread_creation_failed_msg());
     }
     THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
-              "unable to create new native thread");
+              os::native_thread_creation_failed_msg());
   }
 
   Thread::start(native_thread);
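
Hoisting the literal behind os::native_thread_creation_failed_msg() lets platform code refine the OOM text (for instance, hinting at thread-stack or process limits) without touching the two call sites. A sketch of such an accessor with the traditional default wording (the real definition lives in platform code; this is illustrative):

    namespace os_demo {  // stand-in for HotSpot's os class
      inline const char* native_thread_creation_failed_msg() {
        return "unable to create new native thread";
      }
    }
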
--- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -26,6 +26,7 @@
 #include "code/codeBlob.hpp"
 #include "code/codeCache.hpp"
 #include "code/scopeDesc.hpp"
+#include "code/vtableStubs.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiCodeBlobEvents.hpp"
@@ -63,6 +64,7 @@
   // used during a collection
   static GrowableArray<JvmtiCodeBlobDesc*>* _global_code_blobs;
   static void do_blob(CodeBlob* cb);
+  static void do_vtable_stub(VtableStub* vs);
  public:
   CodeBlobCollector() {
     _code_blobs = NULL;
@@ -119,6 +121,10 @@
   if (cb->is_nmethod()) {
     return;
   }
+  // exclude VtableStubs, which are processed separately
+  if (cb->is_buffer_blob() && strcmp(cb->name(), "vtable chunks") == 0) {
+    return;
+  }
 
   // check if this starting address has been seen already - the
   // assumption is that stubs are inserted into the list before the
@@ -136,6 +142,13 @@
   _global_code_blobs->append(scb);
 }
 
+// called for each VtableStub in VtableStubs
+
+void CodeBlobCollector::do_vtable_stub(VtableStub* vs) {
+    JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(vs->is_vtable_stub() ? "vtable stub" : "itable stub",
+                                                   vs->code_begin(), vs->code_end());
+    _global_code_blobs->append(scb);
+}
 
 // collects a list of CodeBlobs in the CodeCache.
 //
@@ -166,6 +179,10 @@
     _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
   }
 
+  // Vtable stubs are not described with StubCodeDesc,
+  // process them separately
+  VtableStubs::vtable_stub_do(do_vtable_stub);
+
   // next iterate over all the non-nmethod code blobs and add them to
   // the list - as noted above this will filter out duplicates and
   // enclosing blobs.
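
Because vtable and itable stubs live outside the StubCodeDesc bookkeeping, the collector now receives them through VtableStubs::vtable_stub_do, a classic enumerate-by-callback shape. A stand-in sketch of that pattern (types are illustrative, not HotSpot's VtableStub):

    #include <cstdio>
    #include <vector>

    struct Stub { bool is_vtable; };

    struct StubTable {
      std::vector<Stub> stubs;
      void stub_do(void (*f)(Stub*)) {
        for (Stub& s : stubs) f(&s);  // hand every entry to the client callback
      }
    };

    static void collect_stub(Stub* s) {
      std::printf("collected %s stub\n", s->is_vtable ? "vtable" : "itable");
    }

    // Usage: StubTable t; t.stubs = {{true}, {false}}; t.stub_do(collect_stub);
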
--- a/src/share/vm/prims/jvmtiEnv.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1360,8 +1360,10 @@
   if (state == NULL) {
     return JVMTI_ERROR_THREAD_NOT_ALIVE;
   }
-  uint32_t debug_bits = 0;
-  if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+
+  // It is only safe to perform the direct operation on the current
+  // thread. All other usage needs to use a vm-safepoint-op for safety.
+  if (java_thread == JavaThread::current()) {
     err = get_frame_count(state, count_ptr);
   } else {
     // get java stack frame count at safepoint.
@@ -1476,9 +1478,10 @@
 jvmtiError
 JvmtiEnv::GetFrameLocation(JavaThread* java_thread, jint depth, jmethodID* method_ptr, jlocation* location_ptr) {
   jvmtiError err = JVMTI_ERROR_NONE;
-  uint32_t debug_bits = 0;
-
-  if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+
+  // It is only safe to perform the direct operation on the current
+  // thread. All other usage needs to use a vm-safepoint-op for safety.
+  if (java_thread == JavaThread::current()) {
     err = get_frame_location(java_thread, depth, method_ptr, location_ptr);
   } else {
     // JVMTI get java stack frame location at safepoint.
--- a/src/share/vm/prims/jvmtiEnvBase.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -533,7 +533,11 @@
   VMOp_Type type() const { return VMOp_GetFrameCount; }
   jvmtiError result()    { return _result; }
   void doit() {
-    _result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
+    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+    JavaThread* jt = _state->get_thread();
+    if (Threads::includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
+      _result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
+    }
   }
 };
 
@@ -559,8 +563,12 @@
   VMOp_Type type() const { return VMOp_GetFrameLocation; }
   jvmtiError result()    { return _result; }
   void doit() {
-    _result = ((JvmtiEnvBase*)_env)->get_frame_location(_java_thread, _depth,
-                                                        _method_ptr, _location_ptr);
+    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+    if (Threads::includes(_java_thread) && !_java_thread->is_exiting() &&
+        _java_thread->threadObj() != NULL) {
+      _result = ((JvmtiEnvBase*)_env)->get_frame_location(_java_thread, _depth,
+                                                          _method_ptr, _location_ptr);
+    }
   }
 };
 
--- a/src/share/vm/prims/jvmtiEnvThreadState.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/prims/jvmtiEnvThreadState.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -272,7 +272,7 @@
     // There can be a race condition between a VM_Operation reaching a safepoint
     // and the target thread exiting from Java execution.
     // We must recheck the last Java frame still exists.
-    if (_thread->has_last_Java_frame()) {
+    if (!_thread->is_exiting() && _thread->has_last_Java_frame()) {
       javaVFrame* vf = _thread->last_java_vframe(&rm);
       assert(vf != NULL, "must have last java frame");
       Method* method = vf->method();
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -2790,6 +2790,7 @@
   return true;
 }
 
+#ifdef ASSERT
 // verify that a static oop field is in range
 static inline bool verify_static_oop(InstanceKlass* ik,
                                      oop mirror, int offset) {
@@ -2804,6 +2805,7 @@
     return false;
   }
 }
+#endif // #ifdef ASSERT
 
 // a class references its super class, interfaces, class loader, ...
 // and finally its static fields
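
verify_static_oop is only ever called from assert(), so wrapping it in #ifdef ASSERT keeps product builds from compiling (and warning about) a dead function. The same shape in standalone C++, with NDEBUG playing the role of the product build:

    #include <cassert>

    #ifndef NDEBUG
    // Debug-only range check; compiled out entirely in release builds.
    static bool in_bounds(int offset, int limit) {
      return offset >= 0 && offset < limit;
    }
    #endif

    void store_field(int offset, int limit) {
      assert(in_bounds(offset, limit) && "field offset out of bounds");
      // ... perform the store; the assert vanishes when NDEBUG is defined ...
      (void)offset; (void)limit;
    }
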
--- a/src/share/vm/prims/whitebox.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/prims/whitebox.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -105,7 +105,7 @@
 WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
   CollectorPolicy * p = Universe::heap()->collector_policy();
   gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
-    SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT,
+    SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Space alignment "SIZE_FORMAT" Heap alignment "SIZE_FORMAT,
     p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(),
     p->space_alignment(), p->heap_alignment());
 }
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -306,7 +306,7 @@
  *    profiling can start at level 0 and finish at level 3.
  *
  * b. 0 -> 2 -> 3 -> 4.
- *    This case occures when the load on C2 is deemed too high. So, instead of transitioning
+ *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
  *    into state 3 directly and over-profiling while a method is in the C2 queue we transition to
  *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
  *
--- a/src/share/vm/runtime/arguments.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -104,7 +104,7 @@
 const char*  Arguments::_java_vendor_url_bug    = DEFAULT_VENDOR_URL_BUG;
 const char*  Arguments::_sun_java_launcher      = DEFAULT_JAVA_LAUNCHER;
 int    Arguments::_sun_java_launcher_pid        = -1;
-bool   Arguments::_created_by_gamma_launcher    = false;
+bool   Arguments::_sun_java_launcher_is_altjvm  = false;
 
 // These parameters are reset in method parse_vm_init_args(JavaVMInitArgs*)
 bool   Arguments::_AlwaysCompileLoopMethods     = AlwaysCompileLoopMethods;
@@ -154,7 +154,8 @@
 
 // Process java launcher properties.
 void Arguments::process_sun_java_launcher_properties(JavaVMInitArgs* args) {
-  // See if sun.java.launcher or sun.java.launcher.pid is defined.
+  // See if sun.java.launcher, sun.java.launcher.is_altjvm or
+  // sun.java.launcher.pid is defined.
   // Must do this before setting up other system properties,
   // as some of them may depend on launcher type.
   for (int index = 0; index < args->nOptions; index++) {
@@ -165,6 +166,12 @@
       process_java_launcher_argument(tail, option->extraInfo);
       continue;
     }
+    if (match_option(option, "-Dsun.java.launcher.is_altjvm=", &tail)) {
+      if (strcmp(tail, "true") == 0) {
+        _sun_java_launcher_is_altjvm = true;
+      }
+      continue;
+    }
     if (match_option(option, "-Dsun.java.launcher.pid=", &tail)) {
       _sun_java_launcher_pid = atoi(tail);
       continue;
@@ -181,7 +188,7 @@
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(),  true));
 
-  // following are JVMTI agent writeable properties.
+  // Following are JVMTI agent writable properties.
   // Properties values are set to NULL and they are
   // os specific they are initialized in os::init_system_properties_values().
   _java_ext_dirs = new SystemProperty("java.ext.dirs", NULL,  true);
@@ -882,7 +889,7 @@
     arg_len = equal_sign - argname;
   }
 
-  Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true);
+  Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true, true);
   if (found_flag != NULL) {
     char locked_message_buf[BUFLEN];
     found_flag->get_locked_message(locked_message_buf, BUFLEN);
@@ -1017,9 +1024,10 @@
     _java_command = value;
 
     // Record value in Arguments, but let it get passed to Java.
-  } else if (strcmp(key, "sun.java.launcher.pid") == 0) {
-    // launcher.pid property is private and is processed
-    // in process_sun_java_launcher_properties();
+  } else if (strcmp(key, "sun.java.launcher.is_altjvm") == 0 ||
+             strcmp(key, "sun.java.launcher.pid") == 0) {
+    // sun.java.launcher.is_altjvm and sun.java.launcher.pid properties are
+    // private and are processed in process_sun_java_launcher_properties();
     // the sun.java.launcher property is passed on to the java application
     FreeHeap(key);
     if (eq != NULL) {
@@ -1310,7 +1318,7 @@
   if (!FLAG_IS_DEFAULT(OldPLABSize)) {
     if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
       // OldPLABSize is not the default value but CMSParPromoteBlocksToClaim
-      // is.  In this situtation let CMSParPromoteBlocksToClaim follow
+      // is.  In this situation let CMSParPromoteBlocksToClaim follow
       // the value (either from the command line or ergonomics) of
       // OldPLABSize.  Following OldPLABSize is an ergonomics decision.
       FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
@@ -1573,6 +1581,16 @@
     vm_exit(1);
   }
 
+  if (UseAdaptiveSizePolicy) {
+    // We don't want to limit adaptive heap sizing's freedom to adjust the heap
+    // unless the user actually sets these flags.
+    if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
+      FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+    }
+    if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
+      FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+    }
+  }
 
   // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
   // SurvivorRatio has been set, reset their default values to SurvivorRatio +
@@ -1804,9 +1822,6 @@
 
 void Arguments::process_java_launcher_argument(const char* launcher, void* extra_info) {
   _sun_java_launcher = strdup(launcher);
-  if (strcmp("gamma", _sun_java_launcher) == 0) {
-    _created_by_gamma_launcher = true;
-  }
 }
 
 bool Arguments::created_by_java_launcher() {
@@ -1814,8 +1829,8 @@
   return strcmp(DEFAULT_JAVA_LAUNCHER, _sun_java_launcher) != 0;
 }
 
-bool Arguments::created_by_gamma_launcher() {
-  return _created_by_gamma_launcher;
+bool Arguments::sun_java_launcher_is_altjvm() {
+  return _sun_java_launcher_is_altjvm;
 }
 
 //===========================================================================================================
@@ -1848,7 +1863,7 @@
 }
 
 bool Arguments::verify_percentage(uintx value, const char* name) {
-  if (value <= 100) {
+  if (is_percentage(value)) {
     return true;
   }
   jio_fprintf(defaultStream::error_stream(),
@@ -1936,6 +1951,34 @@
   return count_p < 2 && count_t < 2;
 }
 
+bool Arguments::verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio) {
+  if (!is_percentage(min_heap_free_ratio)) {
+    err_msg.print("MinHeapFreeRatio must have a value between 0 and 100");
+    return false;
+  }
+  if (min_heap_free_ratio > MaxHeapFreeRatio) {
+    err_msg.print("MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or "
+                  "equal to MaxHeapFreeRatio (" UINTX_FORMAT ")", min_heap_free_ratio,
+                  MaxHeapFreeRatio);
+    return false;
+  }
+  return true;
+}
+
+bool Arguments::verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio) {
+  if (!is_percentage(max_heap_free_ratio)) {
+    err_msg.print("MaxHeapFreeRatio must have a value between 0 and 100");
+    return false;
+  }
+  if (max_heap_free_ratio < MinHeapFreeRatio) {
+    err_msg.print("MaxHeapFreeRatio (" UINTX_FORMAT ") must be greater than or "
+                  "equal to MinHeapFreeRatio (" UINTX_FORMAT ")", max_heap_free_ratio,
+                  MinHeapFreeRatio);
+    return false;
+  }
+  return true;
+}
+
 // Check consistency of GC selection
 bool Arguments::check_gc_consistency() {
   check_gclog_consistency();
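
The two new verifiers centralize checks that were previously scattered: each bound must be a percentage and must be consistent with the opposite bound, and the reason is written into a buffer so the caller decides how to report it. A standalone sketch of the min-side check, with FormatBuffer<80> replaced by a plain char buffer:

    #include <cstdio>

    static bool is_percentage(unsigned v) { return v <= 100; }

    // Mirrors the shape of verify_MinHeapFreeRatio: range first, then the
    // relationship to the opposite bound, writing the reason into err.
    static bool verify_min_ratio(unsigned min_ratio, unsigned max_ratio,
                                 char* err, size_t errlen) {
      if (!is_percentage(min_ratio)) {
        snprintf(err, errlen, "min ratio must be between 0 and 100");
        return false;
      }
      if (min_ratio > max_ratio) {
        snprintf(err, errlen, "min ratio (%u) must be <= max ratio (%u)",
                 min_ratio, max_ratio);
        return false;
      }
      return true;
    }

    int main() {
      char err[80];
      if (!verify_min_ratio(70, 40, err, sizeof(err))) printf("%s\n", err);
      return 0;
    }
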
@@ -2041,8 +2084,6 @@
   status = status && verify_interval(AdaptiveSizePolicyWeight, 0, 100,
                               "AdaptiveSizePolicyWeight");
   status = status && verify_percentage(ThresholdTolerance, "ThresholdTolerance");
-  status = status && verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio");
-  status = status && verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio");
 
   // Divide by bucket size to prevent a large size from causing rollover when
   // calculating amount of memory needed to be allocated for the String table.
@@ -2052,15 +2093,19 @@
   status = status && verify_interval(SymbolTableSize, minimumSymbolTableSize,
     (max_uintx / SymbolTable::bucket_size()), "SymbolTable size");
 
-  if (MinHeapFreeRatio > MaxHeapFreeRatio) {
-    jio_fprintf(defaultStream::error_stream(),
-                "MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or "
-                "equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
-                MinHeapFreeRatio, MaxHeapFreeRatio);
-    status = false;
+  {
+    // Using "else if" below to avoid printing two error messages if min > max.
+    // This will also prevent us from reporting both min>100 and max>100 at the
+    // same time, but that is less annoying than printing two identical errors IMHO.
+    FormatBuffer<80> err_msg("");
+    if (!verify_MinHeapFreeRatio(err_msg, MinHeapFreeRatio)) {
+      jio_fprintf(defaultStream::error_stream(), "%s\n", err_msg.buffer());
+      status = false;
+    } else if (!verify_MaxHeapFreeRatio(err_msg, MaxHeapFreeRatio)) {
+      jio_fprintf(defaultStream::error_stream(), "%s\n", err_msg.buffer());
+      status = false;
+    }
   }
-  // Keeping the heap 100% free is hard ;-) so limit it to 99%.
-  MinHeapFreeRatio = MIN2(MinHeapFreeRatio, (uintx) 99);
 
   // Min/MaxMetaspaceFreeRatio
   status = status && verify_percentage(MinMetaspaceFreeRatio, "MinMetaspaceFreeRatio");
@@ -2693,7 +2738,7 @@
     } else if (match_option(option, "-Xmaxf", &tail)) {
       char* err;
       int maxf = (int)(strtod(tail, &err) * 100);
-      if (*err != '\0' || maxf < 0 || maxf > 100) {
+      if (*err != '\0' || *tail == '\0' || maxf < 0 || maxf > 100) {
         jio_fprintf(defaultStream::error_stream(),
                     "Bad max heap free percentage size: %s\n",
                     option->optionString);
@@ -2705,7 +2750,7 @@
     } else if (match_option(option, "-Xminf", &tail)) {
       char* err;
       int minf = (int)(strtod(tail, &err) * 100);
-      if (*err != '\0' || minf < 0 || minf > 100) {
+      if (*err != '\0' || *tail == '\0' || minf < 0 || minf > 100) {
         jio_fprintf(defaultStream::error_stream(),
                     "Bad min heap free percentage size: %s\n",
                     option->optionString);
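
The added *tail == '\0' test closes a real parsing hole: strtod("") returns 0.0 and leaves the end pointer at the start of the empty string, so the old *err != '\0' check passed and an empty -Xmaxf/-Xminf value was silently accepted as 0%. A minimal reproduction of the fixed check:

    #include <cstdlib>
    #include <cstdio>

    // Accepts only a complete, in-range fraction such as "0.4".
    static bool parse_fraction_percent(const char* tail, int* out) {
      char* err;
      int pct = (int)(strtod(tail, &err) * 100);
      // For tail == "", err points at tail and *err == '\0'; the extra
      // *tail == '\0' test is what actually rejects the empty value.
      if (*err != '\0' || *tail == '\0' || pct < 0 || pct > 100) return false;
      *out = pct;
      return true;
    }

    int main() {
      int pct;
      printf("%d %d\n", parse_fraction_percent("0.4", &pct),   // 1
                        parse_fraction_percent("",    &pct));  // 0
      return 0;
    }
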
@@ -3650,9 +3695,9 @@
   // Set per-collector flags
   if (UseParallelGC || UseParallelOldGC) {
     set_parallel_gc_flags();
-  } else if (UseConcMarkSweepGC) { // should be done before ParNew check below
+  } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
     set_cms_and_parnew_gc_flags();
-  } else if (UseParNewGC) {  // skipped if CMS is set above
+  } else if (UseParNewGC) {  // Skipped if CMS is set above
     set_parnew_gc_flags();
   } else if (UseG1GC) {
     set_g1_gc_flags();
@@ -3666,22 +3711,26 @@
               " using -XX:ParallelGCThreads=N");
     }
   }
+  if (MinHeapFreeRatio == 100) {
+    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
+    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
+  }
 #else // INCLUDE_ALL_GCS
   assert(verify_serial_gc_flags(), "SerialGC unset");
 #endif // INCLUDE_ALL_GCS
 
-  // Initialize Metaspace flags and alignments.
+  // Initialize Metaspace flags and alignments
   Metaspace::ergo_initialize();
 
   // Set bytecode rewriting flags
   set_bytecode_flags();
 
-  // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled.
+  // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled
   set_aggressive_opts_flags();
 
   // Turn off biased locking for locking debug mode flags,
-  // which are subtlely different from each other but neither works with
-  // biased locking.
+  // which are subtly different from each other but neither works with
+  // biased locking
   if (UseHeavyMonitors
 #ifdef COMPILER1
       || !UseFastLocking
@@ -3769,32 +3818,28 @@
     }
   }
 
-  // set PauseAtExit if the gamma launcher was used and a debugger is attached
-  // but only if not already set on the commandline
-  if (Arguments::created_by_gamma_launcher() && os::is_debugger_attached()) {
-    bool set = false;
-    CommandLineFlags::wasSetOnCmdline("PauseAtExit", &set);
-    if (!set) {
-      FLAG_SET_DEFAULT(PauseAtExit, true);
-    }
-  }
-
   return JNI_OK;
 }
 
 jint Arguments::adjust_after_os() {
-#if INCLUDE_ALL_GCS
-  if (UseParallelGC || UseParallelOldGC) {
-    if (UseNUMA) {
+  if (UseNUMA) {
+    if (UseParallelGC || UseParallelOldGC) {
       if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
-        FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+         FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
       }
-      // For those collectors or operating systems (eg, Windows) that do
-      // not support full UseNUMA, we will map to UseNUMAInterleaving for now
-      UseNUMAInterleaving = true;
+    }
+    // UseNUMAInterleaving is set to ON for all collectors and
+    // platforms when UseNUMA is set to ON. NUMA-aware collectors
+    // such as the parallel collector for Linux and Solaris will
+    // interleave old gen and survivor spaces on top of NUMA
+    // allocation policy for the eden space.
+    // Non-NUMA-aware collectors such as CMS, G1 and Serial-GC on
+    // all platforms and ParallelGC on Windows will interleave all
+    // of the heap spaces across NUMA nodes.
+    if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
+      FLAG_SET_ERGO(bool, UseNUMAInterleaving, true);
     }
   }
-#endif // INCLUDE_ALL_GCS
   return JNI_OK;
 }
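
The rewrite leans on the FLAG_IS_DEFAULT / FLAG_SET_ERGO pairing: ergonomics may turn UseNUMAInterleaving on, but only when the user has not set it, and the new value is recorded as ergonomically chosen rather than as a default. A standalone model of that origin-tracking idiom (illustrative types, not the HotSpot macros):

    #include <cstdio>

    enum Origin { DEFAULT_VALUE, COMMAND_LINE, ERGONOMIC };

    struct BoolFlag {
      bool   value;
      Origin origin;
    };

    // Mirrors FLAG_SET_ERGO guarded by FLAG_IS_DEFAULT: never override a
    // value the user chose explicitly.
    static void set_ergo(BoolFlag* f, bool v) {
      if (f->origin == DEFAULT_VALUE) {
        f->value  = v;
        f->origin = ERGONOMIC;
      }
    }

    int main() {
      BoolFlag use_numa_interleaving = { false, DEFAULT_VALUE };
      set_ergo(&use_numa_interleaving, true);       // applied: was default
      printf("%d\n", use_numa_interleaving.value);  // prints 1
      return 0;
    }
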
 
--- a/src/share/vm/runtime/arguments.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/arguments.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -27,6 +27,7 @@
 
 #include "runtime/java.hpp"
 #include "runtime/perfData.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/top.hpp"
 
 // Arguments parses the command line and recognizes options
@@ -268,14 +269,14 @@
   static const char* _java_vendor_url_bug;
 
   // sun.java.launcher, private property to provide information about
-  // java/gamma launcher
+  // java launcher
   static const char* _sun_java_launcher;
 
   // sun.java.launcher.pid, private property
   static int    _sun_java_launcher_pid;
 
-  // was this VM created by the gamma launcher
-  static bool   _created_by_gamma_launcher;
+  // was this VM created via the -XXaltjvm=<path> option
+  static bool   _sun_java_launcher_is_altjvm;
 
   // Option flags
   static bool   _has_profile;
@@ -370,11 +371,16 @@
   static jint parse_vm_init_args(const JavaVMInitArgs* args);
   static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, Flag::Flags origin);
   static jint finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required);
-  static bool is_bad_option(const JavaVMOption* option, jboolean ignore,
-    const char* option_type);
+  static bool is_bad_option(const JavaVMOption* option, jboolean ignore, const char* option_type);
+
   static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
     return is_bad_option(option, ignore, NULL);
   }
+
+  static bool is_percentage(uintx val) {
+    return val <= 100;
+  }
+
   static bool verify_interval(uintx val, uintx min,
                               uintx max, const char* name);
   static bool verify_min_value(intx val, intx min, const char* name);
@@ -440,11 +446,20 @@
   static jint apply_ergo();
   // Adjusts the arguments after the OS have adjusted the arguments
   static jint adjust_after_os();
+
+  // Verifies that the given value will fit as a MinHeapFreeRatio. If not, an error
+  // message is returned in the provided buffer.
+  static bool verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio);
+
+  // Verifies that the given value will fit as a MaxHeapFreeRatio. If not, an error
+  // message is returned in the provided buffer.
+  static bool verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio);
+
   // Check for consistency in the selection of the garbage collector.
   static bool check_gc_consistency();
   static void check_deprecated_gcs();
   static void check_deprecated_gc_flags();
-  // Check consistecy or otherwise of VM argument settings
+  // Check consistency or otherwise of VM argument settings
   static bool check_vm_args_consistency();
   // Check stack pages settings
   static bool check_stack_pages();
@@ -483,8 +498,8 @@
   static const char* sun_java_launcher()    { return _sun_java_launcher; }
   // Was VM created by a Java launcher?
   static bool created_by_java_launcher();
-  // Was VM created by the gamma Java launcher?
-  static bool created_by_gamma_launcher();
+  // -Dsun.java.launcher.is_altjvm
+  static bool sun_java_launcher_is_altjvm();
   // -Dsun.java.launcher.pid
   static int sun_java_launcher_pid()        { return _sun_java_launcher_pid; }
 
@@ -494,7 +509,7 @@
   // -Xprof
   static bool has_profile()                 { return _has_profile; }
 
-  // -Xms, -Xmx
+  // -Xms
   static uintx min_heap_size()              { return _min_heap_size; }
   static void  set_min_heap_size(uintx v)   { _min_heap_size = v;  }
 
--- a/src/share/vm/runtime/compilationPolicy.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/compilationPolicy.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -233,7 +233,7 @@
 }
 
 void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
-  // Delay next back-branch event but pump up invocation counter to triger
+  // Delay next back-branch event but pump up invocation counter to trigger
   // whole method compilation.
   MethodCounters* mcs = m->method_counters();
   assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
@@ -251,7 +251,7 @@
 //
 // CounterDecay
 //
-// Interates through invocation counters and decrements them. This
+// Iterates through invocation counters and decrements them. This
 // is done at each safepoint.
 //
 class CounterDecay : public AllStatic {
@@ -321,7 +321,7 @@
 }
 
 // This method can be called by any component of the runtime to notify the policy
-// that it's recommended to delay the complation of this method.
+// that it's recommended to delay the compilation of this method.
 void NonTieredCompPolicy::delay_compilation(Method* method) {
   MethodCounters* mcs = method->method_counters();
   if (mcs != NULL) {
--- a/src/share/vm/runtime/compilationPolicy.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/compilationPolicy.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -72,7 +72,7 @@
   // reprofile request
   virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
   // delay_compilation(method) can be called by any component of the runtime to notify the policy
-  // that it's recommended to delay the complation of this method.
+  // that it's recommended to delay the compilation of this method.
   virtual void delay_compilation(Method* method) = 0;
   // disable_compilation() is called whenever the runtime decides to disable compilation of the
   // specified method.
--- a/src/share/vm/runtime/deoptimization.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/deoptimization.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -383,7 +383,7 @@
   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
 
-  // It's possible that the number of paramters at the call site is
+  // It's possible that the number of parameters at the call site is
   // different than number of arguments in the callee when method
   // handles are used.  If the caller is interpreted get the real
   // value so that the proper amount of space can be added to it's
@@ -543,7 +543,7 @@
     // popframe condition bit set, we should always clear it now
     thread->clear_popframe_condition();
 #else
-    // C++ interpeter will clear has_pending_popframe when it enters
+    // C++ interpreter will clear has_pending_popframe when it enters
     // with method_resume. For deopt_resume2 we clear it now.
     if (thread->popframe_forcing_deopt_reexecution())
         thread->clear_popframe_condition();
--- a/src/share/vm/runtime/deoptimization.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/deoptimization.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -206,7 +206,7 @@
   // Called by assembly stub after execution has returned to
   // deoptimized frame and after the stack unrolling.
   // @argument thread.     Thread where stub_frame resides.
-  // @argument exec_mode.  Determines how execution should be continuted in top frame.
+  // @argument exec_mode.  Determines how execution should be continued in top frame.
   //                       0 means continue after current byte code
   //                       1 means exception has happened, handle exception
   //                       2 means reexecute current bytecode (for uncommon traps).
--- a/src/share/vm/runtime/frame.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/frame.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -649,7 +649,7 @@
 #endif
 }
 
-// Return whether the frame is in the VM or os indicating a Hotspot problem.
+// Print whether the frame is in the VM or OS indicating a HotSpot problem.
 // Otherwise, it's likely a bug in the native library that the Java code calls,
 // hopefully indicating where to submit bugs.
 void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
@@ -928,7 +928,7 @@
     // klass, and the klass needs to be kept alive while executing. The GCs
     // don't trace through method pointers, so typically in similar situations
     // the mirror or the class loader of the klass are installed as a GC root.
-    // To minimze the overhead of doing that here, we ask the GC to pass down a
+    // To minimize the overhead of doing that here, we ask the GC to pass down a
     // closure that knows how to keep klasses alive given a ClassLoaderData.
     cld_f->do_cld(m->method_holder()->class_loader_data());
   }
--- a/src/share/vm/runtime/globals.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/globals.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -31,6 +31,7 @@
 #include "utilities/ostream.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/top.hpp"
+#include "trace/tracing.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1_globals.hpp"
 #endif // INCLUDE_ALL_GCS
@@ -62,6 +63,14 @@
 MATERIALIZE_FLAGS_EXT
 
 
+static bool is_product_build() {
+#ifdef PRODUCT
+  return true;
+#else
+  return false;
+#endif
+}
+
 void Flag::check_writable() {
   if (is_constant_in_binary()) {
     fatal(err_msg("flag is constant: %s", _name));
@@ -235,6 +244,27 @@
 // Get custom message for this locked flag, or return NULL if
 // none is available.
 void Flag::get_locked_message(char* buf, int buflen) const {
+  buf[0] = '\0';
+  if (is_diagnostic() && !is_unlocked()) {
+    jio_snprintf(buf, buflen, "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n",
+                 _name);
+    return;
+  }
+  if (is_experimental() && !is_unlocked()) {
+    jio_snprintf(buf, buflen, "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n",
+                 _name);
+    return;
+  }
+  if (is_develop() && is_product_build()) {
+    jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n",
+                 _name);
+    return;
+  }
+  if (is_notproduct() && is_product_build()) {
+    jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n",
+                 _name);
+    return;
+  }
   get_locked_message_ext(buf, buflen);
 }
 
@@ -464,13 +494,13 @@
 }
 
 // Search the flag table for a named flag
-Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) {
+Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
   for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
     if (str_equal(current->_name, name, length)) {
       // Found a matching entry.
       // Don't report notproduct and develop flags in product builds.
       if (current->is_constant_in_binary()) {
-        return NULL;
+        return return_flag ? current : NULL;
       }
       // Report locked flags only if allowed.
       if (!(current->is_unlocked() || current->is_unlocker())) {
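
The new return_flag parameter lets a caller retrieve even a flag that is constant in this binary (a develop or notproduct flag in a product build). The argument-parsing change earlier in this changeset uses it so get_locked_message() can print a precise "not available in this kind of build" diagnostic instead of a generic unrecognized-option error. A standalone sketch of the lookup shape:

    #include <cstring>

    struct Flag { const char* name; bool constant_in_binary; };

    static Flag table[] = {
      { "TraceJVMCalls", true  },   // notproduct: constant in product builds
      { "UseNewCode",    false },
      { NULL,            false }
    };

    static Flag* find_flag(const char* name, bool return_hidden) {
      for (Flag* f = table; f->name != NULL; f++) {
        if (strcmp(f->name, name) == 0) {
          if (f->constant_in_binary) {
            // Hidden by default; returned only on explicit request so the
            // caller can produce a better error message.
            return return_hidden ? f : NULL;
          }
          return f;
        }
      }
      return NULL;
    }

    int main() {
      return find_flag("TraceJVMCalls", true) != NULL ? 0 : 1;
    }
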
@@ -564,6 +594,17 @@
   return true;
 }
 
+template<class E, class T>
+static void trace_flag_changed(const char* name, const T old_value, const T new_value, const Flag::Flags origin)
+{
+  E e;
+  e.set_name(name);
+  e.set_old_value(old_value);
+  e.set_new_value(new_value);
+  e.set_origin(origin);
+  e.commit();
+}
+
 bool CommandLineFlags::boolAt(char* name, size_t len, bool* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
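
Each typed setter now funnels through this one template, with the event type as a parameter so bool, intx, uintx, uint64_t, double and ccstr flags share the same tracing shell. A self-contained sketch of the shape; the event type here is a toy with plain members, not the HotSpot tracing API with its set_*() accessors:

    #include <iostream>
    #include <string>

    template <class T>
    struct FlagChangedEvent {
      std::string name;
      T old_value, new_value;
      int origin;
      void commit() {
        std::cout << name << ": " << old_value
                  << " -> " << new_value << "\n";
      }
    };

    template <class E, class T>
    static void trace_flag_changed(const char* name, T old_value,
                                   T new_value, int origin) {
      E e;
      e.name      = name;
      e.old_value = old_value;
      e.new_value = new_value;
      e.origin    = origin;
      e.commit();
    }

    int main() {
      trace_flag_changed<FlagChangedEvent<bool>, bool>("UseNewCode",
                                                       false, true, 0);
      return 0;
    }
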
@@ -577,6 +618,7 @@
   if (result == NULL) return false;
   if (!result->is_bool()) return false;
   bool old_value = result->get_bool();
+  trace_flag_changed<EventBooleanFlagChanged, bool>(name, old_value, *value, origin);
   result->set_bool(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -586,6 +628,7 @@
 void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type");
+  trace_flag_changed<EventBooleanFlagChanged, bool>(faddr->_name, faddr->get_bool(), value, origin);
   faddr->set_bool(value);
   faddr->set_origin(origin);
 }
@@ -603,6 +646,7 @@
   if (result == NULL) return false;
   if (!result->is_intx()) return false;
   intx old_value = result->get_intx();
+  trace_flag_changed<EventLongFlagChanged, s8>(name, old_value, *value, origin);
   result->set_intx(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -612,6 +656,7 @@
 void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type");
+  trace_flag_changed<EventLongFlagChanged, s8>(faddr->_name, faddr->get_intx(), value, origin);
   faddr->set_intx(value);
   faddr->set_origin(origin);
 }
@@ -629,6 +674,7 @@
   if (result == NULL) return false;
   if (!result->is_uintx()) return false;
   uintx old_value = result->get_uintx();
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
   result->set_uintx(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -638,6 +684,7 @@
 void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_uintx(), "wrong flag type");
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(faddr->_name, faddr->get_uintx(), value, origin);
   faddr->set_uintx(value);
   faddr->set_origin(origin);
 }
@@ -655,6 +702,7 @@
   if (result == NULL) return false;
   if (!result->is_uint64_t()) return false;
   uint64_t old_value = result->get_uint64_t();
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
   result->set_uint64_t(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -664,6 +712,7 @@
 void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(faddr->_name, faddr->get_uint64_t(), value, origin);
   faddr->set_uint64_t(value);
   faddr->set_origin(origin);
 }
@@ -681,6 +730,7 @@
   if (result == NULL) return false;
   if (!result->is_double()) return false;
   double old_value = result->get_double();
+  trace_flag_changed<EventDoubleFlagChanged, double>(name, old_value, *value, origin);
   result->set_double(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -690,6 +740,7 @@
 void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_double(), "wrong flag type");
+  trace_flag_changed<EventDoubleFlagChanged, double>(faddr->_name, faddr->get_double(), value, origin);
   faddr->set_double(value);
   faddr->set_origin(origin);
 }
@@ -707,6 +758,7 @@
   if (result == NULL) return false;
   if (!result->is_ccstr()) return false;
   ccstr old_value = result->get_ccstr();
+  trace_flag_changed<EventStringFlagChanged, const char*>(name, old_value, *value, origin);
   char* new_value = NULL;
   if (*value != NULL) {
     new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1, mtInternal);
@@ -728,6 +780,7 @@
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
   ccstr old_value = faddr->get_ccstr();
+  trace_flag_changed<EventStringFlagChanged, const char*>(faddr->_name, old_value, value, origin);
   char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal);
   strcpy(new_value, value);
   faddr->set_ccstr(new_value);
--- a/src/share/vm/runtime/globals.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/globals.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -255,7 +255,7 @@
   // number of flags
   static size_t numFlags;
 
-  static Flag* find_flag(const char* name, size_t length, bool allow_locked = false);
+  static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
   static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
 
   void check_writable();
@@ -579,7 +579,7 @@
           "Force NUMA optimizations on single-node/UMA systems")            \
                                                                             \
   product(uintx, NUMAChunkResizeWeight, 20,                                 \
-          "Percentage (0-100) used to weigh the current sample when "       \
+          "Percentage (0-100) used to weight the current sample when "      \
           "computing exponentially decaying average for "                   \
           "AdaptiveNUMAChunkSizing")                                        \
                                                                             \
@@ -1274,6 +1274,9 @@
   develop(bool, TraceJNICalls, false,                                       \
           "Trace JNI calls")                                                \
                                                                             \
+  develop(bool, StressRewriter, false,                                      \
+          "Stress linktime bytecode rewriting")                             \
+                                                                            \
   notproduct(bool, TraceJVMCalls, false,                                    \
           "Trace JVM calls")                                                \
                                                                             \
@@ -1516,7 +1519,7 @@
           "allocation")                                                     \
                                                                             \
   product(uintx, PLABWeight, 75,                                            \
-          "Percentage (0-100) used to weigh the current sample when "       \
+          "Percentage (0-100) used to weight the current sample when "      \
           "computing exponentially decaying average for ResizePLAB")        \
                                                                             \
   product(bool, ResizePLAB, true,                                           \
@@ -1625,11 +1628,11 @@
           "is shifted to the right within the period between young GCs")    \
                                                                             \
   product(uintx, CMSExpAvgFactor, 50,                                       \
-          "Percentage (0-100) used to weigh the current sample when "       \
+          "Percentage (0-100) used to weight the current sample when "      \
           "computing exponential averages for CMS statistics")              \
                                                                             \
   product(uintx, CMS_FLSWeight, 75,                                         \
-          "Percentage (0-100) used to weigh the current sample when "       \
+          "Percentage (0-100) used to weight the current sample when "      \
           "computing exponentially decaying averages for CMS FLS "          \
           "statistics")                                                     \
                                                                             \
@@ -1741,19 +1744,15 @@
           "to simulate overflow; a smaller number increases frequency")     \
                                                                             \
   product(uintx, CMSMaxAbortablePrecleanLoops, 0,                           \
-          "(Temporary, subject to experimentation) "                        \
           "Maximum number of abortable preclean iterations, if > 0")        \
                                                                             \
   product(intx, CMSMaxAbortablePrecleanTime, 5000,                          \
-          "(Temporary, subject to experimentation) "                        \
           "Maximum time in abortable preclean (in milliseconds)")           \
                                                                             \
   product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100,              \
-          "(Temporary, subject to experimentation) "                        \
           "Nominal minimum work per abortable preclean iteration")          \
                                                                             \
   manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
-          "(Temporary, subject to experimentation) "                        \
           "Time that we sleep between iterations when not given "           \
           "enough work per iteration")                                      \
                                                                             \
@@ -1969,13 +1968,13 @@
           "(other young collectors)")                                       \
                                                                             \
   develop(uintx, PromotionFailureALotInterval, 5,                           \
-          "Total collections between promotion failures alot")              \
+          "Total collections between promotion failures a lot")             \
                                                                             \
   experimental(uintx, WorkStealingSleepMillis, 1,                           \
           "Sleep time when sleep is used for yields")                       \
                                                                             \
   experimental(uintx, WorkStealingYieldsBeforeSleep, 5000,                  \
-          "Number of yields before a sleep is done during workstealing")    \
+          "Number of yields before a sleep is done during work stealing")   \
                                                                             \
   experimental(uintx, WorkStealingHardSpins, 4096,                          \
           "Number of iterations in a spin loop between checks on "          \
@@ -2053,7 +2052,7 @@
           "size; deprecated: to be renamed to MaxRAMFraction")              \
                                                                             \
   product(uintx, MinRAMFraction, 2,                                         \
-          "Minimum fraction (1/n) of real memory used for maxmimum heap "   \
+          "Minimum fraction (1/n) of real memory used for maximum heap "    \
           "size on systems with small physical memory size")                \
                                                                             \
   product(uintx, InitialRAMFraction, 64,                                    \
@@ -3160,15 +3159,15 @@
           "Maximum size of class area in Metaspace when compressed "        \
           "class pointers are used")                                        \
                                                                             \
-  product(uintx, MinHeapFreeRatio,    40,                                   \
+  manageable(uintx, MinHeapFreeRatio, 40,                                   \
           "The minimum percentage of heap free after GC to avoid expansion."\
-          " For most GCs this applies to the old generation. In G1 it"      \
-          " applies to the whole heap. Not supported by ParallelGC.")       \
-                                                                            \
-  product(uintx, MaxHeapFreeRatio,    70,                                   \
+          " For most GCs this applies to the old generation. In G1 and"     \
+          " ParallelGC it applies to the whole heap.")                      \
+                                                                            \
+  manageable(uintx, MaxHeapFreeRatio, 70,                                   \
           "The maximum percentage of heap free after GC to avoid shrinking."\
-          " For most GCs this applies to the old generation. In G1 it"      \
-          " applies to the whole heap. Not supported by ParallelGC.")       \
+          " For most GCs this applies to the old generation. In G1 and"     \
+          " ParallelGC it applies to the whole heap.")                      \
                                                                             \
   product(intx, SoftRefLRUPolicyMSPerMB, 1000,                              \
           "Number of milliseconds per MB of free space in the heap")        \
@@ -3665,7 +3664,7 @@
   product(uintx, MaxDirectMemorySize, 0,                                    \
           "Maximum total size of NIO direct-buffer allocations")            \
                                                                             \
-  /* temporary developer defined flags  */                                  \
+  /* Flags used for temporary code during development  */                   \
                                                                             \
   diagnostic(bool, UseNewCode, false,                                       \
           "Testing Only: Use the new version while testing")                \
--- a/src/share/vm/runtime/globals_extension.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/globals_extension.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -31,7 +31,7 @@
 
 // Construct enum of Flag_<cmdline-arg> constants.
 
-// Parens left off in the following for the enum decl below.
+// Parentheses left off in the following for the enum decl below.
 #define FLAG_MEMBER(flag) Flag_##flag
 
 #define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
--- a/src/share/vm/runtime/handles.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/handles.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -267,7 +267,7 @@
 // HandleMarks manually.
 //
 // A HandleMark constructor will record the current handle area top, and the
-// desctructor will reset the top, destroying all handles allocated in between.
+// destructor will reset the top, destroying all handles allocated in between.
 // The following code will therefore NOT work:
 //
 //   Handle h;
--- a/src/share/vm/runtime/java.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/java.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -608,6 +608,7 @@
   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 #else /* USDT2 */
   HOTSPOT_VM_SHUTDOWN();
+  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 #endif /* USDT2 */
 }
 
--- a/src/share/vm/runtime/javaCalls.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/javaCalls.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -302,7 +302,7 @@
   // Check if we need to wrap a potential OS exception handler around thread
   // This is used for e.g. Win32 structured exception handlers
   assert(THREAD->is_Java_thread(), "only JavaThreads can make JavaCalls");
-  // Need to wrap each and everytime, since there might be native code down the
+  // Need to wrap each and every time, since there might be native code down the
   // stack that has installed its own exception handlers
   os::os_exception_wrapper(call_helper, result, &method, args, THREAD);
 }
@@ -337,7 +337,7 @@
     // A klass might not be initialized since JavaCall's might be used during the executing of
     // the <clinit>. For example, a Thread.start might start executing on an object that is
     // not fully initialized! (bad Java programming style)
-    assert(holder->is_linked(), "rewritting must have taken place");
+    assert(holder->is_linked(), "rewriting must have taken place");
   }
 #endif
 
--- a/src/share/vm/runtime/jniHandles.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/jniHandles.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -195,8 +195,10 @@
   int _count;
 public:
   CountHandleClosure(): _count(0) {}
-  virtual void do_oop(oop* unused) {
-    _count++;
+  virtual void do_oop(oop* ooph) {
+    if (*ooph != JNIHandles::deleted_handle()) {
+      _count++;
+    }
   }
   virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
   int count() { return _count; }
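
The old closure counted every slot it visited; the fix skips the sentinel that JNIHandles uses to mark deleted handles, so the count now reflects live handles only. The same idea standalone, with an ordinary static object standing in for JNIHandles::deleted_handle():

    #include <cstddef>

    static int deleted_sentinel;  // stand-in for the deleted-handle marker

    static int count_live(void** slots, size_t n) {
      int count = 0;
      for (size_t i = 0; i < n; i++) {
        if (slots[i] != &deleted_sentinel) {  // skip deleted entries
          count++;
        }
      }
      return count;
    }

    int main() {
      int live;
      void* slots[3] = { &live, &deleted_sentinel, &live };
      return count_live(slots, 3) == 2 ? 0 : 1;
    }
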
@@ -461,7 +463,7 @@
     // Append new block
     Thread* thread = Thread::current();
     Handle obj_handle(thread, obj);
-    // This can block, so we need to preserve obj accross call.
+    // This can block, so we need to preserve obj across call.
     _last->_next = JNIHandleBlock::allocate_block(thread);
     _last = _last->_next;
     _allocate_before_rebuild--;
@@ -528,7 +530,7 @@
   return result;
 }
 
-// This method is not thread-safe, i.e., must be called whule holding a lock on the
+// This method is not thread-safe, i.e., must be called while holding a lock on the
 // structure.
 long JNIHandleBlock::memory_usage() const {
   return length() * sizeof(JNIHandleBlock);
--- a/src/share/vm/runtime/jniHandles.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/jniHandles.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -106,7 +106,7 @@
   JNIHandleBlock* _next;                        // Link to next block
 
   // The following instance variables are only used by the first block in a chain.
-  // Having two types of blocks complicates the code and the space overhead in negligble.
+  // Having two types of blocks complicates the code and the space overhead is negligible.
   JNIHandleBlock* _last;                        // Last block in use
   JNIHandleBlock* _pop_frame_link;              // Block to restore on PopLocalFrame call
   oop*            _free_list;                   // Handle free list
--- a/src/share/vm/runtime/mutex.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/mutex.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -280,16 +280,6 @@
   return x & 0x7FFFFFFF ;
 }
 
-static inline jint MarsagliaXOR (jint * const a) {
-  jint x = *a ;
-  if (x == 0) x = UNS(a)|1 ;
-  x ^= x << 6;
-  x ^= ((unsigned)x) >> 21;
-  x ^= x << 7 ;
-  *a = x ;
-  return x & 0x7FFFFFFF ;
-}
-
 static int Stall (int its) {
   static volatile jint rv = 1 ;
   volatile int OnFrame = 0 ;
@@ -507,7 +497,7 @@
   _OnDeck = NULL ;
 
   // Note that we current drop the inner lock (clear OnDeck) in the slow-path
-  // epilog immediately after having acquired the outer lock.
+  // epilogue immediately after having acquired the outer lock.
   // But instead we could consider the following optimizations:
   // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
   //    This might avoid potential reacquisition of the inner lock in IUlock().
@@ -931,7 +921,7 @@
 
   check_block_state(Self);
   if (Self->is_Java_thread()) {
-    // Horribile dictu - we suffer through a state transition
+    // Horrible dictu - we suffer through a state transition
     assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
     ThreadBlockInVM tbivm ((JavaThread *) Self) ;
     ILock (Self) ;
@@ -963,7 +953,7 @@
 }
 
 
-// Returns true if thread succeceed [sic] in grabbing the lock, otherwise false.
+// Returns true if thread succeeds in grabbing the lock, otherwise false.
 
 bool Monitor::try_lock() {
   Thread * const Self = Thread::current();
--- a/src/share/vm/runtime/mutex.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/mutex.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -90,7 +90,7 @@
   // A special lock: Is a lock where you are guaranteed not to block while you are
   // holding it, i.e., no vm operation can happen, taking other locks, etc.
   // NOTE: It is critical that the rank 'special' be the lowest (earliest)
-  // (except for "event"?) for the deadlock dection to work correctly.
+  // (except for "event"?) for the deadlock detection to work correctly.
   // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
   // which being external to the VM are not subject to deadlock detection.
   // The rank safepoint is used only for synchronization in reaching a
@@ -241,7 +241,7 @@
 //
 // Currently, however, the base object is a monitor.  Monitor contains all the
 // logic for wait(), notify(), etc.   Mutex extends monitor and restricts the
-// visiblity of wait(), notify(), and notify_all().
+// visibility of wait(), notify(), and notify_all().
 //
 // Another viable alternative would have been to have Monitor extend Mutex and
 // implement all the normal mutex and wait()-notify() logic in Mutex base class.
--- a/src/share/vm/runtime/mutexLocker.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/mutexLocker.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -46,7 +46,7 @@
 // Mutexes used in the VM.
 
 extern Mutex*   Patching_lock;                   // a lock used to guard code patching of compiled code
-extern Monitor* SystemDictionary_lock;           // a lock on the system dictonary
+extern Monitor* SystemDictionary_lock;           // a lock on the system dictionary
 extern Mutex*   PackageTable_lock;               // a lock on the class loader package table
 extern Mutex*   CompiledIC_lock;                 // a lock used to guard compiled IC patching and access
 extern Mutex*   InlineCacheBuffer_lock;          // a lock used to guard the InlineCacheBuffer
@@ -348,8 +348,8 @@
 //   - reentrant locking
 //   - locking out of order
 //
-// Only too be used for verify code, where we can relaxe out dead-lock
-// dection code a bit (unsafe, but probably ok). This code is NEVER to
+// Only to be used for verify code, where we can relax our dead-lock
+// detection code a bit (unsafe, but probably ok). This code is NEVER to
 // be included in a product version.
 //
 class VerifyMutexLocker: StackObj {
@@ -361,7 +361,7 @@
     _mutex     = mutex;
     _reentrant = mutex->owned_by_self();
     if (!_reentrant) {
-      // We temp. diable strict safepoint checking, while we require the lock
+      // We temp. disable strict safepoint checking, while we require the lock
       FlagSetting fs(StrictSafepointChecks, false);
       _mutex->lock();
     }
--- a/src/share/vm/runtime/objectMonitor.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/objectMonitor.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -234,7 +234,7 @@
 // * Taken together, the cxq and the EntryList constitute or form a
 //   single logical queue of threads stalled trying to acquire the lock.
 //   We use two distinct lists to improve the odds of a constant-time
-//   dequeue operation after acquisition (in the ::enter() epilog) and
+//   dequeue operation after acquisition (in the ::enter() epilogue) and
 //   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
 //   A key desideratum is to minimize queue & monitor metadata manipulation
 //   that occurs while holding the monitor lock -- that is, we want to
@@ -382,6 +382,12 @@
     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
     if (JvmtiExport::should_post_monitor_contended_enter()) {
       JvmtiExport::post_monitor_contended_enter(jt, this);
+
+      // The current thread does not yet own the monitor and does not
+      // yet appear on any queues that would get it made the successor.
+      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
+      // handler cannot accidentally consume an unpark() meant for the
+      // ParkEvent associated with this ObjectMonitor.
     }
 
     OSThreadContendState osts(Self->osthread());
@@ -439,6 +445,12 @@
   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
   if (JvmtiExport::should_post_monitor_contended_entered()) {
     JvmtiExport::post_monitor_contended_entered(jt, this);
+
+    // The current thread already owns the monitor and is not going to
+    // call park() for the remainder of the monitor enter protocol. So
+    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
+    // event handler consumed an unpark() issued by the thread that
+    // just exited the monitor.
   }
 
   if (event.should_commit()) {
@@ -677,7 +689,7 @@
         // non-null and elect a new "Responsible" timer thread.
         //
         // This thread executes:
-        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
+        //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
         //    LD cxq|EntryList               (in subsequent exit)
         //
         // Entering threads in the slow/contended path execute:
@@ -1456,6 +1468,14 @@
         // Note: 'false' parameter is passed here because the
         // wait was not timed out due to thread interrupt.
         JvmtiExport::post_monitor_waited(jt, this, false);
+
+        // In this short circuit of the monitor wait protocol, the
+        // current thread never drops ownership of the monitor and
+        // never gets added to the wait queue so the current thread
+        // cannot be made the successor. This means that the
+        // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
+        // consume an unpark() meant for the ParkEvent associated with
+        // this ObjectMonitor.
      }
      if (event.should_commit()) {
        post_monitor_wait_event(&event, 0, millis, false);
@@ -1499,21 +1519,6 @@
    exit (true, Self) ;                    // exit the monitor
    guarantee (_owner != Self, "invariant") ;
 
-   // As soon as the ObjectMonitor's ownership is dropped in the exit()
-   // call above, another thread can enter() the ObjectMonitor, do the
-   // notify(), and exit() the ObjectMonitor. If the other thread's
-   // exit() call chooses this thread as the successor and the unpark()
-   // call happens to occur while this thread is posting a
-   // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
-   // handler using RawMonitors and consuming the unpark().
-   //
-   // To avoid the problem, we re-post the event. This does no harm
-   // even if the original unpark() was not consumed because we are the
-   // chosen successor for this monitor.
-   if (node._notified != 0 && _succ == Self) {
-      node._event->unpark();
-   }
-
    // The thread is on the WaitSet list - now park() it.
    // On MP systems it's conceivable that a brief spin before we park
    // could be profitable.
@@ -1595,6 +1600,25 @@
      // post monitor waited event. Note that this is past-tense, we are done waiting.
      if (JvmtiExport::should_post_monitor_waited()) {
        JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
+
+       if (node._notified != 0 && _succ == Self) {
+         // In this part of the monitor wait-notify-reenter protocol it
+         // is possible (and normal) for another thread to do a fastpath
+         // monitor enter-exit while this thread is still trying to get
+         // to the reenter portion of the protocol.
+         //
+         // The ObjectMonitor was notified and the current thread is
+         // the successor which also means that an unpark() has already
+         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
+         // consume the unpark() that was done when the successor was
+         // set because the same ParkEvent is shared between Java
+         // monitors and JVM/TI RawMonitors (for now).
+         //
+         // We redo the unpark() to ensure forward progress, i.e., we
+         // don't want all pending threads hanging (parked) with none
+         // entering the unlocked monitor.
+         node._event->unpark();
+       }
      }
 
      if (event.should_commit()) {
@@ -2031,7 +2055,7 @@
           TEVENT (Spin abort -- too many spinners) ;
           return 0 ;
        }
-       // Slighty racy, but benign ...
+       // Slightly racy, but benign ...
        Adjust (&_Spinner, 1) ;
     }
 
--- a/src/share/vm/runtime/objectMonitor.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/objectMonitor.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -101,7 +101,7 @@
   static int Spinner_offset_in_bytes()     { return offset_of(ObjectMonitor, _Spinner);    }
 
  public:
-  // Eventaully we'll make provisions for multiple callbacks, but
+  // Eventually we'll make provisions for multiple callbacks, but
   // now one will suffice.
   static int (*SpinCallbackFunction)(intptr_t, int) ;
   static intptr_t SpinCallbackArgument ;
@@ -272,7 +272,7 @@
   // type int, or int32_t but not intptr_t.  There's no reason
   // to use 64-bit fields for these variables on a 64-bit JVM.
 
-  volatile intptr_t  _count;        // reference count to prevent reclaimation/deflation
+  volatile intptr_t  _count;        // reference count to prevent reclamation/deflation
                                     // at stop-the-world time.  See deflate_idle_monitors().
                                     // _count is approximately |_WaitSet| + |_EntryList|
  protected:
--- a/src/share/vm/runtime/orderAccess.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/orderAccess.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -61,13 +61,13 @@
 //
 // Ensures that Load1 completes before Store2 and any subsequent store
 // operations.  Loads before Load1 may *not* float below Store2 and any
-// subseqeuent store operations.
+// subsequent store operations.
 //
 // StoreLoad:  Store1(s); StoreLoad; Load2
 //
 // Ensures that Store1 completes before Load2 and any subsequent load
 // operations.  Stores before Store1 may *not* float below Load2 and any
-// subseqeuent load operations.
+// subsequent load operations.
 //
 //
 // We define two further operations, 'release' and 'acquire'.  They are
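
The barrier taxonomy above maps directly onto C++11 atomics, which makes the rules concrete: a release store keeps earlier memory operations from floating below it, and an acquire load keeps later ones from floating above it. A small sketch of the pairing (plain C++11, not HotSpot's OrderAccess itself):

    #include <atomic>

    std::atomic<bool> ready(false);
    int payload = 0;

    void producer() {
      payload = 42;                                  // ordinary store
      ready.store(true, std::memory_order_release);  // publishes payload
    }

    int consumer() {
      while (!ready.load(std::memory_order_acquire)) { }  // waits, then
      return payload;                                     // sees 42
    }

    int main() {
      producer();
      return consumer() == 42 ? 0 : 1;
    }
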
@@ -176,7 +176,7 @@
 // compilers that we currently use (SunStudio, gcc and VC++) respect the
 // semantics of volatile here. If you build HotSpot using other
 // compilers, you may need to verify that no compiler reordering occurs
-// across the sequence point respresented by the volatile access.
+// across the sequence point represented by the volatile access.
 //
 //
 //                os::is_MP Considered Redundant
@@ -311,7 +311,7 @@
  private:
   // This is a helper that invokes the StubRoutines::fence_entry()
   // routine if it exists, It should only be used by platforms that
-  // don't another way to do the inline eassembly.
+  // don't have another way to do the inline assembly.
   static void StubRoutines_fence();
 };
 
--- a/src/share/vm/runtime/os.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/os.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -236,7 +236,7 @@
   while (true) {
     int sig;
     {
-      // FIXME : Currently we have not decieded what should be the status
+      // FIXME : Currently we have not decided what should be the status
       //         for this java thread blocked here. Once we decide about
       //         that we should fix this.
       sig = os::signal_wait();
@@ -362,7 +362,7 @@
       // exceptions anyway, check and abort if this fails.
       if (signal_thread == NULL || signal_thread->osthread() == NULL) {
         vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                      "unable to create new native thread");
+                                      os::native_thread_creation_failed_msg());
       }
 
       java_lang_Thread::set_thread(thread_oop(), signal_thread);
@@ -583,7 +583,7 @@
   ptrdiff_t size = *size_addr_from_base(start_of_prev_block);
   u_char* obj = start_of_prev_block + space_before;
   if (size <= 0 ) {
-    // start is bad; mayhave been confused by OS data inbetween objects
+    // start is bad; may have been confused by OS data in between objects
     // search one more backwards
     start_of_prev_block = find_cushion_backwards(start_of_prev_block);
     size = *size_addr_from_base(start_of_prev_block);
@@ -1011,7 +1011,7 @@
   if (Universe::heap()->is_in(addr)) {
     HeapWord* p = Universe::heap()->block_start(addr);
     bool print = false;
-    // If we couldn't find it it just may mean that heap wasn't parseable
+    // If we couldn't find it, it just may mean that heap wasn't parsable
     // See if we were just given an oop directly
     if (p != NULL && Universe::heap()->block_is_obj(p)) {
       print = true;
@@ -1197,7 +1197,7 @@
                            char fileSep,
                            char pathSep) {
     assert((fileSep == '/' && pathSep == ':') ||
-           (fileSep == '\\' && pathSep == ';'), "unexpected seperator chars");
+           (fileSep == '\\' && pathSep == ';'), "unexpected separator chars");
 
     // Scan the format string to determine the length of the actual
     // boot classpath, and handle platform dependencies as well.
@@ -1443,7 +1443,7 @@
 // >= 2 physical CPU's and >=2GB of memory, with some fuzz
 // because the graphics memory (?) sometimes masks physical memory.
 // If you want to change the definition of a server class machine
-// on some OS or platform, e.g., >=4GB on Windohs platforms,
+// on some OS or platform, e.g., >=4GB on Windows platforms,
 // then you'll have to parameterize this method based on that state,
 // as was done for logical processors here, or replicate and
 // specialize this method for each platform.  (Or fix os to have
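
For reference, the heuristic described in the os.cpp comment above (">= 2 physical CPUs and >= 2GB of memory, with some fuzz") can be sketched as below; the fuzz constant and the memory query are placeholders, not HotSpot's actual values:

    // Sketch of the server-class-machine predicate described above.
    // The fuzz allows graphics memory to mask some physical memory.
    #include <cstdint>
    #include <thread>

    static bool is_server_class_machine_sketch(uint64_t physical_memory) {
      const unsigned required_cpus   = 2;
      const uint64_t required_memory = 2ULL * 1024 * 1024 * 1024;  // 2GB
      const uint64_t fuzz            = 128ULL * 1024 * 1024;       // assumed slop
      unsigned cpus = std::thread::hardware_concurrency();
      return cpus >= required_cpus &&
             physical_memory + fuzz >= required_memory;
    }
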
--- a/src/share/vm/runtime/os.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/os.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -399,7 +399,7 @@
     // was equal.  However, some platforms mask off faulting addresses
     // to the page size, so now we just check that the address is
     // within the page.  This makes the thread argument unnecessary,
-    // but we retain the NULL check to preserve existing behaviour.
+    // but we retain the NULL check to preserve existing behavior.
     if (thread == NULL) return false;
     address page = (address) _mem_serialize_page;
     return addr >= page && addr < (page + os::vm_page_size());
@@ -434,7 +434,10 @@
   static intx current_thread_id();
   static int current_process_id();
   static int sleep(Thread* thread, jlong ms, bool interruptable);
-  static int naked_sleep();
+  // Short standalone OS sleep suitable for a slow-path spin loop.
+  // Ignores Thread.interrupt() (so keep it short).
+  // If ms = 0, it will sleep for the least amount of time allowed by the OS.
+  static void naked_short_sleep(jlong ms);
   static void infinite_sleep(); // never returns, use with CAUTION
   static void yield();        // Yields to all threads with same priority
   enum YieldResult {
@@ -544,7 +547,7 @@
 
   // Loads .dll/.so and
   // in case of error it checks if .dll/.so was built for the
-  // same architecture as Hotspot is running on
+  // same architecture as HotSpot is running on
   static void* dll_load(const char *name, char *ebuf, int ebuflen);
 
   // lookup symbol in a shared library
@@ -815,6 +818,10 @@
 # include "os_bsd_zero.hpp"
 #endif
 
+#ifndef OS_NATIVE_THREAD_CREATION_FAILED_MSG
+#define OS_NATIVE_THREAD_CREATION_FAILED_MSG "unable to create native thread: possibly out of memory or process/resource limits reached"
+#endif
+
  public:
 #ifndef PLATFORM_PRINT_NATIVE_STACK
   // No platform-specific code for printing the native stack.
@@ -837,6 +844,9 @@
   // Hint to the underlying OS that a task switch would not be good.
   // Void return because it's a hint and can fail.
   static void hint_no_preempt();
+  static const char* native_thread_creation_failed_msg() {
+    return OS_NATIVE_THREAD_CREATION_FAILED_MSG;
+  }
 
   // Used at creation if requested by the diagnostic flag PauseAtStartup.
   // Causes the VM to wait until an external stimulus has been applied
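
The new naked_short_sleep(jlong ms) above exists so slow-path spin loops can back off without going through a ParkEvent; the Thread::SpinAcquire hunk in thread.cpp further down is the first caller. A sketch of the intended yield-then-sleep pattern, with std::this_thread standing in for os::NakedYield and os::naked_short_sleep:

    // Spin with a few yields, then fall back to short,
    // interrupt-oblivious sleeps (the naked_short_sleep role).
    #include <atomic>
    #include <chrono>
    #include <thread>

    void spin_acquire_sketch(std::atomic_flag& lock) {
      int yields = 0;
      while (lock.test_and_set(std::memory_order_acquire)) {
        if (yields > 5) {
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } else {
          std::this_thread::yield();
          ++yields;
        }
      }
    }
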
--- a/src/share/vm/runtime/park.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/park.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -59,58 +59,22 @@
 
   // Start by trying to recycle an existing but unassociated
   // ParkEvent from the global free list.
-  for (;;) {
-    ev = FreeList ;
-    if (ev == NULL) break ;
-    // 1: Detach - sequester or privatize the list
-    // Tantamount to ev = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
-       continue ;
+  // Using a spin lock since we are part of the mutex impl.
+  // 8028280: using concurrent free list without memory management can leak
+  // pretty badly, it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
+  {
+    ev = FreeList;
+    if (ev != NULL) {
+      FreeList = ev->FreeNext;
     }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    ParkEvent * List = ev->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        ParkEvent * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
   }
+  Thread::SpinRelease(&ListLock);
 
   if (ev != NULL) {
     guarantee (ev->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new ParkEvent.
-    // In rare cases an allocating thread might detach a long list --
-    // installing null into FreeList -- and then stall or be obstructed.
-    // A 2nd thread calling Allocate() would see FreeList == null.
-    // The list held privately by the 1st thread is unavailable to the 2nd thread.
-    // In that case the 2nd thread would have to materialize a new ParkEvent,
-    // even though free ParkEvents existed in the system.  In this case we end up
-    // with more ParkEvents in circulation than we need, but the race is
-    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
-    // is equal to the maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the freelist
-    // can be transiently inaccessible.  At worst we may end up with the
-    // # of ParkEvents in circulation slightly above the ideal.
-    // Note that if we didn't have the TSM/immortal constraint, then
-    // when reattaching, above, we could trim the list.
     ev = new ParkEvent () ;
     guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
   }
@@ -124,13 +88,14 @@
   if (ev == NULL) return ;
   guarantee (ev->FreeNext == NULL      , "invariant") ;
   ev->AssociatedWith = NULL ;
-  for (;;) {
-    // Push ev onto FreeList
-    // The mechanism is "half" lock-free.
-    ParkEvent * List = FreeList ;
-    ev->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+  // Note that if we didn't have the TSM/immortal constraint, then
+  // when reattaching we could trim the list.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
+  {
+    ev->FreeNext = FreeList;
+    FreeList = ev;
   }
+  Thread::SpinRelease(&ListLock);
 }
 
 // Override operator new and delete so we can ensure that the
@@ -152,7 +117,7 @@
 
 // 6399321 As a temporary measure we copied & modified the ParkEvent::
 // allocate() and release() code for use by Parkers.  The Parker:: forms
-// will eventually be removed as we consolide and shift over to ParkEvents
+// will eventually be removed as we consolidate and shift over to ParkEvents
 // for both builtin synchronization and JSR166 operations.
 
 volatile int Parker::ListLock = 0 ;
@@ -164,56 +129,21 @@
 
   // Start by trying to recycle an existing but unassociated
   // Parker from the global free list.
-  for (;;) {
-    p = FreeList ;
-    if (p  == NULL) break ;
-    // 1: Detach
-    // Tantamount to p = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
-       continue ;
+  // 8028280: using concurrent free list without memory management can leak
+  // pretty badly, it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
+  {
+    p = FreeList;
+    if (p != NULL) {
+      FreeList = p->FreeNext;
     }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    Parker * List = p->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        Parker * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
   }
+  Thread::SpinRelease(&ListLock);
 
   if (p != NULL) {
     guarantee (p->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new Parker..
-    // In rare cases an allocating thread might detach
-    // a long list -- installing null into FreeList --and
-    // then stall.  Another thread calling Allocate() would see
-    // FreeList == null and then invoke the ctor.  In this case we
-    // end up with more Parkers in circulation than we need, but
-    // the race is rare and the outcome is benign.
-    // Ideally, the # of extant Parkers is equal to the
-    // maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the
-    // freelist can be transiently inaccessible.  At worst
-    // we may end up with the # of Parkers in circulation
-    // slightly above the ideal.
     p = new Parker() ;
   }
   p->AssociatedWith = t ;          // Associate p with t
@@ -227,11 +157,12 @@
   guarantee (p->AssociatedWith != NULL, "invariant") ;
   guarantee (p->FreeNext == NULL      , "invariant") ;
   p->AssociatedWith = NULL ;
-  for (;;) {
-    // Push p onto FreeList
-    Parker * List = FreeList ;
-    p->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
+
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
+  {
+    p->FreeNext = FreeList;
+    FreeList = p;
   }
+  Thread::SpinRelease(&ListLock);
 }
 
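The park.cpp rewrite above trades the lock-free detach/extract/reattach free list for a spin-lock-guarded LIFO, since the concurrent version could leak ParkEvents badly (8028280). The essence of the new scheme, sketched in generic C++ with a trivial spin lock standing in for Thread::SpinAcquire/SpinRelease:

    // Spin-lock-guarded free list: pop on allocate, push on release.
    #include <atomic>

    struct Node { Node* free_next = nullptr; };

    class SpinLock {
      std::atomic_flag _f = ATOMIC_FLAG_INIT;
     public:
      void lock()   { while (_f.test_and_set(std::memory_order_acquire)) {} }
      void unlock() { _f.clear(std::memory_order_release); }
    };

    static SpinLock list_lock;
    static Node*    free_list = nullptr;

    Node* allocate() {
      Node* n;
      list_lock.lock();
      n = free_list;
      if (n != nullptr) free_list = n->free_next;  // pop the head
      list_lock.unlock();
      if (n == nullptr) n = new Node();  // materialize the hard way
      return n;
    }

    void release(Node* n) {
      list_lock.lock();
      n->free_next = free_list;  // push onto the head
      free_list = n;
      list_lock.unlock();
    }

The critical sections are only a handful of instructions, which is why a spin lock is acceptable inside the mutex implementation itself.
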
--- a/src/share/vm/runtime/perfData.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/perfData.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -39,7 +39,7 @@
 PerfDataList*   PerfDataManager::_constants = NULL;
 
 /*
- * The jvmstat global and subsysem jvmstat counter name spaces. The top
+ * The jvmstat global and subsystem jvmstat counter name spaces. The top
  * level name spaces imply the interface stability level of the counter,
  * which generally follows the Java package, class, and property naming
  * conventions. The CounterNS enumeration values should be used to index
--- a/src/share/vm/runtime/perfData.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/perfData.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -116,7 +116,7 @@
  *
  * A PerfData subtype is not required to provide an implementation for
  * each variability classification. For example, the String type provides
- * Variable and Constant variablility classifications in the PerfStringVariable
+ * Variable and Constant variability classifications in the PerfStringVariable
  * and PerfStringConstant classes, but does not provide a counter type.
  *
  * Performance data are also described by a unit of measure. Units allow
@@ -172,10 +172,10 @@
  *   foo_counter->inc();
  *
  * Creating a performance counter that holds a variably change long
- * data value with untis specified in U_Bytes in the "com.sun.ci
+ * data value with units specified in U_Bytes in the "com.sun.ci
  * name space.
  *
- *   PerfLongVariable* bar_varible;
+ *   PerfLongVariable* bar_variable;
  *   bar_variable = PerfDataManager::create_long_variable(COM_CI, "bar",
 .*                                                        PerfData::U_Bytes,
  *                                                        optionalInitialValue,
@@ -203,7 +203,7 @@
  *    In this example, the PerfData pointer can be ignored as the caller
  *    is relying on the StatSampler PeriodicTask to sample the given
  *    address at a regular interval. The interval is defined by the
- *    PerfDataSamplingInterval global variable, and is applyied on
+ *    PerfDataSamplingInterval global variable, and is applied on
  *    a system wide basis, not on an per-counter basis.
  *
  * Creating a performance counter in an arbitrary name space that utilizes
@@ -234,7 +234,7 @@
  * the UsePerfData flag. Counters will be created on the c-heap
  * if UsePerfData is false.
  *
- * Until further noice, all PerfData objects should be created and
+ * Until further notice, all PerfData objects should be created and
  * manipulated within a guarded block. The guard variable is
  * UsePerfData, a product flag set to true by default. This flag may
  * be removed from the product in the future.
@@ -586,7 +586,7 @@
  *
  * The abstraction is not complete. A more general container class
  * would provide an Iterator abstraction that could be used to
- * traverse the lists. This implementation still relys upon integer
+ * traverse the lists. This implementation still relies upon integer
  * iterators and the at(int index) method. However, the GrowableArray
  * is not directly visible outside this class and can be replaced by
  * some other implementation, as long as that implementation provides
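
Pulling the perfData.hpp documentation above together, the guarded creation-and-update pattern it describes condenses to roughly the following (HotSpot-style pseudocode echoing the header's own examples; treat the exact signatures as approximate):

    // Create under the UsePerfData guard, then update directly.
    PerfLongVariable* bar_variable = NULL;
    if (UsePerfData) {
      EXCEPTION_MARK;
      bar_variable = PerfDataManager::create_long_variable(COM_CI, "bar",
                                                           PerfData::U_Bytes,
                                                           0 /* initial */,
                                                           CHECK);
    }
    if (bar_variable != NULL) {
      bar_variable->set_value(42);  // writes the instrumentation slot
    }
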
--- a/src/share/vm/runtime/perfMemory.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/perfMemory.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -55,7 +55,7 @@
  * of the fields must be changed along with their counterparts in the
  * PerfDataBuffer Java class. The first four bytes of this structure
  * should never change, or compatibility problems between the monitoring
- * applications and Hotspot VMs will result. The reserved fields are
+ * applications and HotSpot VMs will result. The reserved fields are
  * available for future enhancements.
  */
 typedef struct {
--- a/src/share/vm/runtime/reflection.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/reflection.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -482,7 +482,7 @@
     ik = InstanceKlass::cast(hc);
 
     // There's no way to make a host class loop short of patching memory.
-    // Therefore there cannot be a loop here unles there's another bug.
+    // Therefore there cannot be a loop here unless there's another bug.
     // Still, let's check for it.
     assert(--inf_loop_check > 0, "no host_klass loop");
   }
--- a/src/share/vm/runtime/reflection.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/reflection.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -138,9 +138,9 @@
   static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS);
 
 public:
-  // Method invokation through java.lang.reflect.Method
+  // Method invocation through java.lang.reflect.Method
   static oop      invoke_method(oop method_mirror, Handle receiver, objArrayHandle args, TRAPS);
-  // Method invokation through java.lang.reflect.Constructor
+  // Method invocation through java.lang.reflect.Constructor
   static oop      invoke_constructor(oop method_mirror, objArrayHandle args, TRAPS);
 
 };
--- a/src/share/vm/runtime/registerMap.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/registerMap.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -70,7 +70,7 @@
 //   3) The RegisterMap keeps track of the values of callee-saved registers
 //      from frame to frame (hence, the name).  For some stack traversal the
 //      values of the callee-saved registers does not matter, e.g., if you
-//      only need the static properies such as frame type, pc, and such.
+//      only need the static properties such as frame type, pc, and such.
 //      Updating of the RegisterMap can be turned off by instantiating the
 //      register map as: RegisterMap map(thread, false);
 
--- a/src/share/vm/runtime/relocator.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/relocator.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -141,7 +141,7 @@
 }
 
 // size is the new size of the instruction at bci. Hence, if size is less than the current
-// instruction sice, we will shrink the code.
+// instruction size, we will shrink the code.
 methodHandle Relocator::insert_space_at(int bci, int size, u_char inst_buffer[], TRAPS) {
   _changes = new GrowableArray<ChangeItem*> (10);
   _changes->push(new ChangeWiden(bci, size, inst_buffer));
@@ -192,7 +192,7 @@
     // Execute operation
     if (!ci->handle_code_change(this)) return false;
 
-    // Shuffel items up
+    // Shuffle items up
     for (int index = 1; index < _changes->length(); index++) {
       _changes->at_put(index-1, _changes->at(index));
     }
@@ -214,7 +214,7 @@
 }
 
 // We need a special instruction size method, since lookupswitches and tableswitches might not be
-// properly alligned during relocation
+// properly aligned during relocation
 int Relocator::rc_instr_len(int bci) {
   Bytecodes::Code bc= code_at(bci);
   switch (bc) {
@@ -611,7 +611,7 @@
 
   // In case we have shrunken a tableswitch/lookupswitch statement, we store the last
   // bytes that get overwritten. We have to copy the bytes after the change_jumps method
-  // has been called, since it is likly to update last offset in a tableswitch/lookupswitch
+  // has been called, since it is likely to update last offset in a tableswitch/lookupswitch
   if (delta < 0) {
     assert(delta>=-3, "we cannot overwrite more than 3 bytes");
     memcpy(_overwrite, addr_at(bci + ilen + delta), -delta);
--- a/src/share/vm/runtime/safepoint.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/safepoint.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -156,7 +156,7 @@
   // stopped by different mechanisms:
   //
   //  1. Running interpreted
-  //     The interpeter dispatch table is changed to force it to
+  //     The interpreter dispatch table is changed to force it to
   //     check for a safepoint condition between bytecodes.
   //  2. Running in native code
   //     When returning from the native code, a Java thread must check
@@ -282,7 +282,7 @@
       // See the comments in synchronizer.cpp for additional remarks on spinning.
       //
       // In the future we might:
-      // 1. Modify the safepoint scheme to avoid potentally unbounded spinning.
+      // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
       //    This is tricky as the path used by a thread exiting the JVM (say on
       //    on JNI call-out) simply stores into its state field.  The burden
       //    is placed on the VM thread, which must poll (spin).
@@ -489,7 +489,7 @@
     ConcurrentGCThread::safepoint_desynchronize();
   }
 #endif // INCLUDE_ALL_GCS
-  // record this time so VMThread can keep track how much time has elasped
+  // record this time so VMThread can keep track how much time has elapsed
   // since last safepoint.
   _end_of_last_safepoint = os::javaTimeMillis();
 }
@@ -826,7 +826,7 @@
 void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason) {
   if (!timeout_error_printed) {
     timeout_error_printed = true;
-    // Print out the thread infor which didn't reach the safepoint for debugging
+    // Print out info for the threads which didn't reach the safepoint, for debugging
     // purposes (useful when there are lots of threads in the debugger).
     tty->print_cr("");
     tty->print_cr("# SafepointSynchronize::begin: Timeout detected:");
@@ -1093,7 +1093,7 @@
       if (caller_fr.is_deoptimized_frame()) {
         // The exception patch will destroy registers that are still
         // live and will be needed during deoptimization. Defer the
-        // Async exception should have defered the exception until the
+        // Async exception should have deferred the exception until the
         // next safepoint which will be detected when we get into
         // the interpreter so if we have an exception now things
         // are messed up.
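
The safepoint.cpp comments above describe how each thread state is brought to a stop (interpreter dispatch-table swap, checks on native returns, polling in compiled code). Stripped of those mechanisms, the underlying idea is cooperative polling, sketched here in portable C++ (HotSpot arms a polling page rather than testing a boolean):

    // Cooperative safepoint polling: mutators check a flag at
    // well-defined points and block until the VM thread releases them.
    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    std::atomic<bool> safepoint_requested{false};
    std::mutex mu;
    std::condition_variable cv;

    void poll_for_safepoint() {  // conceptually "between bytecodes"
      if (safepoint_requested.load(std::memory_order_acquire)) {
        std::unique_lock<std::mutex> lk(mu);
        cv.wait(lk, [] { return !safepoint_requested.load(); });
      }
    }

    void end_safepoint() {  // VM thread side
      safepoint_requested.store(false, std::memory_order_release);
      cv.notify_all();
    }
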
--- a/src/share/vm/runtime/safepoint.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/safepoint.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -59,7 +59,7 @@
  public:
   enum SynchronizeState {
       _not_synchronized = 0,                   // Threads not synchronized at a safepoint
-                                               // Keep this value 0. See the coment in do_call_back()
+                                               // Keep this value 0. See the comment in do_call_back()
       _synchronizing    = 1,                   // Synchronizing in progress
       _synchronized     = 2                    // All Java threads are stopped at a safepoint. Only VM thread is running
   };
@@ -91,7 +91,7 @@
   } SafepointStats;
 
  private:
-  static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquireing the Threads_lock
+  static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquiring the Threads_lock
   static volatile int _waiting_to_block;       // number of threads we are waiting for to block
   static int _current_jni_active_count;        // Counts the number of active critical natives during the safepoint
 
@@ -106,7 +106,7 @@
 private:
   static long       _end_of_last_safepoint;     // Time of last safepoint in milliseconds
 
-  // statistics
+  // Statistics
   static jlong            _safepoint_begin_time;     // time when safepoint begins
   static SafepointStats*  _safepoint_stats;          // array of SafepointStats struct
   static int              _cur_stat_index;           // current index to the above array
@@ -155,7 +155,7 @@
     _current_jni_active_count++;
   }
 
-  // Called when a thread volantary blocks
+  // Called when a thread voluntarily blocks
   static void   block(JavaThread *thread);
   static void   signal_thread_at_safepoint()              { _waiting_to_block--; }
 
@@ -172,7 +172,7 @@
   static bool is_cleanup_needed();
   static void do_cleanup_tasks();
 
-  // debugging
+  // Debugging
   static void print_state()                                PRODUCT_RETURN;
   static void safepoint_msg(const char* format, ...)       PRODUCT_RETURN;
 
@@ -183,7 +183,7 @@
   static void set_is_at_safepoint()                        { _state = _synchronized; }
   static void set_is_not_at_safepoint()                    { _state = _not_synchronized; }
 
-  // assembly support
+  // Assembly support
   static address address_of_state()                        { return (address)&_state; }
 
   static address safepoint_counter_addr()                  { return (address)&_safepoint_counter; }
--- a/src/share/vm/runtime/serviceThread.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/serviceThread.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -27,6 +27,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/serviceThread.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "services/gcNotifier.hpp"
 #include "services/diagnosticArgument.hpp"
@@ -66,7 +67,7 @@
     // exceptions anyway, check and abort if this fails.
     if (thread == NULL || thread->osthread() == NULL) {
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
+                                    os::native_thread_creation_failed_msg());
     }
 
     java_lang_Thread::set_thread(thread_oop(), thread);
--- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -472,7 +472,7 @@
   return (jdouble)x;
 JRT_END
 
-// Exception handling accross interpreter/compiler boundaries
+// Exception handling across interpreter/compiler boundaries
 //
 // exception_handler_for_return_address(...) returns the continuation address.
 // The continuation address is the entry point of the exception handler of the
@@ -694,8 +694,8 @@
     // Allow abbreviated catch tables.  The idea is to allow a method
     // to materialize its exceptions without committing to the exact
     // routing of exceptions.  In particular this is needed for adding
-    // a synthethic handler to unlock monitors when inlining
-    // synchonized methods since the unlock path isn't represented in
+    // a synthetic handler to unlock monitors when inlining
+    // synchronized methods since the unlock path isn't represented in
     // the bytecodes.
     t = table.entry_for(catch_pco, -1, 0);
   }
@@ -819,7 +819,7 @@
           // Exception happened in CodeCache. Must be either:
           // 1. Inline-cache check in C2I handler blob,
           // 2. Inline-cache check in nmethod, or
-          // 3. Implict null exception in nmethod
+          // 3. Implicit null exception in nmethod
 
           if (!cb->is_nmethod()) {
             bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
@@ -2915,7 +2915,7 @@
 // called from very start of a compiled OSR nmethod.  A temp array is
 // allocated to hold the interesting bits of the interpreter frame.  All
 // active locks are inflated to allow them to move.  The displaced headers and
-// active interpeter locals are copied into the temp buffer.  Then we return
+// active interpreter locals are copied into the temp buffer.  Then we return
 // back to the compiled code.  The compiled code then pops the current
 // interpreter frame off the stack and pushes a new compiled frame.  Then it
 // copies the interpreter locals and displaced headers where it wants.
--- a/src/share/vm/runtime/sharedRuntime.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -400,7 +400,7 @@
   // present if we see that compiled code is present the compiled call site
   // will be patched/re-resolved so that later calls will run compiled.
 
-  // Aditionally a c2i blob need to have a unverified entry because it can be reached
+  // Additionally a c2i blob needs to have an unverified entry because it can be reached
   // in situations where the call site is an inlined cache site and may go megamorphic.
 
   // A i2c adapter is simpler than the c2i adapter. This is because it is assumed
@@ -594,7 +594,7 @@
 // arguments for a Java-compiled call, and jumps to Rmethod-> code()->
 // code_begin().  It is broken to call it without an nmethod assigned.
 // The usual behavior is to lift any register arguments up out of the
-// stack and possibly re-pack the extra arguments to be contigious.
+// stack and possibly re-pack the extra arguments to be contiguous.
 // I2C adapters will save what the interpreter's stack pointer will be
 // after arguments are popped, then adjust the interpreter's frame
 // size to force alignment and possibly to repack the arguments.
@@ -611,7 +611,7 @@
 // outgoing stack args will be dead after the copy.
 //
 // Native wrappers, like adapters, marshal arguments.  Unlike adapters they
-// also perform an offical frame push & pop.  They have a call to the native
+// also perform an official frame push & pop.  They have a call to the native
 // routine in their middles and end in a return (instead of ending in a jump).
 // The native wrappers are stored in real nmethods instead of the BufferBlobs
 // used by the adapters.  The code generation happens here because it's very
@@ -628,7 +628,7 @@
 
 #ifdef ASSERT
   // Captures code and signature used to generate this adapter when
-  // verifing adapter equivalence.
+  // verifying adapter equivalence.
   unsigned char* _saved_code;
   int            _saved_code_length;
 #endif
--- a/src/share/vm/runtime/sharedRuntimeTrans.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/sharedRuntimeTrans.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -117,7 +117,7 @@
 #endif
 
 /* __ieee754_log(x)
- * Return the logrithm of x
+ * Return the logarithm of x
  *
  * Method :
  *   1. Argument Reduction: find k and f such that
--- a/src/share/vm/runtime/sharedRuntimeTrig.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/sharedRuntimeTrig.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -223,7 +223,7 @@
  *
  *      fq[]    final product of x*(2/pi) in fq[0],..,fq[jk]
  *
- *      ih      integer. If >0 it indicats q[] is >= 0.5, hence
+ *      ih      integer. If >0 it indicates q[] is >= 0.5, hence
  *              it also indicates the *sign* of the result.
  *
  */
@@ -347,7 +347,7 @@
   if(z==0.0) {
     jz -= 1; q0 -= 24;
     while(iq[jz]==0) { jz--; q0-=24;}
-  } else { /* break z into 24-bit if neccessary */
+  } else { /* break z into 24-bit if necessary */
     z = scalbnA(z,-q0);
     if(z>=two24B) {
       fw = (double)((int)(twon24*z));
@@ -409,7 +409,7 @@
 
 /*
  * ====================================================
- * Copyright (c) 1993 Oracle and/or its affilates. All rights reserved.
+ * Copyright (c) 1993 Oracle and/or its affiliates. All rights reserved.
  *
  * Developed at SunPro, a Sun Microsystems, Inc. business.
  * Permission to use, copy, modify, and distribute this
--- a/src/share/vm/runtime/signature.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/signature.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -152,7 +152,7 @@
   _parameter_index = 0;
 }
 
-// Optimized version of iterat_parameters when fingerprint is known
+// Optimized version of iterate_parameters when fingerprint is known
 void SignatureIterator::iterate_parameters( uint64_t fingerprint ) {
   uint64_t saved_fingerprint = fingerprint;
 
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -387,7 +387,7 @@
                                                      int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   // If the method is already compiling, quickly bail out.
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
-    // Use loop event as an opportinity to also check there's been
+    // Use loop event as an opportunity to also check there's been
     // enough calls.
     CompLevel cur_level = comp_level(mh());
     CompLevel next_level = call_event(mh(), cur_level);
--- a/src/share/vm/runtime/statSampler.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/statSampler.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -222,8 +222,8 @@
  * The list of System Properties that have corresponding PerfData
  * string instrumentation created by retrieving the named property's
  * value from System.getProperty() and unconditionally creating a
- * PerfStringConstant object initialized to the retreived value. This
- * is not an exhustive list of Java properties with corresponding string
+ * PerfStringConstant object initialized to the retrieved value. This
+ * is not an exhaustive list of Java properties with corresponding string
  * instrumentation as the create_system_property_instrumentation() method
  * creates other property based instrumentation conditionally.
  */
@@ -325,7 +325,7 @@
   // create string instrumentation for various Java properties.
   create_system_property_instrumentation(CHECK);
 
-  // hotspot flags (from .hotspotrc) and args (from command line)
+  // HotSpot flags (from .hotspotrc) and args (from command line)
   //
   PerfDataManager::create_string_constant(JAVA_RT, "vmFlags",
                                           Arguments::jvm_flags(), CHECK);
--- a/src/share/vm/runtime/stubCodeGenerator.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/stubCodeGenerator.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -111,7 +111,7 @@
 };
 
 
-// Stack-allocated helper class used to assciate a stub code with a name.
+// Stack-allocated helper class used to associate a stub code with a name.
 // All stub code generating functions that use a StubCodeMark will be registered
 // in the global StubCodeDesc list and the generated stub code can be identified
 // later via an address pointing into it.
--- a/src/share/vm/runtime/synchronizer.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/synchronizer.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -373,23 +373,24 @@
 // -----------------------------------------------------------------------------
 //  Wait/Notify/NotifyAll
 // NOTE: must use heavy weight monitor to handle wait()
-void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
+int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
   if (millis < 0) {
     TEVENT (wait - throw IAX) ;
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
   }
   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
   monitor->wait(millis, true, THREAD);
 
-  /* This dummy call is in place to get around dtrace bug 6254741.  Once
-     that's fixed we can uncomment the following line and remove the call */
+  // This dummy call is in place to get around dtrace bug 6254741.  Once
+  // that's fixed we can uncomment the following line, remove the call
+  // and change this function back into a "void" func.
   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
-  dtrace_waited_probe(monitor, obj, THREAD);
+  return dtrace_waited_probe(monitor, obj, THREAD);
 }
 
 void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
@@ -737,10 +738,10 @@
 }
 
 // Be aware of this method could revoke bias of the lock object.
-// This method querys the ownership of the lock handle specified by 'h_obj'.
+// This method queries the ownership of the lock handle specified by 'h_obj'.
 // If the current thread owns the lock, it returns owner_self. If no
 // thread owns the lock, it returns owner_none. Otherwise, it will return
-// ower_other.
+// owner_other.
 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
 (JavaThread *self, Handle h_obj) {
   // The caller must beware this method can revoke bias, and
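
Note the knock-on effect in the wait() hunk above: once the function returns int, the bare THROW_MSG (which ends in a plain return) no longer compiles, hence THROW_MSG_0. A simplified mock of the two macro shapes (the real TRAPS machinery sets a pending exception on the thread; this just shows the control flow):

    // Why THROW_MSG_0 is required in a function returning int.
    static const char* pending_exception = nullptr;

    #define THROW_MSG(msg)   { pending_exception = (msg); return;   }
    #define THROW_MSG_0(msg) { pending_exception = (msg); return 0; }

    int wait_sketch(long millis) {
      if (millis < 0) {
        THROW_MSG_0("timeout value is negative");  // THROW_MSG would not compile
      }
      return 0;  // stands in for dtrace_waited_probe's result
    }
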
--- a/src/share/vm/runtime/synchronizer.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/synchronizer.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
   // to use enter() and exit() in order to make sure user be ware
   // of the performance and semantics difference. They are normally
   // used by ObjectLocker etc. The interpreter and compiler use
-  // assembly copies of these routines. Please keep them synchornized.
+  // assembly copies of these routines. Please keep them synchronized.
   //
   // attempt_rebias flag is used by UseBiasedLocking implementation
   static void fast_enter  (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
@@ -68,7 +68,7 @@
   static void jni_exit    (oop obj,    Thread* THREAD);
 
   // Handle all interpreter, compiler and jni cases
-  static void wait               (Handle obj, jlong millis, TRAPS);
+  static int  wait               (Handle obj, jlong millis, TRAPS);
   static void notify             (Handle obj,               TRAPS);
   static void notifyall          (Handle obj,               TRAPS);
 
--- a/src/share/vm/runtime/thread.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/thread.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -770,7 +770,7 @@
 void JavaThread::record_jump(address target, address instr, const char* file, int line) {
 
   // This should not need to be atomic as the only way for simultaneous
-  // updates is via interrupts. Even then this should be rare or non-existant
+  // updates is via interrupts. Even then this should be rare or non-existent
   // and we don't care that much anyway.
 
   int index = _jmp_ring_index;
@@ -928,10 +928,10 @@
         // Threads_lock is special, since the safepoint synchronization will not start before this is
         // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
         // since it is used to transfer control between JavaThreads and the VMThread
-        // Do not *exclude* any locks unless you are absolutly sure it is correct. Ask someone else first!
+        // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
         if ( (cur->allow_vm_block() &&
               cur != Threads_lock &&
-              cur != Compile_lock &&               // Temporary: should not be necessary when we get spearate compilation
+              cur != Compile_lock &&               // Temporary: should not be necessary when we get separate compilation
               cur != VMOperationRequest_lock &&
               cur != VMOperationQueue_lock) ||
               cur->rank() == Mutex::special) {
@@ -1274,7 +1274,7 @@
         time_slept = 0;
         time_before_loop = now;
     } else {
-        // need to recalulate since we might have new tasks in _tasks
+        // need to recalculate since we might have new tasks in _tasks
         time_slept = (int) ((now - time_before_loop) / 1000000);
     }
 
@@ -1641,7 +1641,7 @@
   // initialize thread-local alloc buffer related fields
   this->initialize_tlab();
 
-  // used to test validitity of stack trace backs
+  // used to test validity of stack trace backs
   this->record_base_of_stack_pointer();
 
   // Record real stack base and size.
@@ -3304,6 +3304,58 @@
   // If CompilerThreads ever become non-JavaThreads, add them here
 }
 
+
+void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
+  TraceTime timer("Initialize java.lang classes", TraceStartupTime);
+
+  if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
+    create_vm_init_libraries();
+  }
+
+  initialize_class(vmSymbols::java_lang_String(), CHECK);
+
+  // Initialize java_lang.System (needed before creating the thread)
+  initialize_class(vmSymbols::java_lang_System(), CHECK);
+  initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK);
+  Handle thread_group = create_initial_thread_group(CHECK);
+  Universe::set_main_thread_group(thread_group());
+  initialize_class(vmSymbols::java_lang_Thread(), CHECK);
+  oop thread_object = create_initial_thread(thread_group, main_thread, CHECK);
+  main_thread->set_threadObj(thread_object);
+  // Set thread status to running since main thread has
+  // been started and running.
+  java_lang_Thread::set_thread_status(thread_object,
+                                      java_lang_Thread::RUNNABLE);
+
+  // The VM creates & returns objects of this class. Make sure it's initialized.
+  initialize_class(vmSymbols::java_lang_Class(), CHECK);
+
+  // The VM preresolves methods to these classes. Make sure that they get initialized
+  initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK);
+  initialize_class(vmSymbols::java_lang_ref_Finalizer(),  CHECK);
+  call_initializeSystemClass(CHECK);
+
+  // get the Java runtime name after java.lang.System is initialized
+  JDK_Version::set_runtime_name(get_java_runtime_name(THREAD));
+  JDK_Version::set_runtime_version(get_java_runtime_version(THREAD));
+
+  // an instance of OutOfMemory exception has been allocated earlier
+  initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK);
+  initialize_class(vmSymbols::java_lang_NullPointerException(), CHECK);
+  initialize_class(vmSymbols::java_lang_ClassCastException(), CHECK);
+  initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK);
+  initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK);
+  initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK);
+  initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK);
+  initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK);
+}
+
+void Threads::initialize_jsr292_core_classes(TRAPS) {
+  initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK);
+  initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK);
+  initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK);
+}
+
 jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
 
   extern void JDK_Version_init();
@@ -3323,7 +3375,7 @@
   // Initialize system properties.
   Arguments::init_system_properties();
 
-  // So that JDK version can be used as a discrimintor when parsing arguments
+  // So that JDK version can be used as a discriminator when parsing arguments
   JDK_Version_init();
 
   // Update/Initialize System properties after JDK version number is known
@@ -3362,7 +3414,7 @@
   jint adjust_after_os_result = Arguments::adjust_after_os();
   if (adjust_after_os_result != JNI_OK) return adjust_after_os_result;
 
-  // intialize TLS
+  // initialize TLS
   ThreadLocalStorage::init();
 
   // Bootstrap native memory tracking, so it can start recording memory
@@ -3473,13 +3525,13 @@
     VMThread::execute(&verify_op);
   }
 
-  EXCEPTION_MARK;
+  Thread* THREAD = Thread::current();
 
   // At this point, the Universe is initialized, but we have not executed
   // any byte code.  Now is a good time (the only time) to dump out the
   // internal state of the JVM for sharing.
   if (DumpSharedSpaces) {
-    MetaspaceShared::preload_and_dump(CHECK_0);
+    MetaspaceShared::preload_and_dump(CHECK_JNI_ERR);
     ShouldNotReachHere();
   }
 
@@ -3490,74 +3542,12 @@
   // Notify JVMTI agents that VM has started (JNI is up) - nop if no agents.
   JvmtiExport::post_vm_start();
 
-  {
-    TraceTime timer("Initialize java.lang classes", TraceStartupTime);
-
-    if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
-      create_vm_init_libraries();
-    }
-
-    initialize_class(vmSymbols::java_lang_String(), CHECK_0);
-
-    // Initialize java_lang.System (needed before creating the thread)
-    initialize_class(vmSymbols::java_lang_System(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK_0);
-    Handle thread_group = create_initial_thread_group(CHECK_0);
-    Universe::set_main_thread_group(thread_group());
-    initialize_class(vmSymbols::java_lang_Thread(), CHECK_0);
-    oop thread_object = create_initial_thread(thread_group, main_thread, CHECK_0);
-    main_thread->set_threadObj(thread_object);
-    // Set thread status to running since main thread has
-    // been started and running.
-    java_lang_Thread::set_thread_status(thread_object,
-                                        java_lang_Thread::RUNNABLE);
-
-    // The VM creates & returns objects of this class. Make sure it's initialized.
-    initialize_class(vmSymbols::java_lang_Class(), CHECK_0);
-
-    // The VM preresolves methods to these classes. Make sure that they get initialized
-    initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ref_Finalizer(),  CHECK_0);
-    call_initializeSystemClass(CHECK_0);
-
-    // get the Java runtime name after java.lang.System is initialized
-    JDK_Version::set_runtime_name(get_java_runtime_name(THREAD));
-    JDK_Version::set_runtime_version(get_java_runtime_version(THREAD));
-
-    // an instance of OutOfMemory exception has been allocated earlier
-    initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_NullPointerException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ClassCastException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK_0);
-  }
-
-  // See        : bugid 4211085.
-  // Background : the static initializer of java.lang.Compiler tries to read
-  //              property"java.compiler" and read & write property "java.vm.info".
-  //              When a security manager is installed through the command line
-  //              option "-Djava.security.manager", the above properties are not
-  //              readable and the static initializer for java.lang.Compiler fails
-  //              resulting in a NoClassDefFoundError.  This can happen in any
-  //              user code which calls methods in java.lang.Compiler.
-  // Hack :       the hack is to pre-load and initialize this class, so that only
-  //              system domains are on the stack when the properties are read.
-  //              Currently even the AWT code has calls to methods in java.lang.Compiler.
-  //              On the classic VM, java.lang.Compiler is loaded very early to load the JIT.
-  // Future Fix : the best fix is to grant everyone permissions to read "java.compiler" and
-  //              read and write"java.vm.info" in the default policy file. See bugid 4211383
-  //              Once that is done, we should remove this hack.
-  initialize_class(vmSymbols::java_lang_Compiler(), CHECK_0);
-
-  // More hackery - the static initializer of java.lang.Compiler adds the string "nojit" to
-  // the java.vm.info property if no jit gets loaded through java.lang.Compiler (the hotspot
-  // compiler does not get loaded through java.lang.Compiler).  "java -version" with the
-  // hotspot vm says "nojit" all the time which is confusing.  So, we reset it here.
-  // This should also be taken out as soon as 4211383 gets fixed.
-  reset_vm_info_property(CHECK_0);
+  initialize_java_lang_classes(main_thread, CHECK_JNI_ERR);
+
+  // We need this for Class Data Sharing - the initial vm.info property is set
+  // with the default value of CDS "sharing", which may be reset through
+  // command line options.
+  reset_vm_info_property(CHECK_JNI_ERR);
 
   quicken_jni_functions();
 
@@ -3586,10 +3576,7 @@
   // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
   // set_init_completed has just been called, causing exceptions not to be shortcut
   // anymore. We call vm_exit_during_initialization directly instead.
-  SystemDictionary::compute_java_system_loader(THREAD);
-  if (HAS_PENDING_EXCEPTION) {
-    vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
-  }
+  SystemDictionary::compute_java_system_loader(CHECK_JNI_ERR);
 
 #if INCLUDE_ALL_GCS
   // Support for ConcurrentMarkSweep. This should be cleaned up
@@ -3597,12 +3584,9 @@
   // once things are properly refactored. XXX YSR
   if (UseConcMarkSweepGC || UseG1GC) {
     if (UseConcMarkSweepGC) {
-      ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD);
+      ConcurrentMarkSweepThread::makeSurrogateLockerThread(CHECK_JNI_ERR);
     } else {
-      ConcurrentMarkThread::makeSurrogateLockerThread(THREAD);
-    }
-    if (HAS_PENDING_EXCEPTION) {
-      vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+      ConcurrentMarkThread::makeSurrogateLockerThread(CHECK_JNI_ERR);
     }
   }
 #endif // INCLUDE_ALL_GCS
@@ -3645,19 +3629,16 @@
   CompileBroker::compilation_init();
 #endif
 
+  // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
+  // It is done after compilers are initialized, because otherwise compilations of
+  // signature polymorphic MH intrinsics can be missed
+  // (see SystemDictionary::find_method_handle_intrinsic).
   if (EnableInvokeDynamic) {
-    // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
-    // It is done after compilers are initialized, because otherwise compilations of
-    // signature polymorphic MH intrinsics can be missed
-    // (see SystemDictionary::find_method_handle_intrinsic).
-    initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0);
+    initialize_jsr292_core_classes(CHECK_JNI_ERR);
   }
 
 #if INCLUDE_MANAGEMENT
   Management::initialize(THREAD);
-#endif // INCLUDE_MANAGEMENT
 
   if (HAS_PENDING_EXCEPTION) {
     // management agent fails to start possibly due to
@@ -3665,6 +3646,7 @@
     // stack trace if appropriate. Simply exit VM.
     vm_exit(1);
   }
+#endif // INCLUDE_MANAGEMENT
 
   if (Arguments::has_profile())       FlatProfiler::engage(main_thread, true);
   if (MemProfiling)                   MemProfiler::engage();
@@ -4159,7 +4141,7 @@
 // but the garbage collector must provide a safe context for them to run.
 // In particular, these things should never be called when the Threads_lock
 // is held by some other thread. (Note: the Safepoint abstraction also
-// uses the Threads_lock to gurantee this property. It also makes sure that
+// uses the Threads_lock to guarantee this property. It also makes sure that
 // all threads gets blocked when exiting or starting).
 
 void Threads::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
@@ -4449,9 +4431,7 @@
         ++ctr ;
         if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
            if (Yields > 5) {
-             // Consider using a simple NakedSleep() instead.
-             // Then SpinAcquire could be called by non-JVM threads
-             Thread::current()->_ParkEvent->park(1) ;
+             os::naked_short_sleep(1);
            } else {
              os::NakedYield() ;
              ++Yields ;
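
The create_vm changes above consistently swap CHECK_0 for CHECK_JNI_ERR. The distinction matters because create_vm returns a jint status where 0 means JNI_OK; a simplified mock of the control flow (HotSpot's real macros splice into the callee's argument list):

    // CHECK_0 vs CHECK_JNI_ERR in a function returning a jint status.
    typedef int jint;
    enum { JNI_OK = 0, JNI_ERR = -1 };

    static bool pending_exception = false;

    static void initialize_classes() { /* may set pending_exception */ }

    jint create_vm_sketch() {
      initialize_classes();
      if (pending_exception) return JNI_ERR;  // CHECK_JNI_ERR behavior
      // The old CHECK_0 would "return 0" here -- i.e. JNI_OK --
      // silently reporting success on failure.
      return JNI_OK;
    }
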
--- a/src/share/vm/runtime/thread.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/thread.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1246,7 +1246,7 @@
   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
 
-  // Side structure for defering update of java frame locals until deopt occurs
+  // Side structure for deferring update of java frame locals until deopt occurs
   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
   void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }
 
@@ -1918,6 +1918,8 @@
   static bool        _vm_complete;
 #endif
 
+  static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
+  static void initialize_jsr292_core_classes(TRAPS);
  public:
   // Thread management
   // force_daemon is a concession to JNI, where we may need to add a
--- a/src/share/vm/runtime/unhandledOops.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/unhandledOops.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -34,7 +34,7 @@
 // destructor.  The constructor adds the oop address on a list
 // off each thread and the destructor removes the oop.  At a potential
 // safepoint, the stack addresses of the local variable oops are trashed
-// with a recognizeable value.  If the local variable is used again, it
+// with a recognizable value.  If the local variable is used again, it
 // will segfault, indicating an unsafe use of that oop.
 // eg:
 //    oop o;    //register &o on list
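
The unhandledOops scheme above works by poisoning registered stack slots at potential safepoints so that any later use faults immediately. A sketch of the poisoning step (the pattern value and names here are illustrative, not the checker's actual constants):

    // Trash a registered local oop slot with a recognizable value.
    #include <cstdint>

    typedef uintptr_t oop_sketch;

    const oop_sketch BAD_OOP = (oop_sketch)0xABABABABABABABABULL;  // assumed

    void trash_registered_oop(oop_sketch* slot) {
      *slot = BAD_OOP;  // later dereference segfaults, flagging unsafe use
    }
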
--- a/src/share/vm/runtime/vframeArray.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/vframeArray.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -53,7 +53,7 @@
 
     frame _frame;                                                // the interpreter frame we will unpack into
     int  _bci;                                                   // raw bci for this vframe
-    bool _reexecute;                                             // whether sould we reexecute this bytecode
+    bool _reexecute;                                             // whether we should reexecute this bytecode
     Method*    _method;                                          // the method for this vframe
     MonitorChunk* _monitors;                                     // active monitors for this vframe
     StackValueCollection* _locals;
@@ -158,7 +158,7 @@
   // Tells whether index is within bounds.
   bool is_within_bounds(int index) const        { return 0 <= index && index < frames(); }
 
-  // Accessores for instance variable
+  // Accessor for instance variable
   int frames() const                            { return _frames;   }
 
   static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
--- a/src/share/vm/runtime/virtualspace.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/virtualspace.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -218,9 +218,9 @@
          noaccess_prefix == _alignment, "noaccess prefix wrong");
 
   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
-         "area must be distinguisable from marks for mark-sweep");
+         "area must be distinguishable from marks for mark-sweep");
   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
-         "area must be distinguisable from marks for mark-sweep");
+         "area must be distinguishable from marks for mark-sweep");
 }
 
 
@@ -554,10 +554,10 @@
 
   // Determine which regions need to grow in this expand_by call.
   // If you are growing in the lower region, high() must be in that
-  // region so calcuate the size based on high().  For the middle and
+  // region so calculate the size based on high().  For the middle and
   // upper regions, determine the starting point of growth based on the
   // location of high().  By getting the MAX of the region's low address
-  // (or the prevoius region's high address) and high(), we can tell if it
+  // (or the previous region's high address) and high(), we can tell if it
   // is an intra or inter region growth.
   size_t lower_needs = 0;
   if (aligned_lower_new_high > lower_high()) {
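
The MAX computation described in the comment can be shown in isolation. A minimal sketch with made-up addresses (not VM code): if high() already lies inside the region being grown, growth is intra-region and starts at high(); otherwise it is inter-region and starts at the region's low boundary, i.e. the previous region's high address.

public class GrowthStartSketch {
    public static void main(String[] args) {
        long middleLow = 0x2000;   // low boundary of the middle region
        long high      = 0x1800;   // current high water mark, still in the lower region

        // MAX of the region's low address and high() picks the growth start.
        long growthStart = Math.max(middleLow, high);
        System.out.printf("start=0x%x (%s-region growth)%n",
                          growthStart, high > middleLow ? "intra" : "inter");
    }
}
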
--- a/src/share/vm/runtime/vm_operations.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/runtime/vm_operations.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -154,7 +154,7 @@
   void set_next(VM_Operation *next)              { _next = next; }
   void set_prev(VM_Operation *prev)              { _prev = prev; }
 
-  // Configuration. Override these appropriatly in subclasses.
+  // Configuration. Override these appropriately in subclasses.
   virtual VMOp_Type type() const = 0;
   virtual Mode evaluation_mode() const            { return _safepoint; }
   virtual bool allow_nested_vm_operations() const { return false; }
--- a/src/share/vm/services/attachListener.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/services/attachListener.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -282,6 +282,20 @@
       return JNI_ERR;
     }
   }
+
+  if (strncmp(name, "MaxHeapFreeRatio", 17) == 0) {
+    FormatBuffer<80> err_msg("");
+    if (!Arguments::verify_MaxHeapFreeRatio(err_msg, value)) {
+      out->print_cr(err_msg.buffer());
+      return JNI_ERR;
+    }
+  } else if (strncmp(name, "MinHeapFreeRatio", 17) == 0) {
+    FormatBuffer<80> err_msg("");
+    if (!Arguments::verify_MinHeapFreeRatio(err_msg, value)) {
+      out->print_cr(err_msg.buffer());
+      return JNI_ERR;
+    }
+  }
   bool res = CommandLineFlags::uintxAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
   if (! res) {
     out->print_cr("setting flag %s failed", name);
@@ -518,7 +532,7 @@
     // Check that thread and osthread were created
     if (listener_thread == NULL || listener_thread->osthread() == NULL) {
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
+                                    os::native_thread_creation_failed_msg());
     }
 
     java_lang_Thread::set_thread(thread_oop(), listener_thread);
--- a/src/share/vm/services/management.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/services/management.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1833,6 +1833,18 @@
     succeed = CommandLineFlags::intxAtPut(name, &ivalue, Flag::MANAGEMENT);
   } else if (flag->is_uintx()) {
     uintx uvalue = (uintx)new_value.j;
+
+    if (strncmp(name, "MaxHeapFreeRatio", 17) == 0) {
+      FormatBuffer<80> err_msg("");
+      if (!Arguments::verify_MaxHeapFreeRatio(err_msg, uvalue)) {
+        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), err_msg.buffer());
+      }
+    } else if (strncmp(name, "MinHeapFreeRatio", 17) == 0) {
+      FormatBuffer<80> err_msg("");
+      if (!Arguments::verify_MinHeapFreeRatio(err_msg, uvalue)) {
+        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), err_msg.buffer());
+      }
+    }
     succeed = CommandLineFlags::uintxAtPut(name, &uvalue, Flag::MANAGEMENT);
   } else if (flag->is_uint64_t()) {
     uint64_t uvalue = (uint64_t)new_value.j;
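
Because both flags are manageable, the effect of these new checks is visible from Java through the HotSpot diagnostic MXBean. A minimal sketch (the values are chosen for illustration and assume the default MaxHeapFreeRatio of 70):

import java.lang.management.ManagementFactory;
import com.sun.management.HotSpotDiagnosticMXBean;

public class FreeRatioCheckDemo {
    public static void main(String[] args) {
        HotSpotDiagnosticMXBean hs =
            ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);

        // Consistent update: MinHeapFreeRatio stays below MaxHeapFreeRatio.
        hs.setVMOption("MinHeapFreeRatio", "10");

        try {
            // 95 exceeds MaxHeapFreeRatio, so the verification above rejects it.
            hs.setVMOption("MinHeapFreeRatio", "95");
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}
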
--- a/src/share/vm/trace/trace.xml	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/trace/trace.xml	Wed Feb 19 12:08:49 2014 -0800
@@ -122,6 +122,46 @@
       <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
     </event>
 
+    <event id="LongFlagChanged" path="vm/flag/long_changed" label="Long Flag Changed"
+          is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="LONG" field="old_value" label="Old Value" />
+      <value type="LONG" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
+    <event id="UnsignedLongFlagChanged" path="vm/flag/ulong_changed" label="Unsigned Long Flag Changed"
+          is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="ULONG" field="old_value" label="Old Value" />
+      <value type="ULONG" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
+    <event id="DoubleFlagChanged" path="vm/flag/double_changed" label="Double Flag Changed"
+         is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="DOUBLE" field="old_value" label="Old Value" />
+      <value type="DOUBLE" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
+    <event id="BooleanFlagChanged" path="vm/flag/boolean_changed" label="Boolean Flag Changed"
+         is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="BOOLEAN" field="old_value" label="Old Value" />
+      <value type="BOOLEAN" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
+    <event id="StringFlagChanged" path="vm/flag/string_changed" label="String Flag Changed"
+         is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="UTF8" field="old_value" label="Old Value" />
+      <value type="UTF8" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
     <struct id="VirtualSpace">
       <value type="ADDRESS" field="start" label="Start Address" description="Start address of the virtual space" />
       <value type="ADDRESS" field="committedEnd" label="Committed End Address" description="End address of the committed memory for the virtual space" />
--- a/src/share/vm/trace/tracetypes.xml	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/trace/tracetypes.xml	Wed Feb 19 12:08:49 2014 -0800
@@ -150,6 +150,11 @@
       <value type="UTF8" field="phase" label="phase" />
     </content_type>
 
+    <content_type id="FlagValueOrigin" hr_name="Flag Value Origin"
+                  type="U1" jvm_type="FLAGVALUEORIGIN">
+      <value type="UTF8" field="origin" label="origin" />
+    </content_type>
+
   </content_types>
 
 
@@ -334,6 +339,10 @@
     <!-- VMOPERATIONTYPE -->
     <primary_type symbol="VMOPERATIONTYPE" datatype="U2" contenttype="VMOPERATIONTYPE"
                   type="u2" sizeop="sizeof(u2)" />
+                  
+    <!-- FLAGVALUEORIGIN -->
+    <primary_type symbol="FLAGVALUEORIGIN" datatype="U1"
+                  contenttype="FLAGVALUEORIGIN" type="u1" sizeop="sizeof(u1)" />
 
   </primary_types>
 </types>
--- a/src/share/vm/utilities/array.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/utilities/array.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@
 
   void initialize(size_t esize, int length) {
     assert(length >= 0, "illegal length");
-    assert(_data == NULL, "must be new object");
+    assert(StressRewriter || _data == NULL, "must be new object");
     _length  = length;
     _data    = resource_allocate_bytes(esize * length);
     DEBUG_ONLY(init_nesting();)
--- a/src/share/vm/utilities/bitMap.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/utilities/bitMap.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -110,7 +110,7 @@
     while (true) {
       intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
       if (res == w) break;
-      w  = *pw;
+      w  = res;
       nw = value ? (w | ~mr) : (w & mr);
     }
   }
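
The fix replaces a racy second read of *pw with the value that the failed cmpxchg already returned. Java's AtomicLong.compareAndExchange (JDK 9+) returns the same kind of witness value, so the corrected loop shape can be sketched like this (names invented):

import java.util.concurrent.atomic.AtomicLong;

public class CasRetrySketch {
    // Set or clear the bits selected by 'mask' in 'word', retrying on contention.
    static void parSetBits(AtomicLong word, long mask, boolean value) {
        long w = word.get();
        while (true) {
            long nw = value ? (w | mask) : (w & ~mask);
            // On failure, compareAndExchange returns the current contents,
            // so no separate re-read is needed (the analogue of using 'res'
            // instead of '*pw' above).
            long res = word.compareAndExchange(w, nw);
            if (res == w) break;
            w = res;
        }
    }

    public static void main(String[] args) {
        AtomicLong word = new AtomicLong(0b0001);
        parSetBits(word, 0b0110, true);
        System.out.println(Long.toBinaryString(word.get()));  // prints 111
    }
}
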
--- a/src/share/vm/utilities/dtrace.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/utilities/dtrace.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -38,20 +38,19 @@
 #define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG() \
   do { volatile size_t dtrace_workaround_tail_call_bug = 1; } while (0)
 
-#define USDT1 1
 #elif defined(LINUX)
 #define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG()
-#define USDT1 1
 #elif defined(__APPLE__)
 #define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG()
+#include <sys/types.h>
+#else
+#error "dtrace enabled for unknown os"
+#endif /* defined(SOLARIS) */
+
 #define USDT2 1
-#include <sys/types.h>
 #include "dtracefiles/hotspot.h"
 #include "dtracefiles/hotspot_jni.h"
 #include "dtracefiles/hs_private.h"
-#else
-#error "dtrace enabled for unknown os"
-#endif /* defined(SOLARIS) */
 
 #else /* defined(DTRACE_ENABLED) */
 
--- a/src/share/vm/utilities/exceptions.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/utilities/exceptions.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -200,6 +200,7 @@
 #define CHECK_NH                                 CHECK_(Handle())
 #define CHECK_NULL                               CHECK_(NULL)
 #define CHECK_false                              CHECK_(false)
+#define CHECK_JNI_ERR                            CHECK_(JNI_ERR)
 
 #define CHECK_AND_CLEAR                         THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return;        } (void)(0
 #define CHECK_AND_CLEAR_(result)                THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (void)(0
--- a/src/share/vm/utilities/globalDefinitions.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -152,7 +152,7 @@
 // The larger HeapWordSize for 64bit requires larger heaps
 // for the same application running in 64bit.  See bug 4967770.
 // The minimum alignment to a heap word size is done.  Other
-// parts of the memory system may required additional alignment
+// parts of the memory system may require additional alignment
 // and are responsible for those alignments.
 #ifdef _LP64
 #define ScaleForWordSize(x) align_size_down_((x) * 13 / 10, HeapWordSize)
--- a/src/share/vm/utilities/hashtable.cpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/utilities/hashtable.cpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,7 +93,7 @@
   return false;
 }
 
-template <class T, MEMFLAGS F> jint Hashtable<T, F>::_seed = 0;
+template <class T, MEMFLAGS F> juint Hashtable<T, F>::_seed = 0;
 
 // Create a new table and, using the alternate hash code, populate the new table
 // with the existing elements.   This can be used to change the hash code
--- a/src/share/vm/utilities/hashtable.hpp	Thu Jan 30 14:30:01 2014 +0100
+++ b/src/share/vm/utilities/hashtable.hpp	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -280,7 +280,7 @@
   // Function to move these elements into the new table.
   void move_to(Hashtable<T, F>* new_table);
   static bool use_alternate_hashcode()  { return _seed != 0; }
-  static jint seed()                    { return _seed; }
+  static juint seed()                    { return _seed; }
 
   static int literal_size(Symbol *symbol);
   static int literal_size(oop oop);
@@ -296,7 +296,7 @@
   void dump_table(outputStream* st, const char *table_name);
 
  private:
-  static jint _seed;
+  static juint _seed;
 };
 
 
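
The seed's switch from jint to juint plausibly guards against sign problems: with a signed type, a seed whose high bit is set is negative and can push a sign into derived hash or index arithmetic. Java has no unsigned int, so the analogous defense there is masking the sign bit, as in this sketch:

public class SeedSignSketch {
    public static void main(String[] args) {
        int seed = 0x9E3779B9;    // high bit set: negative as a signed int
        int buckets = 512;

        // A signed interpretation can yield a negative, unusable index:
        System.out.println(seed % buckets);                   // prints -71

        // Masking the sign bit (the moral equivalent of the juint change)
        // keeps the index in range:
        System.out.println((seed & 0x7FFFFFFF) % buckets);    // prints 441
    }
}
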
--- a/test/compiler/ciReplay/common.sh	Thu Jan 30 14:30:01 2014 +0100
+++ b/test/compiler/ciReplay/common.sh	Wed Feb 19 12:08:49 2014 -0800
@@ -196,6 +196,11 @@
     then
         # enable core dump
         ulimit -c unlimited
+        new_ulimit=`ulimit -c`
+        if [ $new_ulimit != "unlimited" -a $new_ulimit != "-1" ]
+        then
+            test_fail 2 "CHECK :: ULIMIT" "Could not set 'ulimit -c unlimited'. 'ulimit -c' returns : $new_ulimit"
+        fi
 
         if [ $VM_OS = "solaris" ]
         then
@@ -228,7 +233,10 @@
     
     core_locations=`grep -i core crash.out | grep "location:" | \
             sed -e 's/.*location: //'`
+    echo CRASH OUTPUT:
+    cat crash.out    
     rm crash.out 
+    
     # processing core locations for *nix
     if [ $VM_OS != "windows" ]
     then
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/TestVerifySilently.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestVerifySilently.java
+ * @key gc
+ * @bug 8032771
+ * @summary Test silent verification.
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.util.ArrayList;
+import java.util.Collections;
+
+class RunSystemGC {
+  public static void main(String args[]) throws Exception {
+    System.gc();
+  }
+}
+
+
+public class TestVerifySilently {
+  private static String[] getTestJavaOpts() {
+    String testVmOptsStr = System.getProperty("test.java.opts", "");
+    if (!testVmOptsStr.isEmpty()) {
+      return testVmOptsStr.split(" ");
+    } else {
+      return new String[] {};
+    }
+  }
+
+  private static OutputAnalyzer runTest(boolean verifySilently) throws Exception {
+    ArrayList<String> vmOpts = new ArrayList<String>();
+
+    Collections.addAll(vmOpts, getTestJavaOpts());
+    Collections.addAll(vmOpts, new String[] {"-XX:+UnlockDiagnosticVMOptions",
+                                             "-XX:+VerifyDuringStartup",
+                                             "-XX:+VerifyBeforeGC",
+                                             "-XX:+VerifyAfterGC",
+                                             "-XX:" + (verifySilently ? "+":"-") + "VerifySilently",
+                                             RunSystemGC.class.getName()});
+    ProcessBuilder pb =
+      ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    System.out.println("Output:\n" + output.getOutput());
+    return output;
+  }
+
+
+  public static void main(String args[]) throws Exception {
+
+    OutputAnalyzer output;
+
+    output = runTest(false);
+    output.shouldContain("[Verifying");
+    output.shouldHaveExitValue(0);
+
+    output = runTest(true);
+    output.shouldNotContain("[Verifying");
+    output.shouldHaveExitValue(0);
+  }
+}
--- a/test/gc/arguments/TestMaxHeapSizeTools.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/test/gc/arguments/TestMaxHeapSizeTools.java	Wed Feb 19 12:08:49 2014 -0800
@@ -41,8 +41,8 @@
   public long initialHeapSize;
   public long maxHeapSize;
 
-  public long minAlignment;
-  public long maxAlignment;
+  public long spaceAlignment;
+  public long heapAlignment;
 }
 
 class TestMaxHeapSizeTools {
@@ -192,7 +192,7 @@
     // Unfortunately there is no other way to retrieve the minimum heap size and
     // the alignments.
 
-    Matcher m = Pattern.compile("Minimum heap \\d+ Initial heap \\d+ Maximum heap \\d+ Min alignment \\d+ Max alignment \\d+").
+    Matcher m = Pattern.compile("Minimum heap \\d+ Initial heap \\d+ Maximum heap \\d+ Space alignment \\d+ Heap alignment \\d+").
       matcher(output.getStdout());
     if (!m.find()) {
       throw new RuntimeException("Could not find heap size string.");
@@ -204,8 +204,8 @@
     val.minHeapSize = valueAfter(match, "Minimum heap ");
     val.initialHeapSize = valueAfter(match, "Initial heap ");
     val.maxHeapSize = valueAfter(match, "Maximum heap ");
-    val.minAlignment = valueAfter(match, "Min alignment ");
-    val.maxAlignment = valueAfter(match, "Max alignment ");
+    val.spaceAlignment = valueAfter(match, "Space alignment ");
+    val.heapAlignment = valueAfter(match, "Heap alignment ");
   }
 
   /**
@@ -218,12 +218,12 @@
     MinInitialMaxValues v = new MinInitialMaxValues();
     getMinInitialMaxHeap(args, v);
 
-    if ((expectedMin != -1) && (align_up(expectedMin, v.minAlignment) != v.minHeapSize)) {
+    if ((expectedMin != -1) && (align_up(expectedMin, v.heapAlignment) != v.minHeapSize)) {
       throw new RuntimeException("Actual minimum heap size of " + v.minHeapSize +
         " differs from expected minimum heap size of " + expectedMin);
     }
 
-    if ((expectedInitial != -1) && (align_up(expectedInitial, v.minAlignment) != v.initialHeapSize)) {
+    if ((expectedInitial != -1) && (align_up(expectedInitial, v.heapAlignment) != v.initialHeapSize)) {
       throw new RuntimeException("Actual initial heap size of " + v.initialHeapSize +
         " differs from expected initial heap size of " + expectedInitial);
     }
@@ -247,7 +247,7 @@
     MinInitialMaxValues v = new MinInitialMaxValues();
     getMinInitialMaxHeap(new String[] { gcflag, "-XX:MaxHeapSize=" + maxHeapsize + "M" }, v);
 
-    long expectedHeapSize = align_up(maxHeapsize * K * K, v.maxAlignment);
+    long expectedHeapSize = align_up(maxHeapsize * K * K, v.heapAlignment);
     long actualHeapSize = v.maxHeapSize;
 
     if (actualHeapSize > expectedHeapSize) {
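
The align_up used throughout this test rounds a size up to the next multiple of an alignment. For reference, a minimal power-of-two version (a sketch; the test's own helper may differ in detail):

public class AlignUpSketch {
    // Round 'size' up to the next multiple of 'align' (align must be a power of two).
    static long alignUp(long size, long align) {
        return (size + align - 1) & ~(align - 1);
    }

    public static void main(String[] args) {
        long twoGbPlusOne = 2L * 1024 * 1024 * 1024 + 1;
        long heapAlign    = 2 * 1024 * 1024;                  // e.g. a 2m heap alignment
        System.out.println(alignUp(twoGbPlusOne, heapAlign)); // prints 2149580800
    }
}
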
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/Test2GbHeap.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test Test2GbHeap
+ * @bug 8031686
+ * @summary Regression test to ensure we can start G1 with 2gb heap.
+ * @key gc
+ * @key regression
+ * @library /testlibrary
+ */
+
+import java.util.ArrayList;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+public class Test2GbHeap {
+  public static void main(String[] args) throws Exception {
+    ArrayList<String> testArguments = new ArrayList<String>();
+
+    testArguments.add("-XX:+UseG1GC");
+    testArguments.add("-Xmx2g");
+    testArguments.add("-version");
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(testArguments.toArray(new String[0]));
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    // Avoid failing the test on unsupported setups.
+    if (output.getOutput().contains("Could not reserve enough space for 2097152KB object heap")) {
+      // Will fail on machines with too little memory (and Windows 32-bit VM), ignore such failures.
+      output.shouldHaveExitValue(1);
+    } else if (output.getOutput().contains("G1 GC is disabled in this release")) {
+      // G1 is not supported on embedded, ignore such failures.
+      output.shouldHaveExitValue(1);
+    } else {
+      // Normally everything should be fine.
+      output.shouldHaveExitValue(0);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestStringSymbolTableStats.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStringSymbolTableStats.java
+ * @bug 8027476 8027455
+ * @summary Ensure that the G1TraceStringSymbolTableScrubbing prints the expected message.
+ * @key gc
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestStringSymbolTableStats {
+  public static void main(String[] args) throws Exception {
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+                                                              "-XX:+UnlockExperimentalVMOptions",
+                                                              "-XX:+G1TraceStringSymbolTableScrubbing",
+                                                              SystemGCTest.class.getName());
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    System.out.println("Output:\n" + output.getOutput());
+
+    output.shouldContain("Cleaned string and symbol table");
+    output.shouldHaveExitValue(0);
+  }
+
+  static class SystemGCTest {
+    public static void main(String [] args) {
+      System.out.println("Calling System.gc()");
+      System.gc();
+    }
+  }
+}
--- a/test/runtime/6626217/Test6626217.sh	Thu Jan 30 14:30:01 2014 +0100
+++ b/test/runtime/6626217/Test6626217.sh	Wed Feb 19 12:08:49 2014 -0800
@@ -48,6 +48,11 @@
 # A Clean Compile: this line will probably fail within jtreg as have a clean dir:
 ${RM} -f *.class *.impl many_loader.java
 
+# Make sure that the compilation step occurs in the future so as not to allow fast systems
+# to copy and compile bug_21227.java so quickly that the class and java files get the same
+# time stamp, which later on would make the compilation step of many_loader.java fail
+sleep 2
+
 # Compile all the usual suspects, including the default 'many_loader'
 ${CP} many_loader1.java.foo many_loader.java
 ${JAVAC} ${TESTJAVACOPTS} -source 1.4 -target 1.4 -Xlint *.java
--- a/test/runtime/6929067/Test6929067.sh	Thu Jan 30 14:30:01 2014 +0100
+++ b/test/runtime/6929067/Test6929067.sh	Wed Feb 19 12:08:49 2014 -0800
@@ -1,15 +1,13 @@
 #!/bin/sh
 
 ##
-## @ignore 8028740
 ## @test Test6929067.sh
 ## @bug 6929067
 ## @bug 8021296
 ## @summary Stack guard pages should be removed when thread is detached
-## @compile T.java
 ## @run shell Test6929067.sh
 ##
-set -x
+
 if [ "${TESTSRC}" = "" ]
 then
   TESTSRC=${PWD}
@@ -114,10 +112,8 @@
 LD_LIBRARY_PATH=.:${COMPILEJAVA}/jre/lib/${ARCH}/${VMTYPE}:/usr/lib:$LD_LIBRARY_PATH
 export LD_LIBRARY_PATH
 
-cp ${TESTSRC}${FS}invoke.c .
-
-# Copy the result of our @compile action:
-cp ${TESTCLASSES}${FS}T.class .
+cp ${TESTSRC}${FS}*.java ${THIS_DIR}
+${TESTJAVA}${FS}bin${FS}javac *.java
 
 echo "Architecture: ${ARCH}"
 echo "Compilation flag: ${COMP_FLAG}"
@@ -127,9 +123,9 @@
 # for /usr/lib/`uname -m`-linux-gnu version ensure to add that path to below compilation.
 
 $gcc_cmd -DLINUX ${COMP_FLAG} -o invoke \
-    -I${COMPILEJAVA}/include -I${COMPILEJAVA}/include/linux \
-    -L${COMPILEJAVA}/jre/lib/${ARCH}/${VMTYPE} \
-    -ljvm -lpthread invoke.c
+    -I${TESTJAVA}/include -I${TESTJAVA}/include/linux \
+    -L${TESTJAVA}/jre/lib/${ARCH}/${VMTYPE} \
+     ${TESTSRC}${FS}invoke.c -ljvm -lpthread
 
 ./invoke
 exit $?
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/ClassFile/UnsupportedClassFileVersion.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file UnsupportedClassFileVersion.java
+ * @run main UnsupportedClassFileVersion
+ */
+
+import java.io.File;
+import java.io.FileOutputStream;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+import com.oracle.java.testlibrary.*;
+
+public class UnsupportedClassFileVersion implements Opcodes {
+    public static void main(String... args) throws Exception {
+        writeClassFile();
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true, "-cp", ".",  "ClassFile");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ClassFile has been compiled by a more recent version of the " +
+                            "Java Runtime (class file version 99.0), this version of " +
+                            "the Java Runtime only recognizes class file versions up to " +
+                            System.getProperty("java.class.version"));
+
+        output.shouldHaveExitValue(1);
+    }
+
+    public static void writeClassFile() throws Exception {
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
+        cw.visit(99, ACC_PUBLIC + ACC_SUPER, "ClassFile", null, "java/lang/Object", null);
+        mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+        mv.visitCode();
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(1, 1);
+        mv.visitEnd();
+        cw.visitEnd();
+
+        try (FileOutputStream fos = new FileOutputStream(new File("ClassFile.class"))) {
+             fos.write(cw.toByteArray());
+        }
+    }
+}
--- a/test/runtime/CommandLine/CompilerConfigFileWarning.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/test/runtime/CommandLine/CompilerConfigFileWarning.java	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,7 @@
 
 public class CompilerConfigFileWarning {
     public static void main(String[] args) throws Exception {
-        String vmVersion = System.getProperty("java.vm.version");
-        if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
+        if (Platform.isDebugBuild()) {
             System.out.println("Skip on debug builds since we'll always read the file there");
             return;
         }
--- a/test/runtime/CommandLine/ConfigFileWarning.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/test/runtime/CommandLine/ConfigFileWarning.java	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,7 @@
 
 public class ConfigFileWarning {
     public static void main(String[] args) throws Exception {
-        String vmVersion = System.getProperty("java.vm.version");
-        if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
+        if (Platform.isDebugBuild()) {
             System.out.println("Skip on debug builds since we'll always read the file there");
             return;
         }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/CommandLine/VMOptionWarning.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027314
+ * @summary Warn if diagnostic or experimental vm option is used and -XX:+UnlockDiagnosticVMOptions or -XX:+UnlockExperimentalVMOptions, respectively, isn't specified. Warn if develop or notproduct vm option is used with product version of VM.
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class VMOptionWarning {
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PredictedLoadedClassCount", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Error: VM option 'PredictedLoadedClassCount' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.");
+
+        if (Platform.isDebugBuild()) {
+            System.out.println("Skip the rest of the tests on debug builds since diagnostic, develop, and notproduct options are available on debug builds.");
+            return;
+        }
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintInlining", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Error: VM option 'PrintInlining' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.");
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJNICalls", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Error: VM option 'TraceJNICalls' is develop and is available only in debug version of VM.");
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJVMCalls", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Error: VM option 'TraceJVMCalls' is notproduct and is available only in debug version of VM.");
+    }
+}
--- a/test/runtime/RedefineObject/Agent.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/test/runtime/RedefineObject/Agent.java	Wed Feb 19 12:08:49 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,8 +65,8 @@
     public static void main(String[] args) {
         byte[] ba = new byte[0];
 
-        // If it survives 1000 GC's, it's good.
-        for (int i = 0; i < 1000 ; i++) {
+        // If it survives 100 GC's, it's good.
+        for (int i = 0; i < 100 ; i++) {
             System.gc();
             ba.clone();
         }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test ArchiveDoesNotExist
+ * @summary Test how the VM handles the "file does not exist" situation while
+ *          attempting to use a CDS archive. The JVM should exit gracefully
+ *          when sharing mode is ON, and continue w/o sharing if sharing
+ *          mode is AUTO.
+ * @library /testlibrary
+ * @run main ArchiveDoesNotExist
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class ArchiveDoesNotExist {
+    public static void main(String[] args) throws Exception {
+        String fileName = "test.jsa";
+
+        File cdsFile = new File(fileName);
+        if (cdsFile.exists())
+            throw new RuntimeException("Test error: cds file already exists");
+
+        // Sharing: on
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-Xshare:on",
+           "-version");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Specified shared archive not found");
+        output.shouldHaveExitValue(1);
+
+        // Sharing: auto
+        pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-Xshare:auto",
+           "-version");
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("java version");
+        output.shouldNotContain("sharing");
+        output.shouldHaveExitValue(0);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/CdsWriteError.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test CdsWriteError
+ * @summary Test how the VM handles the situation when it is impossible to write the
+ *          CDS archive. The VM is expected to exit gracefully and display the
+ *          correct reason for the error.
+ * @library /testlibrary
+ * @run main CdsWriteError
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class CdsWriteError {
+    public static void main(String[] args) throws Exception {
+
+        if (Platform.isWindows()) {
+            System.out.println("This test is ignored on Windows. This test " +
+                "manipulates folder writable attribute, which is known to be " +
+                "often ignored by Windows");
+
+            return;
+        }
+
+        String folderName = "tmp";
+        String fileName = folderName + File.separator + "empty.jsa";
+
+        // create an empty archive file and make it read only
+        File folder = new File(folderName);
+        if (!folder.mkdir())
+            throw new RuntimeException("Error when creating a tmp folder");
+
+        File cdsFile = new File(fileName);
+        if (!cdsFile.createNewFile())
+            throw new RuntimeException("Error when creating an empty CDS file");
+        if (!cdsFile.setWritable(false))
+            throw new RuntimeException("Error: could not set writable attribute on cds file");
+        if (!folder.setWritable(false))
+            throw new RuntimeException("Error: could not set writable attribute on the cds folder");
+
+        try {
+           ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+             "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./" + fileName, "-Xshare:dump");
+
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldContain("Unable to create shared archive file");
+            output.shouldHaveExitValue(1);
+        } finally {
+            // doing this, just in case, to make sure that files can be deleted by the harness
+            // on any subsequent run
+            folder.setWritable(true);
+            cdsFile.setWritable(true);
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/DefaultUseWithClient.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test DefaultUseWithClient
+ * @summary Test default behavior of sharing with -client
+ * @library /testlibrary
+ * @run main DefaultUseWithClient
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class DefaultUseWithClient {
+    public static void main(String[] args) throws Exception {
+        String fileName = "test.jsa";
+
+        // On 32-bit windows CDS should be on by default in "-client" config
+        // Skip this test on any other platform
+        boolean is32BitWindows = (Platform.isWindows() && Platform.is32bit());
+        if (!is32BitWindows) {
+            System.out.println("Test only applicable on 32-bit Windows. Skipping");
+            return;
+        }
+
+        // create the archive
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-Xshare:dump");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-client",
+           "-version");
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("sharing");
+        output.shouldHaveExitValue(0);
+   }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/finalStatic/FinalStatic.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8028553
+ * @summary Test that VerifyError is not thrown when 'overriding' a static method.
+ * @run main FinalStatic
+ */
+
+import java.lang.reflect.*;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+
+/*
+ *  class A { static final int m() {return FAIL; } }
+ *  class B extends A { int m() { return PASS; } }
+ *  class FinalStatic {
+ *      public static void main () {
+ *          Object b = new B();
+ *          b.m();
+ *      }
+ *  }
+ */
+public class FinalStatic {
+
+    static final String CLASS_NAME_A = "A";
+    static final String CLASS_NAME_B = "B";
+    static final int FAILED = 0;
+    static final int EXPECTED = 1234;
+
+    static class TestClassLoader extends ClassLoader implements Opcodes {
+
+        @Override
+        public Class findClass(String name) throws ClassNotFoundException {
+            byte[] b;
+            try {
+                b = loadClassData(name);
+            } catch (Throwable th) {
+                // th.printStackTrace();
+                throw new ClassNotFoundException("Loading error", th);
+            }
+            return defineClass(name, b, 0, b.length);
+        }
+
+        private byte[] loadClassData(String name) throws Exception {
+            ClassWriter cw = new ClassWriter(0);
+            MethodVisitor mv;
+            switch (name) {
+               case CLASS_NAME_A:
+                    cw.visit(52, ACC_SUPER | ACC_PUBLIC, CLASS_NAME_A, null, "java/lang/Object", null);
+                    {
+                        mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+                        mv.visitCode();
+                        mv.visitVarInsn(ALOAD, 0);
+                        mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V");
+                        mv.visitInsn(RETURN);
+                        mv.visitMaxs(1, 1);
+                        mv.visitEnd();
+
+                        mv = cw.visitMethod(ACC_FINAL | ACC_STATIC, "m", "()I", null, null);
+                        mv.visitCode();
+                        mv.visitLdcInsn(FAILED);
+                        mv.visitInsn(IRETURN);
+                        mv.visitMaxs(1, 1);
+                        mv.visitEnd();
+                    }
+                    break;
+                case CLASS_NAME_B:
+                    cw.visit(52, ACC_SUPER | ACC_PUBLIC, CLASS_NAME_B, null, CLASS_NAME_A, null);
+                    {
+                        mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+                        mv.visitCode();
+                        mv.visitVarInsn(ALOAD, 0);
+                        mv.visitMethodInsn(INVOKESPECIAL, CLASS_NAME_A, "<init>", "()V");
+                        mv.visitInsn(RETURN);
+                        mv.visitMaxs(1, 1);
+                        mv.visitEnd();
+
+                        mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
+                        mv.visitCode();
+                        mv.visitLdcInsn(EXPECTED);
+                        mv.visitInsn(IRETURN);
+                        mv.visitMaxs(1, 1);
+                        mv.visitEnd();
+
+                    }
+                    break;
+                default:
+                    break;
+            }
+            cw.visitEnd();
+
+            return cw.toByteArray();
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        TestClassLoader tcl = new TestClassLoader();
+        Class<?> a = tcl.loadClass(CLASS_NAME_A);
+        Class<?> b = tcl.loadClass(CLASS_NAME_B);
+        Object inst = b.newInstance();
+        Method[] meths = b.getDeclaredMethods();
+
+        Method m = meths[0];
+        int mod = m.getModifiers();
+        if ((mod & Modifier.FINAL) != 0) {
+            throw new Exception("FAILED: " + m + " is FINAL");
+        }
+        if ((mod & Modifier.STATIC) != 0) {
+            throw new Exception("FAILED: " + m + " is STATIC");
+        }
+
+        m.setAccessible(true);
+        if (!m.invoke(inst).equals(EXPECTED)) {
+              throw new Exception("FAILED: " + EXPECTED + " from " + m);
+        }
+
+        System.out.println("Passed.");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/lambda-features/InvokespecialInterface.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8032024
+ * @bug 8025937
+ * @bug 8033528
+ * @summary [JDK 8] Test invokespecial and invokeinterface with the same JVM_CONSTANT_InterfaceMethodref
+ * @run main/othervm -XX:+StressRewriter InvokespecialInterface
+ */
+import java.util.function.*;
+import java.util.*;
+
+interface I {
+  default void imethod() { System.out.println("I::imethod"); }
+}
+
+class C implements I {
+  public void foo() { I.super.imethod(); }  // invokespecial InterfaceMethod
+  public void bar() { I i = this; i.imethod(); } // invokeinterface same
+  public void doSomeInvokedynamic() {
+      String str = "world";
+      Supplier<String> foo = ()->"hello, "+str;
+      String res = foo.get();
+      System.out.println(res);
+  }
+}
+
+public class InvokespecialInterface {
+  public static void main(java.lang.String[] unused) {
+     // need to create C and call I::foo()
+     C c = new C();
+     c.foo();
+     c.bar();
+     c.doSomeInvokedynamic();
+  }
+};
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/lambda-features/TestConcreteClassWithAbstractMethod.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8032010
+ * @summary method lookup on an abstract method in a concrete class should be successful
+ * @run main TestConcreteClassWithAbstractMethod
+ */
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+
+import static jdk.internal.org.objectweb.asm.Opcodes.*;
+
+/*
+ *   class T1 { public int m() {} }
+ *   class T2 { public abstract int m(); }
+ *   class T3 { public int m() {} }
+ *
+ *   Call site: T3.test() { invokevirtual T2.m() }
+ *   T3.m() should be invoked
+ */
+public class TestConcreteClassWithAbstractMethod {
+    static final String classT1 = "p1.T1";
+    static final String classT2 = "p1.T2";
+    static final String classT3 = "p1.T3";
+
+    static final String callerName = classT3;
+
+    public static void main(String[] args) throws Exception {
+        ClassLoader cl = new ClassLoader() {
+            public Class<?> loadClass(String name) throws ClassNotFoundException {
+                if (findLoadedClass(name) != null) {
+                    return findLoadedClass(name);
+                }
+
+                if (classT1.equals(name)) {
+                    byte[] classFile = dumpT1();
+                    return defineClass(classT1, classFile, 0, classFile.length);
+                }
+                if (classT2.equals(name)) {
+                    byte[] classFile = dumpT2();
+                    return defineClass(classT2, classFile, 0, classFile.length);
+                }
+                if (classT3.equals(name)) {
+                    byte[] classFile = dumpT3();
+                    return defineClass(classT3, classFile, 0, classFile.length);
+                }
+
+                return super.loadClass(name);
+            }
+        };
+
+        cl.loadClass(classT1);
+        cl.loadClass(classT2);
+        cl.loadClass(classT3);
+
+        //cl.loadClass(callerName).getDeclaredMethod("m");
+        cl.loadClass(callerName).newInstance();
+
+        int result = (Integer)cl.loadClass(callerName).getDeclaredMethod("test").invoke(null);
+        System.out.println(""+result);
+    }
+
+    public static byte[] dumpT1() {
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
+        cw.visit(52, ACC_PUBLIC | ACC_SUPER, "p1/T1", null, "java/lang/Object", null);
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(1, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
+            mv.visitCode();
+            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
+            mv.visitLdcInsn("p1/T1.m()");
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "print", "(Ljava/lang/String;)V", false);
+            mv.visitIntInsn(BIPUSH, 3);
+            mv.visitInsn(IRETURN);
+            mv.visitMaxs(2, 1);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        return cw.toByteArray();
+    }
+
+    public static byte[] dumpT2() {
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
+        cw.visit(52, ACC_PUBLIC | ACC_SUPER, "p1/T2", null, "p1/T1", null);
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "p1/T1", "<init>", "()V", false);
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(1, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC + ACC_ABSTRACT, "m", "()I", null, null);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        return cw.toByteArray();
+    }
+
+    public static byte[] dumpT3() {
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
+        cw.visit(52, ACC_PUBLIC + ACC_SUPER, "p1/T3", null, "p1/T2", null);
+
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "p1/T2", "<init>", "()V", false);
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(1, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
+            mv.visitCode();
+            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
+            mv.visitLdcInsn("p1/T3.m()");
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "print", "(Ljava/lang/String;)V", false);
+            mv.visitIntInsn(BIPUSH, 2);
+            mv.visitInsn(IRETURN);
+            mv.visitMaxs(2, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "test", "()I", null, null);
+            mv.visitCode();
+            mv.visitTypeInsn(NEW, "p1/T3");
+            mv.visitInsn(DUP);
+            mv.visitMethodInsn(INVOKESPECIAL, "p1/T3", "<init>", "()V", false);
+            mv.visitMethodInsn(INVOKEVIRTUAL, "p1/T2", "m", "()I", false);
+            mv.visitInsn(IRETURN);
+            mv.visitMaxs(3, 2);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        return cw.toByteArray();
+    }
+}
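
For orientation, the hierarchy that dumpT1/dumpT2/dumpT3 emit corresponds roughly to the Java source below. The sketch is approximate by necessity: the generated p1/T2 declares m() abstract while the class itself lacks ACC_ABSTRACT, a combination javac cannot produce (which is why the test assembles the classes with ASM); here T2 is marked abstract so the sketch compiles.

    package p1;

    class T1 {
        public int m() { System.out.print("p1/T1.m()"); return 3; }
    }

    // In the generated bytecode T2 itself is NOT abstract; only m() is.
    abstract class T2 extends T1 {
        public abstract int m();
    }

    class T3 extends T2 {
        public int m() { System.out.print("p1/T3.m()"); return 2; }

        public static int test() {
            // The cast makes javac emit invokevirtual p1/T2.m()I, matching
            // the INVOKEVIRTUAL instruction generated in dumpT3() above.
            return ((T2) new T3()).m();
        }
    }

Virtual dispatch still selects T3.m() at runtime, so test() is expected to return 2.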
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/sa/jmap-hashcode/Test8028623.java	Wed Feb 19 12:08:49 2014 -0800
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8028623
+ * @summary Test hashing of extended characters in Serviceability Agent.
+ * @library /testlibrary
+ * @compile -encoding utf8 Test8028623.java
+ * @run main Test8028623
+ */
+
+import com.oracle.java.testlibrary.JDKToolLauncher;
+import com.oracle.java.testlibrary.OutputBuffer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+import java.io.File;
+
+public class Test8028623 {
+
+  public static int à = 1;
+  public static String dumpFile = "heap.out";
+
+  public static void main (String[] args) {
+
+    System.out.println(à);
+
+    try {
+        int pid = ProcessTools.getProcessId();
+        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
+                                              .addToolArg("-F")
+                                              .addToolArg("-dump:live,format=b,file=" + dumpFile)
+                                              .addToolArg(Integer.toString(pid));
+        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
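+        // Start jmap once; read stdout/stderr and the exit code of that run.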
+        Process p = pb.start();
+        OutputBuffer output = ProcessTools.getOutput(p);
+        int e = p.waitFor();
+        System.out.println("stdout:");
+        System.out.println(output.getStdout());
+        System.out.println("stderr:");
+        System.out.println(output.getStderr());
+
+        if (e != 0) {
+            throw new RuntimeException("jmap returns: " + e);
+        }
+        if (! new File(dumpFile).exists()) {
+            throw new RuntimeException("dump file NOT created: '" + dumpFile + "'");
+        }
+    } catch (Throwable t) {
+        t.printStackTrace();
+        throw new RuntimeException("Test failed with: " + t);
+    }
+  }
+}
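
Because the output buffer and the exit code must describe the same jmap run, the process is started exactly once above. A condensed variant using the testlibrary's OutputAnalyzer (a sketch only, assuming the same pb as in the test) would be:

    // Hypothetical condensed check: OutputAnalyzer drains stdout/stderr and
    // records the exit value of the single process started here.
    OutputAnalyzer out = new OutputAnalyzer(pb.start());
    System.out.println("stdout:\n" + out.getStdout());
    System.out.println("stderr:\n" + out.getStderr());
    out.shouldHaveExitValue(0);   // throws on a non-zero jmap exit code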
--- a/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java	Thu Jan 30 14:30:01 2014 +0100
+++ b/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java	Wed Feb 19 12:08:49 2014 -0800
@@ -142,11 +142,27 @@
    * with any platform specific arguments prepended
    */
   public static ProcessBuilder createJavaProcessBuilder(String... command) throws Exception {
+    return createJavaProcessBuilder(false, command);
+  }
+
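+  /**
+   * Create ProcessBuilder using the java launcher from the jdk to be tested;
+   * optionally forwards the jtreg-supplied test.vm.opts to the new process.
+   */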
+  public static ProcessBuilder createJavaProcessBuilder(boolean addTestVmOptions, String... command) throws Exception {
     String javapath = JDKToolFinder.getJDKTool("java");
 
     ArrayList<String> args = new ArrayList<>();
     args.add(javapath);
     Collections.addAll(args, getPlatformSpecificVMArgs());
+
+    if (addTestVmOptions) {
+      String vmopts = System.getProperty("test.vm.opts", "").trim();
+      if (!vmopts.isEmpty()) {
+        Collections.addAll(args, vmopts.split("\\s+"));
+      }
+    }
+
     Collections.addAll(args, command);
 
     // Reporting
@@ -157,5 +173,4 @@
 
     return new ProcessBuilder(args.toArray(new String[args.size()]));
   }
-
 }
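
The new boolean overload lets a test spawn a child VM that runs under the same externally supplied flags as the test itself. A hypothetical caller (the flag values are illustrative) might look like:

    // Sketch: 'true' asks the helper to append the contents of the
    // test.vm.opts system property to the child VM's command line.
    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
        true,          // addTestVmOptions
        "-version");
    OutputAnalyzer out = new OutputAnalyzer(pb.start());
    out.shouldHaveExitValue(0);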