changeset 1144:9b9c1ee9b3f6

Merge
author iveresov
date Wed, 06 Jan 2010 22:21:39 -0800
parents a5a6adfca6ec (diff) aad340e07bc4 (current diff)
children 0579c695832f f62a22282a47
files src/share/vm/classfile/classFileParser.cpp src/share/vm/classfile/vmSymbols.hpp src/share/vm/memory/referenceProcessor.cpp src/share/vm/oops/instanceKlass.cpp src/share/vm/oops/instanceRefKlass.cpp src/share/vm/prims/jvm.cpp src/share/vm/prims/jvmtiEnv.cpp src/share/vm/prims/jvmtiEnvBase.cpp src/share/vm/prims/jvmtiExport.cpp src/share/vm/runtime/frame.cpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/vmStructs.cpp
diffstat 66 files changed, 899 insertions(+), 467 deletions(-)
--- a/.hgignore	Wed Jan 06 14:25:03 2010 -0800
+++ b/.hgignore	Wed Jan 06 22:21:39 2010 -0800
@@ -1,6 +1,6 @@
 ^build/
 ^dist/
-^nbproject/private/
+/nbproject/private/
 ^src/share/tools/hsdis/build/
 ^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
 ^src/share/tools/IdealGraphVisualizer/build/
--- a/.hgtags	Wed Jan 06 14:25:03 2010 -0800
+++ b/.hgtags	Wed Jan 06 22:21:39 2010 -0800
@@ -50,3 +50,6 @@
 faf94d94786b621f8e13cbcc941ca69c6d967c3f jdk7-b73
 f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 jdk7-b74
 d8dd291a362acb656026a9c0a9da48501505a1e7 jdk7-b75
+9174bb32e934965288121f75394874eeb1fcb649 jdk7-b76
+455105fc81d941482f8f8056afaa7aa0949c9300 jdk7-b77
+e703499b4b51e3af756ae77c3d5e8b3058a14e4e jdk7-b78
--- a/make/hotspot_version	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/hotspot_version	Wed Jan 06 22:21:39 2010 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=17
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=05
+HS_BUILD_NUMBER=06
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/linux/makefiles/debug.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/linux/makefiles/debug.make	Wed Jan 06 22:21:39 2010 -0800
@@ -38,7 +38,7 @@
  "Please use 'make jvmg' to build debug JVM.                            \n" \
  "----------------------------------------------------------------------\n")
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/make/linux/makefiles/fastdebug.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/linux/makefiles/fastdebug.make	Wed Jan 06 22:21:39 2010 -0800
@@ -58,7 +58,7 @@
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG
 PICFLAGS = DEFAULT
--- a/make/linux/makefiles/jsig.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/linux/makefiles/jsig.make	Wed Jan 06 22:21:39 2010 -0800
@@ -25,9 +25,12 @@
 # Rules to build signal interposition library, used by vm.make
 
 # libjsig[_g].so: signal interposition library
-JSIG = jsig$(G_SUFFIX)
+JSIG = jsig
 LIBJSIG = lib$(JSIG).so
 
+JSIG_G    = $(JSIG)$(G_SUFFIX)
+LIBJSIG_G = lib$(JSIG_G).so
+
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
 DEST_JSIG  = $(JDK_LIBDIR)/$(LIBJSIG)
@@ -50,6 +53,7 @@
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
+	$(QUIETLY) [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
--- a/make/linux/makefiles/jvmg.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/linux/makefiles/jvmg.make	Wed Jan 06 22:21:39 2010 -0800
@@ -35,7 +35,7 @@
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/make/linux/makefiles/launcher.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/linux/makefiles/launcher.make	Wed Jan 06 22:21:39 2010 -0800
@@ -25,7 +25,9 @@
 # Rules to build gamma launcher, used by vm.make
 
 # gamma[_g]: launcher
-LAUNCHER = gamma$(G_SUFFIX)
+
+LAUNCHER   = gamma
+LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
 
 LAUNCHERDIR   = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
 LAUNCHERFLAGS = $(ARCHFLAG) \
@@ -70,4 +72,5 @@
 	    $(LINK_LAUNCHER/PRE_HOOK) \
 	    $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
 	    $(LINK_LAUNCHER/POST_HOOK) \
+	    [ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
         }
--- a/make/linux/makefiles/saproc.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/linux/makefiles/saproc.make	Wed Jan 06 22:21:39 2010 -0800
@@ -25,9 +25,13 @@
 # Rules to build serviceability agent library, used by vm.make
 
 # libsaproc[_g].so: serviceability agent
-SAPROC = saproc$(G_SUFFIX)
+
+SAPROC = saproc
 LIBSAPROC = lib$(SAPROC).so
 
+SAPROC_G = $(SAPROC)$(G_SUFFIX)
+LIBSAPROC_G = lib$(SAPROC_G).so
+
 AGENT_DIR = $(GAMMADIR)/agent
 
 SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)
@@ -75,6 +79,7 @@
 	           $(SA_DEBUG_CFLAGS)                                   \
 	           -o $@                                                \
 	           -lthread_db
+	$(QUIETLY) [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
 
 install_saproc: checkAndBuildSA
 	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
--- a/make/linux/makefiles/vm.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/linux/makefiles/vm.make	Wed Jan 06 22:21:39 2010 -0800
@@ -113,8 +113,9 @@
 #----------------------------------------------------------------------
 # JVM
 
-JVM    = jvm$(G_SUFFIX)
-LIBJVM = lib$(JVM).so
+JVM      = jvm
+LIBJVM   = lib$(JVM).so
+LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
 
 JVM_OBJ_FILES = $(Obj_Files)
 
@@ -201,6 +202,7 @@
 		       $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM);       \
 	    $(LINK_LIB.CC/POST_HOOK)                                    \
 	    rm -f $@.1; ln -s $@ $@.1;                                  \
+	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
 	    if [ -x /usr/sbin/selinuxenabled ] ; then                   \
 	      /usr/sbin/selinuxenabled;                                 \
               if [ $$? = 0 ] ; then					\
--- a/make/solaris/makefiles/debug.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/solaris/makefiles/debug.make	Wed Jan 06 22:21:39 2010 -0800
@@ -54,7 +54,7 @@
  "Please use 'gnumake jvmg' to build debug JVM.                            \n" \
  "-------------------------------------------------------------------------\n")
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/make/solaris/makefiles/dtrace.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/solaris/makefiles/dtrace.make	Wed Jan 06 22:21:39 2010 -0800
@@ -24,8 +24,8 @@
 
 # Rules to build jvm_db/dtrace, used by vm.make
 
-# we build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
-# but not for CORE configuration
+# We build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
+# but not for CORE or KERNEL configurations.
 
 ifneq ("${TYPE}", "CORE")
 ifneq ("${TYPE}", "KERNEL")
@@ -37,12 +37,13 @@
 
 else
 
-
 JVM_DB = libjvm_db
-LIBJVM_DB = libjvm$(G_SUFFIX)_db.so
+LIBJVM_DB = libjvm_db.so
+LIBJVM_DB_G = libjvm$(G_SUFFIX)_db.so
 
 JVM_DTRACE = jvm_dtrace
-LIBJVM_DTRACE = libjvm$(G_SUFFIX)_dtrace.so
+LIBJVM_DTRACE = libjvm_dtrace.so
+LIBJVM_DTRACE_G = libjvm$(G_SUFFIX)_dtrace.so
 
 JVMOFFS = JvmOffsets
 JVMOFFS.o = $(JVMOFFS).o
@@ -77,7 +78,7 @@
 LFLAGS_JVM_DTRACE += -D_REENTRANT $(PICFLAG)
 else
 LFLAGS_JVM_DB += -mt $(PICFLAG) -xnolib
-LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib
+LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib -ldl
 endif
 
 ISA = $(subst i386,i486,$(shell isainfo -n))
@@ -86,18 +87,24 @@
 ifneq ("${ISA}","${BUILDARCH}")
 
 XLIBJVM_DB = 64/$(LIBJVM_DB)
+XLIBJVM_DB_G = 64/$(LIBJVM_DB_G)
 XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
+XLIBJVM_DTRACE_G = 64/$(LIBJVM_DTRACE_G)
 
 $(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) mkdir -p 64/ ; \
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
+	[ -f $(XLIBJVM_DB_G) ] || { ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G); }
+
 $(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) mkdir -p 64/ ; \
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+	[ -f $(XLIBJVM_DTRACE_G) ] || { ln -s $(LIBJVM_DTRACE) $(XLIBJVM_DTRACE_G); }
+
 endif # ifneq ("${ISA}","${BUILDARCH}")
 
 ifdef USE_GCC
@@ -142,11 +149,13 @@
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
+	[ -f $(LIBJVM_DB_G) ] || { ln -s $@ $(LIBJVM_DB_G); }
 
 $(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+	[ -f $(LIBJVM_DTRACE_G) ] || { ln -s $@ $(LIBJVM_DTRACE_G); }
 
 $(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
              $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
--- a/make/solaris/makefiles/fastdebug.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/solaris/makefiles/fastdebug.make	Wed Jan 06 22:21:39 2010 -0800
@@ -90,7 +90,6 @@
 # for this method for now. (fix this when dtrace bug 6258412 is fixed)
 OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_
 
-
 # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
 
 # If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
@@ -115,8 +114,7 @@
 # and mustn't be otherwise.
 MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
 
-
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS
 PICFLAGS = DEFAULT
--- a/make/solaris/makefiles/jsig.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/solaris/makefiles/jsig.make	Wed Jan 06 22:21:39 2010 -0800
@@ -25,8 +25,11 @@
 # Rules to build signal interposition library, used by vm.make
 
 # libjsig[_g].so: signal interposition library
-JSIG = jsig$(G_SUFFIX)
-LIBJSIG = lib$(JSIG).so
+JSIG      = jsig
+LIBJSIG   = lib$(JSIG).so
+
+JSIG_G    = $(JSIG)$(G_SUFFIX)
+LIBJSIG_G = lib$(JSIG_G).so
 
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
@@ -46,6 +49,7 @@
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) -o $@ $< -ldl
+	[ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
--- a/make/solaris/makefiles/jvmg.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/solaris/makefiles/jvmg.make	Wed Jan 06 22:21:39 2010 -0800
@@ -51,7 +51,7 @@
 # and mustn't be otherwise.
 MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/make/solaris/makefiles/launcher.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/solaris/makefiles/launcher.make	Wed Jan 06 22:21:39 2010 -0800
@@ -25,7 +25,8 @@
 # Rules to build gamma launcher, used by vm.make
 
 # gamma[_g]: launcher
-LAUNCHER = gamma$(G_SUFFIX)
+LAUNCHER   = gamma
+LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
 
 LAUNCHERDIR   = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
 LAUNCHERFLAGS = $(ARCHFLAG) \
@@ -88,5 +89,6 @@
 	    $(LINK_LAUNCHER/PRE_HOOK) \
 	    $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
 	    $(LINK_LAUNCHER/POST_HOOK) \
+	    [ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
 	    ;; \
 	esac
--- a/make/solaris/makefiles/saproc.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/solaris/makefiles/saproc.make	Wed Jan 06 22:21:39 2010 -0800
@@ -25,9 +25,13 @@
 # Rules to build serviceability agent library, used by vm.make
 
 # libsaproc[_g].so: serviceability agent
-SAPROC = saproc$(G_SUFFIX)
+
+SAPROC = saproc
 LIBSAPROC = lib$(SAPROC).so
 
+SAPROC_G = $(SAPROC)$(G_SUFFIX)
+LIBSAPROC_G = lib$(SAPROC_G).so
+
 AGENT_DIR = $(GAMMADIR)/agent
 
 SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)/proc
@@ -69,6 +73,7 @@
 	           $(SA_LFLAGS)                                         \
 	           -o $@                                                \
 	           -ldl -ldemangle -lthread -lc
+	[ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
 
 install_saproc: checkAndBuildSA
 	$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then             \
--- a/make/solaris/makefiles/sparcWorks.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/solaris/makefiles/sparcWorks.make	Wed Jan 06 22:21:39 2010 -0800
@@ -281,8 +281,6 @@
 OPT_CFLAGS=-xO4 $(EXTRA_OPT_CFLAGS)
 endif
 
-CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_sparc/vm/solaris_sparc.il
-
 endif # sparc
 
 ifeq ("${Platform_arch_model}", "x86_32")
@@ -293,13 +291,14 @@
 # [phh] Is this still true for 6.1?
 OPT_CFLAGS+=-xO3
 
-CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_x86/vm/solaris_x86_32.il
-
 endif # 32bit x86
 
 # no more exceptions
 CFLAGS/NOEX=-noex
 
+# Inline functions
+CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_${Platform_arch}/vm/solaris_${Platform_arch_model}.il
+
 # Reduce code bloat by reverting back to 5.0 behavior for static initializers
 CFLAGS += -Qoption ccfe -one_static_init
 
@@ -312,6 +311,15 @@
 PICFLAG/BETTER  = $(PICFLAG/DEFAULT)
 PICFLAG/BYFILE  = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@))
 
+# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
+MAPFLAG = -M FILENAME
+
+# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj
+SONAMEFLAG = -h SONAME
+
+# Build shared library
+SHARED_FLAG = -G
+
 # Would be better if these weren't needed, since we link with CC, but
 # at present removing them causes run-time errors
 LFLAGS += -library=Crun
--- a/make/solaris/makefiles/vm.make	Wed Jan 06 14:25:03 2010 -0800
+++ b/make/solaris/makefiles/vm.make	Wed Jan 06 22:21:39 2010 -0800
@@ -108,11 +108,16 @@
 #   older libm before libCrun, just to make sure it's found and used first.
 LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc
 else
+ifeq ($(COMPILER_REV_NUMERIC), 502)
+# SC6.1 has its own libm.so: specifying anything else provokes a name conflict.
+LIBS += -ldl -lthread -lsocket -lm -lsched -ldoor
+else
 LIBS += -ldl -lthread -lsocket $(LIBM) -lsched -ldoor
-endif
+endif # 502
+endif # 505
 else
 LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc
-endif
+endif # sparcWorks
 
 # By default, link the *.o into the library, not the executable.
 LINK_INTO$(LINK_INTO) = LIBJVM
@@ -126,8 +131,9 @@
 #----------------------------------------------------------------------
 # JVM
 
-JVM    = jvm$(G_SUFFIX)
-LIBJVM = lib$(JVM).so
+JVM      = jvm
+LIBJVM   = lib$(JVM).so
+LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
 
 JVM_OBJ_FILES = $(Obj_Files) $(DTRACE_OBJS)
 
@@ -173,11 +179,12 @@
 	-sbfast|-xsbfast) \
 	    ;; \
 	*) \
-	    echo Linking vm...;                                                  \
-	    $(LINK_LIB.CC/PRE_HOOK)                                              \
-	    $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM);                \
-	    $(LINK_LIB.CC/POST_HOOK)                                             \
-	    rm -f $@.1; ln -s $@ $@.1;                                           \
+	    echo Linking vm...; \
+	    $(LINK_LIB.CC/PRE_HOOK) \
+	    $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
+	    $(LINK_LIB.CC/POST_HOOK) \
+	    rm -f $@.1; ln -s $@ $@.1; \
+	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
 	    ;; \
 	esac
 
--- a/src/os/linux/vm/os_linux.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/os/linux/vm/os_linux.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -223,8 +223,8 @@
                      "environment on Linux when /proc filesystem is not mounted.";
 
 void os::Linux::initialize_system_info() {
-  _processor_count = sysconf(_SC_NPROCESSORS_CONF);
-  if (_processor_count == 1) {
+  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
+  if (processor_count() == 1) {
     pid_t pid = os::Linux::gettid();
     char fname[32];
     jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
@@ -236,7 +236,7 @@
     }
   }
   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
-  assert(_processor_count > 0, "linux error");
+  assert(processor_count() > 0, "linux error");
 }
 
 void os::init_system_properties_values() {
@@ -4683,6 +4683,7 @@
   // Return immediately if a permit is available.
   if (_counter > 0) {
       _counter = 0 ;
+      OrderAccess::fence();
       return ;
   }
 
@@ -4725,6 +4726,7 @@
     _counter = 0;
     status = pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
+    OrderAccess::fence();
     return;
   }
 
@@ -4765,6 +4767,7 @@
     jt->java_suspend_self();
   }
 
+  OrderAccess::fence();
 }
 
 void Parker::unpark() {
--- a/src/os/solaris/dtrace/libjvm_db.c	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/os/solaris/dtrace/libjvm_db.c	Wed Jan 06 22:21:39 2010 -0800
@@ -937,54 +937,56 @@
   return err;
 }
 
-static int
-scopeDesc_chain(Nmethod_t *N)
-{
+static int scopeDesc_chain(Nmethod_t *N) {
   int32_t decode_offset = 0;
   int32_t err;
 
-  if (debug > 2)
-      fprintf(stderr, "\t scopeDesc_chain: BEGIN\n");
+  if (debug > 2) {
+    fprintf(stderr, "\t scopeDesc_chain: BEGIN\n");
+  }
 
   err = ps_pread(N->J->P, N->pc_desc + OFFSET_PcDesc_scope_decode_offset,
                  &decode_offset, SZ32);
   CHECK_FAIL(err);
 
   while (decode_offset > 0) {
-      if (debug > 2)
-          fprintf(stderr, "\t scopeDesc_chain: decode_offset: %#x\n", decode_offset);
+    Vframe_t *vf = &N->vframes[N->vf_cnt];
 
-      Vframe_t *vf = &N->vframes[N->vf_cnt];
+    if (debug > 2) {
+      fprintf(stderr, "\t scopeDesc_chain: decode_offset: %#x\n", decode_offset);
+    }
+
+    err = scope_desc_at(N, decode_offset, vf);
+    CHECK_FAIL(err);
 
-      err = scope_desc_at(N, decode_offset, vf);
-      CHECK_FAIL(err);
+    if (vf->methodIdx > N->oops_len) {
+      fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
+      return -1;
+    }
+    err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
+                       &vf->methodOop);
+    CHECK_FAIL(err);
 
-      if (vf->methodIdx > N->oops_len) {
-          fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
-          return -1;
-      }
-      err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
-                               &vf->methodOop);
+    if (vf->methodOop) {
+      N->vf_cnt++;
+      err = line_number_from_bci(N->J, vf);
       CHECK_FAIL(err);
-
-      if (vf->methodOop) {
-          N->vf_cnt++;
-          err = line_number_from_bci(N->J, vf);
-          CHECK_FAIL(err);
-          if (debug > 2) {
-              fprintf(stderr, "\t scopeDesc_chain: methodOop: %#8llx, line: %ld\n",
-                              vf->methodOop, vf->line);
-          }
+      if (debug > 2) {
+        fprintf(stderr, "\t scopeDesc_chain: methodOop: %#8llx, line: %ld\n",
+                vf->methodOop, vf->line);
       }
-      decode_offset = vf->sender_decode_offset;
+    }
+    decode_offset = vf->sender_decode_offset;
   }
-  if (debug > 2)
-      fprintf(stderr, "\t scopeDesc_chain: END \n\n");
+  if (debug > 2) {
+    fprintf(stderr, "\t scopeDesc_chain: END \n\n");
+  }
   return PS_OK;
 
  fail:
-  if (debug)
-      fprintf(stderr, "\t scopeDesc_chain: FAIL \n\n");
+  if (debug) {
+    fprintf(stderr, "\t scopeDesc_chain: FAIL \n\n");
+  }
   return err;
 }
 
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/os/solaris/vm/os_solaris.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -457,7 +457,7 @@
 
 
 void os::Solaris::initialize_system_info() {
-  _processor_count = sysconf(_SC_NPROCESSORS_CONF);
+  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 }
@@ -5803,6 +5803,7 @@
   // Return immediately if a permit is available.
   if (_counter > 0) {
       _counter = 0 ;
+      OrderAccess::fence();
       return ;
   }
 
@@ -5846,6 +5847,7 @@
     _counter = 0;
     status = os::Solaris::mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
+    OrderAccess::fence();
     return;
   }
 
@@ -5892,6 +5894,7 @@
     jt->java_suspend_self();
   }
 
+  OrderAccess::fence();
 }
 
 void Parker::unpark() {
--- a/src/os/windows/vm/os_windows.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/os/windows/vm/os_windows.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -3150,7 +3150,7 @@
   _vm_allocation_granularity = si.dwAllocationGranularity;
   _processor_type  = si.dwProcessorType;
   _processor_level = si.wProcessorLevel;
-  _processor_count = si.dwNumberOfProcessors;
+  set_processor_count(si.dwNumberOfProcessors);
 
   MEMORYSTATUSEX ms;
   ms.dwLength = sizeof(ms);
--- a/src/share/vm/ci/bcEscapeAnalyzer.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/ci/bcEscapeAnalyzer.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -61,9 +61,11 @@
   BCEscapeAnalyzer* _parent;
   int               _level;
 
+ public:
   class  ArgumentMap;
   class  StateInfo;
 
+ private:
   // helper functions
   bool is_argument(int i)    { return i >= 0 && i < _arg_size; }
 
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/classfile/classFileParser.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -3753,8 +3753,9 @@
 }
 
 bool ClassFileParser::is_supported_version(u2 major, u2 minor) {
-  u2 max_version = JDK_Version::is_gte_jdk17x_version() ?
-    JAVA_MAX_SUPPORTED_VERSION : JAVA_6_VERSION;
+  u2 max_version =
+    JDK_Version::is_gte_jdk17x_version() ? JAVA_MAX_SUPPORTED_VERSION :
+    (JDK_Version::is_gte_jdk16x_version() ? JAVA_6_VERSION : JAVA_1_5_VERSION);
   return (major >= JAVA_MIN_SUPPORTED_VERSION) &&
          (major <= max_version) &&
          ((major != max_version) ||
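
The hunk above widens the class-file version gate so that a pre-JDK6 HotSpot caps accepted class files at the 1.5 format. What follows is a standalone sketch of that selection logic, not part of the changeset: the JAVA_*_VERSION constants are replaced by the conventional class-file major numbers (minimum 45, 49 = 5.0, 50 = 6, 51 = 7) as assumed values, and the minor-version clause at the end of the hunk is omitted.

#include <cstdio>
#include <initializer_list>

int main() {
  // Assumed stand-ins for JAVA_MIN_SUPPORTED_VERSION, JAVA_1_5_VERSION,
  // JAVA_6_VERSION and JAVA_MAX_SUPPORTED_VERSION.
  const int kMin = 45, kJava15 = 49, kJava6 = 50, kMax = 51;

  // Hypothetical release: a JDK 6 based VM (at least 1.6, not yet 1.7).
  const bool is_gte_jdk17x = false;
  const bool is_gte_jdk16x = true;

  const int max_version =
    is_gte_jdk17x ? kMax : (is_gte_jdk16x ? kJava6 : kJava15);

  for (int major : {49, 50, 51}) {
    const bool ok = (major >= kMin) && (major <= max_version);
    std::printf("class file major %d: %s\n", major, ok ? "supported" : "rejected");
  }
  // Prints: 49 and 50 supported, 51 rejected. With is_gte_jdk17x set to
  // true, major 51 would be accepted as well.
  return 0;
}
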
--- a/src/share/vm/classfile/vmSymbols.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/classfile/vmSymbols.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -105,6 +105,7 @@
   template(java_lang_AssertionStatusDirectives,       "java/lang/AssertionStatusDirectives")      \
   template(sun_jkernel_DownloadManager,               "sun/jkernel/DownloadManager")              \
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
+  template(setBootClassLoaderHook_name,               "setBootClassLoaderHook")                   \
                                                                                                   \
   /* class file format tags */                                                                    \
   template(tag_source_file,                           "SourceFile")                               \
--- a/src/share/vm/code/dependencies.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/code/dependencies.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -1528,19 +1528,23 @@
   int nsup = 0, nint = 0;
   for (ContextStream str(*this); str.next(); ) {
     klassOop k = str.klass();
-    switch (str._change_type) {
+    switch (str.change_type()) {
     case Change_new_type:
       tty->print_cr("  dependee = %s", instanceKlass::cast(k)->external_name());
       break;
     case Change_new_sub:
-      if (!WizardMode)
-           ++nsup;
-      else tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
+      if (!WizardMode) {
+        ++nsup;
+      } else {
+        tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
+      }
       break;
     case Change_new_impl:
-      if (!WizardMode)
-           ++nint;
-      else tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
+      if (!WizardMode) {
+        ++nint;
+      } else {
+        tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
+      }
       break;
     }
   }
--- a/src/share/vm/code/dependencies.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/code/dependencies.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -470,7 +470,7 @@
 // super types can be context types for a relevant dependency, which the
 // new type could invalidate.
 class DepChange : public StackObj {
- private:
+ public:
   enum ChangeType {
     NO_CHANGE = 0,              // an uninvolved klass
     Change_new_type,            // a newly loaded type
@@ -480,6 +480,7 @@
     Start_Klass = CHANGE_LIMIT  // internal indicator for ContextStream
   };
 
+ private:
   // each change set is rooted in exactly one new type (at present):
   KlassHandle _new_type;
 
@@ -510,15 +511,15 @@
   // }
   class ContextStream : public StackObj {
    private:
-    DepChange&       _changes;
+    DepChange&  _changes;
     friend class DepChange;
 
     // iteration variables:
-    ChangeType            _change_type;
-    klassOop              _klass;
-    objArrayOop           _ti_base;    // i.e., transitive_interfaces
-    int                   _ti_index;
-    int                   _ti_limit;
+    ChangeType  _change_type;
+    klassOop    _klass;
+    objArrayOop _ti_base;    // i.e., transitive_interfaces
+    int         _ti_index;
+    int         _ti_limit;
 
     // start at the beginning:
     void start() {
@@ -530,11 +531,11 @@
       _ti_limit = 0;
     }
 
+   public:
     ContextStream(DepChange& changes)
       : _changes(changes)
     { start(); }
 
-   public:
     ContextStream(DepChange& changes, No_Safepoint_Verifier& nsv)
       : _changes(changes)
       // the nsv argument makes it safe to hold oops like _klass
@@ -542,6 +543,7 @@
 
     bool next();
 
+    ChangeType change_type()     { return _change_type; }
     klassOop   klass()           { return _klass; }
   };
   friend class DepChange::ContextStream;
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -42,28 +42,49 @@
   _n_periods(0),
   _threads(NULL), _n_threads(0)
 {
-  if (G1ConcRefine) {
-    _n_threads = (int)thread_num();
-    if (_n_threads > 0) {
-      _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
-      int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
-      ConcurrentG1RefineThread *next = NULL;
-      for (int i = _n_threads - 1; i >= 0; i--) {
-        ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
-        assert(t != NULL, "Conc refine should have been created");
-        assert(t->cg1r() == this, "Conc refine thread should refer to this");
-        _threads[i] = t;
-        next = t;
-      }
-    }
+
+  // Ergonomically select initial concurrent refinement parameters
+  if (FLAG_IS_DEFAULT(G1ConcRefineGreenZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefineGreenZone, MAX2<int>(ParallelGCThreads, 1));
+  }
+  set_green_zone(G1ConcRefineGreenZone);
+
+  if (FLAG_IS_DEFAULT(G1ConcRefineYellowZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefineYellowZone, green_zone() * 3);
+  }
+  set_yellow_zone(MAX2<int>(G1ConcRefineYellowZone, green_zone()));
+
+  if (FLAG_IS_DEFAULT(G1ConcRefineRedZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefineRedZone, yellow_zone() * 2);
+  }
+  set_red_zone(MAX2<int>(G1ConcRefineRedZone, yellow_zone()));
+  _n_worker_threads = thread_num();
+  // We need one extra thread to do the young gen rset size sampling.
+  _n_threads = _n_worker_threads + 1;
+  reset_threshold_step();
+
+  _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
+  int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+  ConcurrentG1RefineThread *next = NULL;
+  for (int i = _n_threads - 1; i >= 0; i--) {
+    ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
+    assert(t != NULL, "Conc refine should have been created");
+    assert(t->cg1r() == this, "Conc refine thread should refer to this");
+    _threads[i] = t;
+    next = t;
   }
 }
 
-size_t ConcurrentG1Refine::thread_num() {
-  if (G1ConcRefine) {
-    return (G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads;
+void ConcurrentG1Refine::reset_threshold_step() {
+  if (FLAG_IS_DEFAULT(G1ConcRefineThresholdStep)) {
+    _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
+  } else {
+    _thread_threshold_step = G1ConcRefineThresholdStep;
   }
-  return 0;
+}
+
+int ConcurrentG1Refine::thread_num() {
+  return MAX2<int>((G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads, 1);
 }
 
 void ConcurrentG1Refine::init() {
@@ -123,6 +144,15 @@
   }
 }
 
+void ConcurrentG1Refine::reinitialize_threads() {
+  reset_threshold_step();
+  if (_threads != NULL) {
+    for (int i = 0; i < _n_threads; i++) {
+      _threads[i]->initialize();
+    }
+  }
+}
+
 ConcurrentG1Refine::~ConcurrentG1Refine() {
   if (G1ConcRSLogCacheSize > 0) {
     assert(_card_counts != NULL, "Logic");
@@ -384,4 +414,3 @@
     st->cr();
   }
 }
-
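
For reference, a standalone sketch (not HotSpot code) that reproduces the default zone arithmetic from the new ConcurrentG1Refine constructor above, assuming the G1ConcRefineGreenZone/YellowZone/RedZone and G1ConcRefineThresholdStep flags are all left at their defaults and G1ParallelRSetThreads is 0; the ParallelGCThreads value is hypothetical.

#include <algorithm>
#include <cstdio>

int main() {
  const int parallel_gc_threads = 4;                      // hypothetical -XX:ParallelGCThreads

  const int workers = std::max(parallel_gc_threads, 1);   // thread_num()
  const int green   = std::max(parallel_gc_threads, 1);   // G1ConcRefineGreenZone default
  const int yellow  = std::max(green * 3, green);         // G1ConcRefineYellowZone default
  const int red     = std::max(yellow * 2, yellow);       // G1ConcRefineRedZone default
  const int step    = (yellow - green) / (workers + 1);   // reset_threshold_step() default

  std::printf("green=%d yellow=%d red=%d step=%d refinement threads=%d\n",
              green, yellow, red, step, workers + 1);
  // With 4 GC threads: green=4, yellow=12, red=24, step=1, and 5 refinement
  // threads (4 workers plus the extra young-gen RSet sampling thread).
  return 0;
}
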
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -29,6 +29,31 @@
 class ConcurrentG1Refine: public CHeapObj {
   ConcurrentG1RefineThread** _threads;
   int _n_threads;
+  int _n_worker_threads;
+ /*
+  * The value of the update buffer queue length falls into one of 3 zones:
+  * green, yellow, red. If the value is in [0, green) nothing is
+  * done, the buffers are left unprocessed to enable the caching effect of the
+  * dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
+  * threads are gradually activated. In [yellow, red) all threads are
+  * running. If the length becomes red (max queue length) the mutators start
+  * processing the buffers.
+  *
+  * There are some interesting cases (with G1AdaptiveConcRefine turned off):
+  * 1) green = yellow = red = 0. In this case the mutator will process all
+  *    buffers, except for those that are created by the deferred updates
+  *    machinery during a collection.
+  * 2) green = 0. Means no caching. Can be a good way to minimize the
+  *    amount of time spent updating rsets during a collection.
+  */
+  int _green_zone;
+  int _yellow_zone;
+  int _red_zone;
+
+  int _thread_threshold_step;
+
+  // Reset the threshold step value based on the current zone boundaries.
+  void reset_threshold_step();
 
   // The cache for card refinement.
   bool   _use_cache;
@@ -147,6 +172,8 @@
   void init(); // Accomplish some initialization that has to wait.
   void stop();
 
+  void reinitialize_threads();
+
   // Iterate over the conc refine threads
   void threads_do(ThreadClosure *tc);
 
@@ -178,7 +205,20 @@
 
   void clear_and_record_card_counts();
 
-  static size_t thread_num();
+  static int thread_num();
 
   void print_worker_threads_on(outputStream* st) const;
+
+  void set_green_zone(int x)  { _green_zone = x;  }
+  void set_yellow_zone(int x) { _yellow_zone = x; }
+  void set_red_zone(int x)    { _red_zone = x;    }
+
+  int green_zone() const      { return _green_zone;  }
+  int yellow_zone() const     { return _yellow_zone; }
+  int red_zone() const        { return _red_zone;    }
+
+  int total_thread_num() const  { return _n_threads;        }
+  int worker_thread_num() const { return _n_worker_threads; }
+
+  int thread_threshold_step() const { return _thread_threshold_step; }
 };
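
A minimal sketch, not HotSpot code, of the green/yellow/red policy the comment block above describes: given the length of the completed update buffer queue, it reports who is expected to process buffers. The zone values are the example defaults from the previous sketch.

#include <cstdio>
#include <initializer_list>

const char* zone_action(int queue_len, int green, int yellow, int red) {
  if (queue_len < green)  return "leave buffers unprocessed (caching)"; // [0, green)
  if (queue_len < yellow) return "some refinement threads active";      // [green, yellow)
  if (queue_len < red)    return "all refinement threads active";       // [yellow, red)
  return "mutators process their own buffers";                          // >= red
}

int main() {
  const int green = 4, yellow = 12, red = 24;
  for (int len : {2, 6, 15, 30}) {
    std::printf("queue length %2d -> %s\n", len, zone_action(len, green, yellow, red));
  }
  return 0;
}
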
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -25,10 +25,6 @@
 #include "incls/_precompiled.incl"
 #include "incls/_concurrentG1RefineThread.cpp.incl"
 
-// ======= Concurrent Mark Thread ========
-
-// The CM thread is created when the G1 garbage collector is used
-
 ConcurrentG1RefineThread::
 ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
                          int worker_id_offset, int worker_id) :
@@ -37,19 +33,42 @@
   _worker_id(worker_id),
   _active(false),
   _next(next),
+  _monitor(NULL),
   _cg1r(cg1r),
-  _vtime_accum(0.0),
-  _interval_ms(5.0)
+  _vtime_accum(0.0)
 {
+
+  // Each thread has its own monitor. The i-th thread is responsible for signalling
+  // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
+  // thread. Monitors are also used to wake up the threads during termination.
+  // The 0th worker is notified by mutator threads and has a special monitor.
+  // The last worker is used for young gen rset size sampling.
+  if (worker_id > 0) {
+    _monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true);
+  } else {
+    _monitor = DirtyCardQ_CBL_mon;
+  }
+  initialize();
   create_and_start();
 }
 
+void ConcurrentG1RefineThread::initialize() {
+  if (_worker_id < cg1r()->worker_thread_num()) {
+    // Current thread activation threshold
+    _threshold = MIN2<int>(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
+                           cg1r()->yellow_zone());
+    // A thread deactivates once the number of buffers reaches a deactivation threshold
+    _deactivation_threshold = MAX2<int>(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
+  } else {
+    set_active(true);
+  }
+}
+
 void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1p = g1h->g1_policy();
   if (g1p->adaptive_young_list_length()) {
     int regions_visited = 0;
-
     g1h->young_list_rs_length_sampling_init();
     while (g1h->young_list_rs_length_sampling_more()) {
       g1h->young_list_rs_length_sampling_next();
@@ -70,99 +89,121 @@
   }
 }
 
-void ConcurrentG1RefineThread::run() {
-  initialize_in_thread();
+void ConcurrentG1RefineThread::run_young_rs_sampling() {
+  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   _vtime_start = os::elapsedVTime();
-  wait_for_universe_init();
+  while(!_should_terminate) {
+    _sts.join();
+    sample_young_list_rs_lengths();
+    _sts.leave();
 
-  while (!_should_terminate) {
-    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-    // Wait for completed log buffers to exist.
-    {
-      MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
-      while (((_worker_id == 0 && !dcqs.process_completed_buffers()) ||
-              (_worker_id > 0 && !is_active())) &&
-             !_should_terminate) {
-         DirtyCardQ_CBL_mon->wait(Mutex::_no_safepoint_check_flag);
-      }
-    }
-
-    if (_should_terminate) {
-      return;
+    if (os::supports_vtime()) {
+      _vtime_accum = (os::elapsedVTime() - _vtime_start);
+    } else {
+      _vtime_accum = 0.0;
     }
 
-    // Now we take them off (this doesn't hold locks while it applies
-    // closures.)  (If we did a full collection, then we'll do a full
-    // traversal.
-    _sts.join();
-    int n_logs = 0;
-    int lower_limit = 0;
-    double start_vtime_sec; // only used when G1SmoothConcRefine is on
-    int prev_buffer_num; // only used when G1SmoothConcRefine is on
-    // This thread activation threshold
-    int threshold = G1UpdateBufferQueueProcessingThreshold * _worker_id;
-    // Next thread activation threshold
-    int next_threshold = threshold + G1UpdateBufferQueueProcessingThreshold;
-    int deactivation_threshold = MAX2<int>(threshold - G1UpdateBufferQueueProcessingThreshold / 2, 0);
+    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+    if (_should_terminate) {
+      break;
+    }
+    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefineServiceInterval);
+  }
+}
 
-    if (G1SmoothConcRefine) {
-      lower_limit = 0;
-      start_vtime_sec = os::elapsedVTime();
-      prev_buffer_num = (int) dcqs.completed_buffers_num();
-    } else {
-      lower_limit = G1UpdateBufferQueueProcessingThreshold / 4; // For now.
+void ConcurrentG1RefineThread::wait_for_completed_buffers() {
+  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  while (!_should_terminate && !is_active()) {
+    _monitor->wait(Mutex::_no_safepoint_check_flag);
+  }
+}
+
+bool ConcurrentG1RefineThread::is_active() {
+  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  return _worker_id > 0 ? _active : dcqs.process_completed_buffers();
+}
+
+void ConcurrentG1RefineThread::activate() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  if (_worker_id > 0) {
+    if (G1TraceConcurrentRefinement) {
+      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+      gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
+                             _worker_id, _threshold, (int)dcqs.completed_buffers_num());
     }
-    while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, lower_limit)) {
-      double end_vtime_sec;
-      double elapsed_vtime_sec;
-      int elapsed_vtime_ms;
-      int curr_buffer_num = (int) dcqs.completed_buffers_num();
+    set_active(true);
+  } else {
+    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+    dcqs.set_process_completed(true);
+  }
+  _monitor->notify();
+}
 
-      if (G1SmoothConcRefine) {
-        end_vtime_sec = os::elapsedVTime();
-        elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
-        elapsed_vtime_ms = (int) (elapsed_vtime_sec * 1000.0);
+void ConcurrentG1RefineThread::deactivate() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  if (_worker_id > 0) {
+    if (G1TraceConcurrentRefinement) {
+      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+      gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
+                             _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());
+    }
+    set_active(false);
+  } else {
+    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+    dcqs.set_process_completed(false);
+  }
+}
+
+void ConcurrentG1RefineThread::run() {
+  initialize_in_thread();
+  wait_for_universe_init();
 
-        if (curr_buffer_num > prev_buffer_num ||
-            curr_buffer_num > next_threshold) {
-          decreaseInterval(elapsed_vtime_ms);
-        } else if (curr_buffer_num < prev_buffer_num) {
-          increaseInterval(elapsed_vtime_ms);
-        }
+  if (_worker_id >= cg1r()->worker_thread_num()) {
+    run_young_rs_sampling();
+    terminate();
+  }
+
+  _vtime_start = os::elapsedVTime();
+  while (!_should_terminate) {
+    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+
+    // Wait for work
+    wait_for_completed_buffers();
+
+    if (_should_terminate) {
+      break;
+    }
+
+    _sts.join();
+
+    do {
+      int curr_buffer_num = (int)dcqs.completed_buffers_num();
+      // If the number of buffers has dropped back into the yellow zone,
+      // the transition period after the evacuation pause has ended.
+      if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
+        dcqs.set_completed_queue_padding(0);
       }
-      if (_worker_id == 0) {
-        sample_young_list_rs_lengths();
-      } else if (curr_buffer_num < deactivation_threshold) {
+
+      if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
         // If the number of the buffer has fallen below our threshold
         // we should deactivate. The predecessor will reactivate this
         // thread should the number of the buffers cross the threshold again.
-        MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
         deactivate();
-        if (G1TraceConcurrentRefinement) {
-          gclog_or_tty->print_cr("G1-Refine-deactivated worker %d", _worker_id);
-        }
         break;
       }
 
       // Check if we need to activate the next thread.
-      if (curr_buffer_num > next_threshold && _next != NULL && !_next->is_active()) {
-        MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
+      if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
         _next->activate();
-        DirtyCardQ_CBL_mon->notify_all();
-        if (G1TraceConcurrentRefinement) {
-          gclog_or_tty->print_cr("G1-Refine-activated worker %d", _next->_worker_id);
-        }
       }
+    } while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, cg1r()->green_zone()));
 
-      if (G1SmoothConcRefine) {
-        prev_buffer_num = curr_buffer_num;
-        _sts.leave();
-        os::sleep(Thread::current(), (jlong) _interval_ms, false);
-        _sts.join();
-        start_vtime_sec = os::elapsedVTime();
-      }
-      n_logs++;
+    // We can exit the loop above while being active if there was a yield request.
+    if (is_active()) {
+      deactivate();
     }
+
     _sts.leave();
 
     if (os::supports_vtime()) {
@@ -172,7 +213,6 @@
     }
   }
   assert(_should_terminate, "just checking");
-
   terminate();
 }
 
@@ -191,8 +231,8 @@
   }
 
   {
-    MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
-    DirtyCardQ_CBL_mon->notify_all();
+    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+    _monitor->notify();
   }
 
   {
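
To make the activation chain above concrete, here is a standalone sketch (not HotSpot code) of the per-worker thresholds computed in ConcurrentG1RefineThread::initialize(), using the example values green=4, yellow=12, step=1 and 4 worker threads from the earlier sketches. Worker 0 is woken by mutator threads; worker i wakes worker i+1 once the queue length exceeds worker i+1's threshold, and a worker with id > 0 deactivates itself once the length drops to its deactivation threshold.

#include <algorithm>
#include <cstdio>

int main() {
  const int green = 4, yellow = 12, step = 1, workers = 4;
  for (int id = 0; id < workers; id++) {
    const int threshold    = std::min(step * (id + 1) + green, yellow);
    const int deactivation = std::max(threshold - step, green);
    std::printf("worker %d: activation threshold %d, deactivation threshold %d\n",
                id, threshold, deactivation);
  }
  // Prints thresholds 5/4, 6/5, 7/6, 8/7 for workers 0..3; the extra
  // young-gen sampling thread (id == workers) is simply marked active instead.
  return 0;
}
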
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -40,42 +40,36 @@
   // when the number of the rset update buffer crosses a certain threshold. A successor
   // would self-deactivate when the number of the buffers falls below the threshold.
   bool _active;
-  ConcurrentG1RefineThread *       _next;
- public:
-  virtual void run();
-
-  bool is_active()  { return _active;  }
-  void activate()   { _active = true;  }
-  void deactivate() { _active = false; }
-
- private:
-  ConcurrentG1Refine*              _cg1r;
-
-  double                           _interval_ms;
+  ConcurrentG1RefineThread* _next;
+  Monitor* _monitor;
+  ConcurrentG1Refine* _cg1r;
 
-  void decreaseInterval(int processing_time_ms) {
-    double min_interval_ms = (double) processing_time_ms;
-    _interval_ms = 0.8 * _interval_ms;
-    if (_interval_ms < min_interval_ms)
-      _interval_ms = min_interval_ms;
-  }
-  void increaseInterval(int processing_time_ms) {
-    double max_interval_ms = 9.0 * (double) processing_time_ms;
-    _interval_ms = 1.1 * _interval_ms;
-    if (max_interval_ms > 0 && _interval_ms > max_interval_ms)
-      _interval_ms = max_interval_ms;
-  }
+  int _thread_threshold_step;
+  // This thread activation threshold
+  int _threshold;
+  // This thread deactivation threshold
+  int _deactivation_threshold;
 
-  void sleepBeforeNextCycle();
+  void sample_young_list_rs_lengths();
+  void run_young_rs_sampling();
+  void wait_for_completed_buffers();
+
+  void set_active(bool x) { _active = x; }
+  bool is_active();
+  void activate();
+  void deactivate();
 
   // For use by G1CollectedHeap, which is a friend.
   static SuspendibleThreadSet* sts() { return &_sts; }
 
- public:
+public:
+  virtual void run();
   // Constructor
   ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
                            int worker_id_offset, int worker_id);
 
+  void initialize();
+
   // Printing
   void print() const;
   void print_on(outputStream* st) const;
@@ -83,13 +77,10 @@
   // Total virtual time so far.
   double vtime_accum() { return _vtime_accum; }
 
-  ConcurrentG1Refine* cg1r()                     { return _cg1r;     }
-
-  void            sample_young_list_rs_lengths();
+  ConcurrentG1Refine* cg1r() { return _cg1r;     }
 
   // Yield for GC
-  void            yield();
-
+  void yield();
   // shutdown
   void stop();
 };
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -760,7 +760,6 @@
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold);
   satb_mq_set.set_active_all_threads(true);
 
   // update_g1_committed() will be called at the end of an evac pause
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -61,8 +61,8 @@
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
 
-DirtyCardQueueSet::DirtyCardQueueSet() :
-  PtrQueueSet(true /*notify_when_complete*/),
+DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
+  PtrQueueSet(notify_when_complete),
   _closure(NULL),
   _shared_dirty_card_queue(this, true /*perm*/),
   _free_ids(NULL),
@@ -77,12 +77,12 @@
 }
 
 void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
+                                   int process_completed_threshold,
                                    int max_completed_queue,
                                    Mutex* lock, PtrQueueSet* fl_owner) {
-  PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue, fl_owner);
+  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
+                          max_completed_queue, fl_owner);
   set_buffer_size(G1UpdateBufferSize);
-  set_process_completed_threshold(G1UpdateBufferQueueProcessingThreshold);
-
   _shared_dirty_card_queue.set_lock(lock);
   _free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);
 }
@@ -154,9 +154,10 @@
   return b;
 }
 
-DirtyCardQueueSet::CompletedBufferNode*
+
+BufferNode*
 DirtyCardQueueSet::get_completed_buffer(int stop_at) {
-  CompletedBufferNode* nd = NULL;
+  BufferNode* nd = NULL;
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 
   if ((int)_n_completed_buffers <= stop_at) {
@@ -166,10 +167,11 @@
 
   if (_completed_buffers_head != NULL) {
     nd = _completed_buffers_head;
-    _completed_buffers_head = nd->next;
+    _completed_buffers_head = nd->next();
     if (_completed_buffers_head == NULL)
       _completed_buffers_tail = NULL;
     _n_completed_buffers--;
+    assert(_n_completed_buffers >= 0, "Invariant");
   }
   debug_only(assert_completed_buffer_list_len_correct_locked());
   return nd;
@@ -177,20 +179,19 @@
 
 bool DirtyCardQueueSet::
 apply_closure_to_completed_buffer_helper(int worker_i,
-                                         CompletedBufferNode* nd) {
+                                         BufferNode* nd) {
   if (nd != NULL) {
+    void **buf = BufferNode::make_buffer_from_node(nd);
+    size_t index = nd->index();
     bool b =
-      DirtyCardQueue::apply_closure_to_buffer(_closure, nd->buf,
-                                              nd->index, _sz,
+      DirtyCardQueue::apply_closure_to_buffer(_closure, buf,
+                                              index, _sz,
                                               true, worker_i);
-    void** buf = nd->buf;
-    size_t index = nd->index;
-    delete nd;
     if (b) {
       deallocate_buffer(buf);
       return true;  // In normal case, go on to next buffer.
     } else {
-      enqueue_complete_buffer(buf, index, true);
+      enqueue_complete_buffer(buf, index);
       return false;
     }
   } else {
@@ -203,32 +204,33 @@
                                                           bool during_pause)
 {
   assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
-  CompletedBufferNode* nd = get_completed_buffer(stop_at);
+  BufferNode* nd = get_completed_buffer(stop_at);
   bool res = apply_closure_to_completed_buffer_helper(worker_i, nd);
   if (res) Atomic::inc(&_processed_buffers_rs_thread);
   return res;
 }
 
 void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() {
-  CompletedBufferNode* nd = _completed_buffers_head;
+  BufferNode* nd = _completed_buffers_head;
   while (nd != NULL) {
     bool b =
-      DirtyCardQueue::apply_closure_to_buffer(_closure, nd->buf, 0, _sz,
-                                              false);
+      DirtyCardQueue::apply_closure_to_buffer(_closure,
+                                              BufferNode::make_buffer_from_node(nd),
+                                              0, _sz, false);
     guarantee(b, "Should not stop early.");
-    nd = nd->next;
+    nd = nd->next();
   }
 }
 
 void DirtyCardQueueSet::abandon_logs() {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
-  CompletedBufferNode* buffers_to_delete = NULL;
+  BufferNode* buffers_to_delete = NULL;
   {
     MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
     while (_completed_buffers_head != NULL) {
-      CompletedBufferNode* nd = _completed_buffers_head;
-      _completed_buffers_head = nd->next;
-      nd->next = buffers_to_delete;
+      BufferNode* nd = _completed_buffers_head;
+      _completed_buffers_head = nd->next();
+      nd->set_next(buffers_to_delete);
       buffers_to_delete = nd;
     }
     _n_completed_buffers = 0;
@@ -236,10 +238,9 @@
     debug_only(assert_completed_buffer_list_len_correct_locked());
   }
   while (buffers_to_delete != NULL) {
-    CompletedBufferNode* nd = buffers_to_delete;
-    buffers_to_delete = nd->next;
-    deallocate_buffer(nd->buf);
-    delete nd;
+    BufferNode* nd = buffers_to_delete;
+    buffers_to_delete = nd->next();
+    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
   }
   // Since abandon is done only at safepoints, we can safely manipulate
   // these queues.
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -84,11 +84,12 @@
   jint _processed_buffers_rs_thread;
 
 public:
-  DirtyCardQueueSet();
+  DirtyCardQueueSet(bool notify_when_complete = true);
 
   void initialize(Monitor* cbl_mon, Mutex* fl_lock,
-                  int max_completed_queue = 0,
-                  Mutex* lock = NULL, PtrQueueSet* fl_owner = NULL);
+                  int process_completed_threshold,
+                  int max_completed_queue,
+                  Mutex* lock, PtrQueueSet* fl_owner = NULL);
 
   // The number of parallel ids that can be claimed to allow collector or
   // mutator threads to do card-processing work.
@@ -123,9 +124,9 @@
                                          bool during_pause = false);
 
   bool apply_closure_to_completed_buffer_helper(int worker_i,
-                                                CompletedBufferNode* nd);
+                                                BufferNode* nd);
 
-  CompletedBufferNode* get_completed_buffer(int stop_at);
+  BufferNode* get_completed_buffer(int stop_at);
 
   // Applies the current closure to all completed buffers,
   // non-consumptively.
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -1375,6 +1375,7 @@
 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   SharedHeap(policy_),
   _g1_policy(policy_),
+  _dirty_card_queue_set(false),
   _ref_processor(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
@@ -1460,8 +1461,6 @@
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
 
-  // We allocate this in any case, but only do no work if the command line
-  // param is off.
   _cg1r = new ConcurrentG1Refine();
 
   // Reserve the maximum.
@@ -1594,18 +1593,20 @@
 
   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                                SATB_Q_FL_lock,
-                                               0,
+                                               G1SATBProcessCompletedThreshold,
                                                Shared_SATB_Q_lock);
 
   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                 DirtyCardQ_FL_lock,
-                                                G1UpdateBufferQueueMaxLength,
+                                                concurrent_g1_refine()->yellow_zone(),
+                                                concurrent_g1_refine()->red_zone(),
                                                 Shared_DirtyCardQ_lock);
 
   if (G1DeferredRSUpdate) {
     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                       DirtyCardQ_FL_lock,
-                                      0,
+                                      -1, // never trigger processing
+                                      -1, // no limit on length
                                       Shared_DirtyCardQ_lock,
                                       &JavaThread::dirty_card_queue_set());
   }
@@ -4239,10 +4240,11 @@
     RedirtyLoggedCardTableEntryFastClosure redirty;
     dirty_card_queue_set().set_closure(&redirty);
     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
-    JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
+
+    DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
+    dcq.merge_bufferlists(&dirty_card_queue_set());
     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
   }
-
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 }
 
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -1914,6 +1914,10 @@
   calculate_young_list_min_length();
   calculate_young_list_target_config();
 
+  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
+  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSUpdatePauseFractionPercent / 100.0;
+  adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
+
   // </NEW PREDICTION>
 
   _target_pause_time_ms = -1.0;
@@ -1921,6 +1925,47 @@
 
 // <NEW PREDICTION>
 
+void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
+                                                     double update_rs_processed_buffers,
+                                                     double goal_ms) {
+  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
+
+  if (G1AdaptiveConcRefine) {
+    const int k_gy = 3, k_gr = 6;
+    const double inc_k = 1.1, dec_k = 0.9;
+
+    int g = cg1r->green_zone();
+    if (update_rs_time > goal_ms) {
+      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
+    } else {
+      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
+        g = (int)MAX2(g * inc_k, g + 1.0);
+      }
+    }
+    // Change the refinement threads params
+    cg1r->set_green_zone(g);
+    cg1r->set_yellow_zone(g * k_gy);
+    cg1r->set_red_zone(g * k_gr);
+    cg1r->reinitialize_threads();
+
+    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
+    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
+                                    cg1r->yellow_zone());
+    // Change the barrier params
+    dcqs.set_process_completed_threshold(processing_threshold);
+    dcqs.set_max_completed_queue(cg1r->red_zone());
+  }
+
+  int curr_queue_size = dcqs.completed_buffers_num();
+  if (curr_queue_size >= cg1r->yellow_zone()) {
+    dcqs.set_completed_queue_padding(curr_queue_size);
+  } else {
+    dcqs.set_completed_queue_padding(0);
+  }
+  dcqs.notify_if_necessary();
+}
+
 double
 G1CollectorPolicy::
 predict_young_collection_elapsed_time_ms(size_t adjustment) {
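
Below is a standalone sketch (not HotSpot code) of the adaptive green-zone update performed by adjust_concurrent_refinement() above, with G1AdaptiveConcRefine enabled. The pause goal, G1RSUpdatePauseFractionPercent and measured values are hypothetical, and the sigma()-based processing-threshold adjustment is left out.

#include <algorithm>
#include <cstdio>

int main() {
  // A 200 ms pause goal with G1RSUpdatePauseFractionPercent = 10 leaves a
  // 20 ms budget (goal_ms) for remembered-set updates.
  const double goal_ms = 200.0 * 10 / 100.0;

  int    green                       = 4;     // current green zone
  double update_rs_time              = 14.0;  // measured RS update time, ms
  double update_rs_processed_buffers = 40.0;  // buffers processed during the pause

  const double inc_k = 1.1, dec_k = 0.9;
  const int    k_gy = 3, k_gr = 6;

  if (update_rs_time > goal_ms) {
    green = (int)(green * dec_k);             // over budget: shrink, may reach 0
  } else if (update_rs_time < goal_ms && update_rs_processed_buffers > green) {
    green = (int)std::max(green * inc_k, green + 1.0);  // under budget and busy: grow
  }
  const int yellow = green * k_gy;
  const int red    = green * k_gr;

  std::printf("green=%d yellow=%d red=%d (RS update %.1f ms vs %.1f ms goal)\n",
              green, yellow, red, update_rs_time, goal_ms);
  // Here 14 ms < 20 ms and 40 buffers > green of 4, so green grows to 5,
  // yellow to 15 and red to 30; the refinement threads are then
  // reinitialized against the new zones.
  return 0;
}
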
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -316,6 +316,10 @@
   bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
 #endif // PRODUCT
 
+  void adjust_concurrent_refinement(double update_rs_time,
+                                    double update_rs_processed_buffers,
+                                    double goal_ms);
+
 protected:
   double _pause_time_target_ms;
   double _recorded_young_cset_choice_time_ms;
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -85,7 +85,7 @@
   diagnostic(bool, G1SummarizeZFStats, false,                               \
           "Summarize zero-filling info")                                    \
                                                                             \
-  develop(bool, G1TraceConcurrentRefinement, false,                         \
+  diagnostic(bool, G1TraceConcurrentRefinement, false,                      \
           "Trace G1 concurrent refinement")                                 \
                                                                             \
   product(intx, G1MarkStackSize, 2 * 1024 * 1024,                           \
@@ -94,19 +94,6 @@
   product(intx, G1MarkRegionStackSize, 1024 * 1024,                         \
           "Size of the region stack for concurrent marking.")               \
                                                                             \
-  develop(bool, G1ConcRefine, true,                                         \
-          "If true, run concurrent rem set refinement for G1")              \
-                                                                            \
-  develop(intx, G1ConcRefineTargTraversals, 4,                              \
-          "Number of concurrent refinement we try to achieve")              \
-                                                                            \
-  develop(intx, G1ConcRefineInitialDelta, 4,                                \
-          "Number of heap regions of alloc ahead of starting collection "   \
-          "pause to start concurrent refinement (initially)")               \
-                                                                            \
-  develop(bool, G1SmoothConcRefine, true,                                   \
-          "Attempts to smooth out the overhead of concurrent refinement")   \
-                                                                            \
   develop(bool, G1ConcZeroFill, true,                                       \
           "If true, run concurrent zero-filling thread")                    \
                                                                             \
@@ -178,13 +165,38 @@
   product(intx, G1UpdateBufferSize, 256,                                    \
           "Size of an update buffer")                                       \
                                                                             \
-  product(intx, G1UpdateBufferQueueProcessingThreshold, 5,                  \
+  product(intx, G1ConcRefineYellowZone, 0,                                  \
           "Number of enqueued update buffers that will "                    \
-          "trigger concurrent processing")                                  \
+          "trigger concurrent processing. Will be selected ergonomically "  \
+          "by default.")                                                    \
+                                                                            \
+  product(intx, G1ConcRefineRedZone, 0,                                     \
+          "Maximum number of enqueued update buffers before mutator "       \
+          "threads start processing new ones instead of enqueueing them. "  \
+          "Will be selected ergonomically by default. Zero will disable "   \
+          "concurrent processing.")                                         \
+                                                                            \
+  product(intx, G1ConcRefineGreenZone, 0,                                   \
+          "The number of update buffers that are left in the queue by the " \
+          "concurrent processing threads. Will be selected ergonomically "  \
+          "by default.")                                                    \
                                                                             \
-  product(intx, G1UpdateBufferQueueMaxLength, 30,                           \
-          "Maximum number of enqueued update buffers before mutator "       \
-          "threads start processing new ones instead of enqueueing them")   \
+  product(intx, G1ConcRefineServiceInterval, 300,                           \
+          "The last concurrent refinement thread wakes up every "           \
+          "specified number of milliseconds to do miscellaneous work.")     \
+                                                                            \
+  product(intx, G1ConcRefineThresholdStep, 0,                               \
+          "Each time the rset update queue increases by this amount "       \
+          "activate the next refinement thread if available. "              \
+          "Will be selected ergonomically by default.")                     \
+                                                                            \
+  product(intx, G1RSUpdatePauseFractionPercent, 10,                         \
+          "A target percentage of time that is allowed to be spend on "     \
+          "process RS update buffers during the collection pause.")         \
+                                                                            \
+  product(bool, G1AdaptiveConcRefine, true,                                 \
+          "Select green, yellow and red zones adaptively to meet the "      \
+          "the pause requirements.")                                        \
                                                                             \
   develop(intx, G1ConcRSLogCacheSize, 10,                                   \
           "Log base 2 of the length of conc RS hot-card cache.")            \
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -64,8 +64,8 @@
   while (_index == 0) {
     handle_zero_index();
   }
+
   assert(_index > 0, "postcondition");
-
   _index -= oopSize;
   _buf[byte_index_to_index((int)_index)] = ptr;
   assert(0 <= _index && _index <= _sz, "Invariant.");
@@ -99,95 +99,110 @@
   assert(_sz > 0, "Didn't set a buffer size.");
   MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
   if (_fl_owner->_buf_free_list != NULL) {
-    void** res = _fl_owner->_buf_free_list;
-    _fl_owner->_buf_free_list = (void**)_fl_owner->_buf_free_list[0];
+    void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
+    _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
     _fl_owner->_buf_free_list_sz--;
-    // Just override the next pointer with NULL, just in case we scan this part
-    // of the buffer.
-    res[0] = NULL;
     return res;
   } else {
-    return (void**) NEW_C_HEAP_ARRAY(char, _sz);
+    // Allocate space for the BufferNode in front of the buffer.
+    char *b =  NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
+    return BufferNode::make_buffer_from_block(b);
   }
 }
 
 void PtrQueueSet::deallocate_buffer(void** buf) {
   assert(_sz > 0, "Didn't set a buffer size.");
   MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
-  buf[0] = (void*)_fl_owner->_buf_free_list;
-  _fl_owner->_buf_free_list = buf;
+  BufferNode *node = BufferNode::make_node_from_buffer(buf);
+  node->set_next(_fl_owner->_buf_free_list);
+  _fl_owner->_buf_free_list = node;
   _fl_owner->_buf_free_list_sz++;
 }
 
 void PtrQueueSet::reduce_free_list() {
+  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
   // For now we'll adopt the strategy of deleting half.
   MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
   size_t n = _buf_free_list_sz / 2;
   while (n > 0) {
     assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
-    void** head = _buf_free_list;
-    _buf_free_list = (void**)_buf_free_list[0];
-    FREE_C_HEAP_ARRAY(char, head);
+    void* b = BufferNode::make_block_from_node(_buf_free_list);
+    _buf_free_list = _buf_free_list->next();
+    FREE_C_HEAP_ARRAY(char, b);
     _buf_free_list_sz --;
     n--;
   }
 }
 
-void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index, bool ignore_max_completed) {
-  // I use explicit locking here because there's a bailout in the middle.
-  _cbl_mon->lock_without_safepoint_check();
-
-  Thread* thread = Thread::current();
-  assert( ignore_max_completed ||
-          thread->is_Java_thread() ||
-          SafepointSynchronize::is_at_safepoint(),
-          "invariant" );
-  ignore_max_completed = ignore_max_completed || !thread->is_Java_thread();
+void PtrQueue::handle_zero_index() {
+  assert(0 == _index, "Precondition.");
+  // This thread records the full buffer and allocates a new one (while
+  // holding the lock if there is one).
+  if (_buf != NULL) {
+    if (_lock) {
+      locking_enqueue_completed_buffer(_buf);
+    } else {
+      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
+        // Recycle the buffer. No allocation.
+        _sz = qset()->buffer_size();
+        _index = _sz;
+        return;
+      }
+    }
+  }
+  // Reallocate the buffer
+  _buf = qset()->allocate_buffer();
+  _sz = qset()->buffer_size();
+  _index = _sz;
+  assert(0 <= _index && _index <= _sz, "Invariant.");
+}
 
-  if (!ignore_max_completed && _max_completed_queue > 0 &&
-      _n_completed_buffers >= (size_t) _max_completed_queue) {
-    _cbl_mon->unlock();
-    bool b = mut_process_buffer(buf);
-    if (b) {
-      deallocate_buffer(buf);
-      return;
+bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
+  if (Thread::current()->is_Java_thread()) {
+    // We don't lock. It is fine to be epsilon-precise here.
+    if (_max_completed_queue == 0 || (_max_completed_queue > 0 &&
+        _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
+      bool b = mut_process_buffer(buf);
+      if (b) {
+        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
+        return true;
+      }
     }
+  }
+  // The buffer will be enqueued. The caller will have to get a new one.
+  enqueue_complete_buffer(buf);
+  return false;
+}
 
-    // Otherwise, go ahead and enqueue the buffer.  Must reaquire the lock.
-    _cbl_mon->lock_without_safepoint_check();
-  }
-
-  // Here we still hold the _cbl_mon.
-  CompletedBufferNode* cbn = new CompletedBufferNode;
-  cbn->buf = buf;
-  cbn->next = NULL;
-  cbn->index = index;
+void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
+  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+  BufferNode* cbn = BufferNode::new_from_buffer(buf);
+  cbn->set_index(index);
   if (_completed_buffers_tail == NULL) {
     assert(_completed_buffers_head == NULL, "Well-formedness");
     _completed_buffers_head = cbn;
     _completed_buffers_tail = cbn;
   } else {
-    _completed_buffers_tail->next = cbn;
+    _completed_buffers_tail->set_next(cbn);
     _completed_buffers_tail = cbn;
   }
   _n_completed_buffers++;
 
-  if (!_process_completed &&
+  if (!_process_completed && _process_completed_threshold >= 0 &&
       _n_completed_buffers >= _process_completed_threshold) {
     _process_completed = true;
     if (_notify_when_complete)
-      _cbl_mon->notify_all();
+      _cbl_mon->notify();
   }
   debug_only(assert_completed_buffer_list_len_correct_locked());
-  _cbl_mon->unlock();
 }
 
 int PtrQueueSet::completed_buffers_list_length() {
   int n = 0;
-  CompletedBufferNode* cbn = _completed_buffers_head;
+  BufferNode* cbn = _completed_buffers_head;
   while (cbn != NULL) {
     n++;
-    cbn = cbn->next;
+    cbn = cbn->next();
   }
   return n;
 }
@@ -198,7 +213,7 @@
 }
 
 void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
-  guarantee((size_t)completed_buffers_list_length() ==  _n_completed_buffers,
+  guarantee(completed_buffers_list_length() ==  _n_completed_buffers,
             "Completed buffer length is wrong.");
 }
 
@@ -207,12 +222,8 @@
   _sz = sz * oopSize;
 }
 
-void PtrQueueSet::set_process_completed_threshold(size_t sz) {
-  _process_completed_threshold = sz;
-}
-
-// Merge lists of buffers. Notify waiting threads if the length of the list
-// exceeds threshold. The source queue is emptied as a result. The queues
+// Merge lists of buffers. Notify the processing threads.
+// The source queue is emptied as a result. The queues
 // must share the monitor.
 void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
   assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
@@ -224,7 +235,7 @@
   } else {
     assert(_completed_buffers_head != NULL, "Well formedness");
     if (src->_completed_buffers_head != NULL) {
-      _completed_buffers_tail->next = src->_completed_buffers_head;
+      _completed_buffers_tail->set_next(src->_completed_buffers_head);
       _completed_buffers_tail = src->_completed_buffers_tail;
     }
   }
@@ -237,31 +248,13 @@
   assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
          _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
          "Sanity");
-
-  if (!_process_completed &&
-      _n_completed_buffers >= _process_completed_threshold) {
-    _process_completed = true;
-    if (_notify_when_complete)
-      _cbl_mon->notify_all();
-  }
 }
 
-// Merge free lists of the two queues. The free list of the source
-// queue is emptied as a result. The queues must share the same
-// mutex that guards free lists.
-void PtrQueueSet::merge_freelists(PtrQueueSet* src) {
-  assert(_fl_lock == src->_fl_lock, "Should share the same lock");
-  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
-  if (_buf_free_list != NULL) {
-    void **p = _buf_free_list;
-    while (*p != NULL) {
-      p = (void**)*p;
-    }
-    *p = src->_buf_free_list;
-  } else {
-    _buf_free_list = src->_buf_free_list;
+void PtrQueueSet::notify_if_necessary() {
+  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
+    _process_completed = true;
+    if (_notify_when_complete)
+      _cbl_mon->notify();
   }
-  _buf_free_list_sz += src->_buf_free_list_sz;
-  src->_buf_free_list = NULL;
-  src->_buf_free_list_sz = 0;
 }
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -27,8 +27,10 @@
 // the addresses of modified old-generation objects.  This type supports
 // this operation.
 
+// The definition of placement operator new(size_t, void*) is in <new>.
+#include <new>
+
 class PtrQueueSet;
-
 class PtrQueue VALUE_OBJ_CLASS_SPEC {
 
 protected:
@@ -77,7 +79,7 @@
     else enqueue_known_active(ptr);
   }
 
-  inline void handle_zero_index();
+  void handle_zero_index();
   void locking_enqueue_completed_buffer(void** buf);
 
   void enqueue_known_active(void* ptr);
@@ -126,34 +128,65 @@
 
 };
 
+class BufferNode {
+  size_t _index;
+  BufferNode* _next;
+public:
+  BufferNode() : _index(0), _next(NULL) { }
+  BufferNode* next() const     { return _next;  }
+  void set_next(BufferNode* n) { _next = n;     }
+  size_t index() const         { return _index; }
+  void set_index(size_t i)     { _index = i;    }
+
+  // Align the size of the structure to the size of the pointer
+  static size_t aligned_size() {
+    static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
+    return alignment;
+  }
+
+  // BufferNode is allocated before the buffer.
+  // The chunk of memory that holds both of them is a block.
+
+  // Produce a new BufferNode given a buffer.
+  static BufferNode* new_from_buffer(void** buf) {
+    return new (make_block_from_buffer(buf)) BufferNode;
+  }
+
+  // The following are the required conversion routines:
+  static BufferNode* make_node_from_buffer(void** buf) {
+    return (BufferNode*)make_block_from_buffer(buf);
+  }
+  static void** make_buffer_from_node(BufferNode *node) {
+    return make_buffer_from_block(node);
+  }
+  static void* make_block_from_node(BufferNode *node) {
+    return (void*)node;
+  }
+  static void** make_buffer_from_block(void* p) {
+    return (void**)((char*)p + aligned_size());
+  }
+  static void* make_block_from_buffer(void** p) {
+    return (void*)((char*)p - aligned_size());
+  }
+};
+
 // A PtrQueueSet represents resources common to a set of pointer queues.
 // In particular, the individual queues allocate buffers from this shared
 // set, and return completed buffers to the set.
 // All these variables are are protected by the TLOQ_CBL_mon. XXX ???
 class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
-
 protected:
-
-  class CompletedBufferNode: public CHeapObj {
-  public:
-    void** buf;
-    size_t index;
-    CompletedBufferNode* next;
-    CompletedBufferNode() : buf(NULL),
-      index(0), next(NULL){ }
-  };
-
   Monitor* _cbl_mon;  // Protects the fields below.
-  CompletedBufferNode* _completed_buffers_head;
-  CompletedBufferNode* _completed_buffers_tail;
-  size_t _n_completed_buffers;
-  size_t _process_completed_threshold;
+  BufferNode* _completed_buffers_head;
+  BufferNode* _completed_buffers_tail;
+  int _n_completed_buffers;
+  int _process_completed_threshold;
   volatile bool _process_completed;
 
   // This (and the interpretation of the first element as a "next"
   // pointer) are protected by the TLOQ_FL_lock.
   Mutex* _fl_lock;
-  void** _buf_free_list;
+  BufferNode* _buf_free_list;
   size_t _buf_free_list_sz;
   // Queue set can share a freelist. The _fl_owner variable
   // specifies the owner. It is set to "this" by default.
@@ -170,6 +203,7 @@
   // Maximum number of elements allowed on completed queue: after that,
   // enqueuer does the work itself.  Zero indicates no maximum.
   int _max_completed_queue;
+  int _completed_queue_padding;
 
   int completed_buffers_list_length();
   void assert_completed_buffer_list_len_correct_locked();
@@ -191,9 +225,12 @@
   // Because of init-order concerns, we can't pass these as constructor
   // arguments.
   void initialize(Monitor* cbl_mon, Mutex* fl_lock,
-                  int max_completed_queue = 0,
+                  int process_completed_threshold,
+                  int max_completed_queue,
                   PtrQueueSet *fl_owner = NULL) {
     _max_completed_queue = max_completed_queue;
+    _process_completed_threshold = process_completed_threshold;
+    _completed_queue_padding = 0;
     assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
     _cbl_mon = cbl_mon;
     _fl_lock = fl_lock;
@@ -208,14 +245,17 @@
   void deallocate_buffer(void** buf);
 
   // Declares that "buf" is a complete buffer.
-  void enqueue_complete_buffer(void** buf, size_t index = 0,
-                               bool ignore_max_completed = false);
+  void enqueue_complete_buffer(void** buf, size_t index = 0);
+
+  // To be invoked by the mutator.
+  bool process_or_enqueue_complete_buffer(void** buf);
 
   bool completed_buffers_exist_dirty() {
     return _n_completed_buffers > 0;
   }
 
   bool process_completed_buffers() { return _process_completed; }
+  void set_process_completed(bool x) { _process_completed = x; }
 
   bool active() { return _all_active; }
 
@@ -226,15 +266,24 @@
   // Get the buffer size.
   size_t buffer_size() { return _sz; }
 
-  // Set the number of completed buffers that triggers log processing.
-  void set_process_completed_threshold(size_t sz);
+  // Get/Set the number of completed buffers that triggers log processing.
+  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
+  int process_completed_threshold() const { return _process_completed_threshold; }
 
   // Must only be called at a safe point.  Indicates that the buffer free
   // list size may be reduced, if that is deemed desirable.
   void reduce_free_list();
 
-  size_t completed_buffers_num() { return _n_completed_buffers; }
+  int completed_buffers_num() { return _n_completed_buffers; }
 
   void merge_bufferlists(PtrQueueSet* src);
-  void merge_freelists(PtrQueueSet* src);
+
+  void set_max_completed_queue(int m) { _max_completed_queue = m; }
+  int max_completed_queue() { return _max_completed_queue; }
+
+  void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
+  int completed_queue_padding() { return _completed_queue_padding; }
+
+  // Notify the consumer if the number of buffers crossed the threshold
+  void notify_if_necessary();
 };
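
BufferNode replaces the separately allocated CompletedBufferNode by placing a small header immediately in front of the buffer inside a single heap block, so the free list and the completed list need no extra allocations. A standalone sketch of that layout and the pointer arithmetic, with a made-up Node type rather than the HotSpot definitions:

    // buffer_node_layout.cpp -- standalone sketch of the "node in front of buffer" block.
    #include <cstdlib>
    #include <cstdio>
    #include <new>

    struct Node {
      size_t index;
      Node*  next;
      // Size of the header rounded up to pointer alignment.
      static size_t aligned_size() {
        return (sizeof(Node) + sizeof(void*) - 1) / sizeof(void*) * sizeof(void*);
      }
    };

    static void** buffer_from_block(void* block) { return (void**)((char*)block + Node::aligned_size()); }
    static void*  block_from_buffer(void** buf)  { return (void*)((char*)buf - Node::aligned_size()); }

    int main() {
      const size_t buf_bytes = 256 * sizeof(void*);
      char* block = (char*)std::malloc(buf_bytes + Node::aligned_size());
      Node* node  = new (block) Node{ 0, nullptr };   // header lives at the start of the block
      void** buf  = buffer_from_block(block);         // buffer starts right after the header
      std::printf("round trip ok: %d\n", block_from_buffer(buf) == (void*)node);
      std::free(block);
      return 0;
    }

The make_*_from_* helpers above are exactly this kind of offset arithmetic, which is why freeing a buffer can simply free the enclosing block.
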
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -67,9 +67,9 @@
 {}
 
 void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
-                                  int max_completed_queue,
+                                  int process_completed_threshold,
                                   Mutex* lock) {
-  PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue);
+  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
   _shared_satb_queue.set_lock(lock);
   if (ParallelGCThreads > 0) {
     _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads);
@@ -122,12 +122,12 @@
 
 bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
                                                               int worker) {
-  CompletedBufferNode* nd = NULL;
+  BufferNode* nd = NULL;
   {
     MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
     if (_completed_buffers_head != NULL) {
       nd = _completed_buffers_head;
-      _completed_buffers_head = nd->next;
+      _completed_buffers_head = nd->next();
       if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
       _n_completed_buffers--;
       if (_n_completed_buffers == 0) _process_completed = false;
@@ -135,9 +135,9 @@
   }
   ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
   if (nd != NULL) {
-    ObjPtrQueue::apply_closure_to_buffer(cl, nd->buf, 0, _sz);
-    deallocate_buffer(nd->buf);
-    delete nd;
+    void **buf = BufferNode::make_buffer_from_node(nd);
+    ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
+    deallocate_buffer(buf);
     return true;
   } else {
     return false;
@@ -145,13 +145,13 @@
 }
 
 void SATBMarkQueueSet::abandon_partial_marking() {
-  CompletedBufferNode* buffers_to_delete = NULL;
+  BufferNode* buffers_to_delete = NULL;
   {
     MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
     while (_completed_buffers_head != NULL) {
-      CompletedBufferNode* nd = _completed_buffers_head;
-      _completed_buffers_head = nd->next;
-      nd->next = buffers_to_delete;
+      BufferNode* nd = _completed_buffers_head;
+      _completed_buffers_head = nd->next();
+      nd->set_next(buffers_to_delete);
       buffers_to_delete = nd;
     }
     _completed_buffers_tail = NULL;
@@ -159,10 +159,9 @@
     DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
   }
   while (buffers_to_delete != NULL) {
-    CompletedBufferNode* nd = buffers_to_delete;
-    buffers_to_delete = nd->next;
-    deallocate_buffer(nd->buf);
-    delete nd;
+    BufferNode* nd = buffers_to_delete;
+    buffers_to_delete = nd->next();
+    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
   }
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
   // So we can safely manipulate these queues.
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -60,8 +60,8 @@
   SATBMarkQueueSet();
 
   void initialize(Monitor* cbl_mon, Mutex* fl_lock,
-                  int max_completed_queue = 0,
-                  Mutex* lock = NULL);
+                  int process_completed_threshold,
+                  Mutex* lock);
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
--- a/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Jan 06 22:21:39 2010 -0800
@@ -109,7 +109,6 @@
 dirtyCardQueue.cpp                      dirtyCardQueue.hpp
 dirtyCardQueue.cpp			heapRegionRemSet.hpp
 dirtyCardQueue.cpp                      mutexLocker.hpp
-dirtyCardQueue.cpp                      ptrQueue.inline.hpp
 dirtyCardQueue.cpp                      safepoint.hpp
 dirtyCardQueue.cpp                      thread.hpp
 dirtyCardQueue.cpp                      thread_<os_family>.inline.hpp
@@ -319,7 +318,6 @@
 ptrQueue.cpp                            mutex.hpp
 ptrQueue.cpp                            mutexLocker.hpp
 ptrQueue.cpp                            ptrQueue.hpp
-ptrQueue.cpp                            ptrQueue.inline.hpp
 ptrQueue.cpp                            thread_<os_family>.inline.hpp
 
 ptrQueue.hpp                            allocation.hpp
@@ -329,7 +327,6 @@
 
 satbQueue.cpp                           allocation.inline.hpp
 satbQueue.cpp                           mutexLocker.hpp
-satbQueue.cpp                           ptrQueue.inline.hpp
 satbQueue.cpp                           satbQueue.hpp
 satbQueue.cpp                           sharedHeap.hpp
 satbQueue.cpp                           thread.hpp
--- a/src/share/vm/memory/heap.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/memory/heap.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -464,7 +464,7 @@
   }
 
   // Verify that freelist contains the right amount of free space
-  guarantee(len == _free_segments, "wrong freelist");
+  //  guarantee(len == _free_segments, "wrong freelist");
 
   // Verify that the number of free blocks is not out of hand.
   static int free_block_threshold = 10000;
@@ -479,5 +479,5 @@
   for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
     if (h->free()) count--;
   }
-  guarantee(count == 0, "missing free blocks");
+  //  guarantee(count == 0, "missing free blocks");
 }
--- a/src/share/vm/memory/referenceProcessor.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/memory/referenceProcessor.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -299,8 +299,8 @@
 
 
 template <class T>
-static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
-                                          AbstractRefProcTaskExecutor* task_executor) {
+bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
+                                   AbstractRefProcTaskExecutor* task_executor) {
 
   // Remember old value of pending references list
   T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
--- a/src/share/vm/oops/instanceKlass.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/oops/instanceKlass.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -2045,8 +2045,9 @@
     // As we walk along, look for equalities between outer1 and class2.
     // Eventually, the walks will terminate as outer1 stops
     // at the top-level class around the original class.
-    symbolOop ignore_name;
-    klassOop next = outer1->compute_enclosing_class(ignore_name, CHECK_false);
+    bool ignore_inner_is_member;
+    klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
+                                                    CHECK_false);
     if (next == NULL)  break;
     if (next == class2())  return true;
     outer1 = instanceKlassHandle(THREAD, next);
@@ -2055,8 +2056,9 @@
   // Now do the same for class2.
   instanceKlassHandle outer2 = class2;
   for (;;) {
-    symbolOop ignore_name;
-    klassOop next = outer2->compute_enclosing_class(ignore_name, CHECK_false);
+    bool ignore_inner_is_member;
+    klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
+                                                    CHECK_false);
     if (next == NULL)  break;
     // Might as well check the new outer against all available values.
     if (next == class1())  return true;
--- a/src/share/vm/oops/instanceKlass.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/oops/instanceKlass.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -337,12 +337,12 @@
   static bool is_same_class_package(oop class_loader1, symbolOop class_name1, oop class_loader2, symbolOop class_name2);
 
   // find an enclosing class (defined where original code was, in jvm.cpp!)
-  klassOop compute_enclosing_class(symbolOop& simple_name_result, TRAPS) {
+  klassOop compute_enclosing_class(bool* inner_is_member, TRAPS) {
     instanceKlassHandle self(THREAD, this->as_klassOop());
-    return compute_enclosing_class_impl(self, simple_name_result, THREAD);
+    return compute_enclosing_class_impl(self, inner_is_member, THREAD);
   }
   static klassOop compute_enclosing_class_impl(instanceKlassHandle self,
-                                               symbolOop& simple_name_result, TRAPS);
+                                               bool* inner_is_member, TRAPS);
 
   // tell if two classes have the same enclosing class (at package level)
   bool is_same_package_member(klassOop class2, TRAPS) {
--- a/src/share/vm/oops/instanceRefKlass.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -78,9 +78,9 @@
 
 #ifndef SERIALGC
 template <class T>
-static void specialized_oop_follow_contents(instanceRefKlass* ref,
-                                            ParCompactionManager* cm,
-                                            oop obj) {
+void specialized_oop_follow_contents(instanceRefKlass* ref,
+                                     ParCompactionManager* cm,
+                                     oop obj) {
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
   T heap_oop = oopDesc::load_heap_oop(referent_addr);
   debug_only(
--- a/src/share/vm/oops/oop.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/oops/oop.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -30,13 +30,12 @@
 // no virtual functions allowed
 
 // store into oop with store check
-template <class T> void oop_store(T* p, oop v);
-template <class T> void oop_store(volatile T* p, oop v);
+template <class T> inline void oop_store(T* p, oop v);
+template <class T> inline void oop_store(volatile T* p, oop v);
 
 // store into oop without store check
-template <class T> void oop_store_without_check(T* p, oop v);
-template <class T> void oop_store_without_check(volatile T* p, oop v);
-
+template <class T> inline void oop_store_without_check(T* p, oop v);
+template <class T> inline void oop_store_without_check(volatile T* p, oop v);
 
 extern bool always_do_update_barrier;
 
--- a/src/share/vm/prims/jvm.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/prims/jvm.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -1318,19 +1318,20 @@
     return NULL;
   }
 
-  symbolOop simple_name = NULL;
+  bool inner_is_member = false;
   klassOop outer_klass
     = instanceKlass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass))
-                          )->compute_enclosing_class(simple_name, CHECK_NULL);
+                          )->compute_enclosing_class(&inner_is_member, CHECK_NULL);
   if (outer_klass == NULL)  return NULL;  // already a top-level class
-  if (simple_name == NULL)  return NULL;  // an anonymous class (inside a method)
+  if (!inner_is_member)  return NULL;     // an anonymous class (inside a method)
   return (jclass) JNIHandles::make_local(env, Klass::cast(outer_klass)->java_mirror());
 }
 JVM_END
 
 // should be in instanceKlass.cpp, but is here for historical reasons
 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
-                                                     symbolOop& simple_name_result, TRAPS) {
+                                                     bool* inner_is_member,
+                                                     TRAPS) {
   Thread* thread = THREAD;
   const int inner_class_info_index = inner_class_inner_class_info_offset;
   const int outer_class_info_index = inner_class_outer_class_info_offset;
@@ -1347,8 +1348,7 @@
   bool found = false;
   klassOop ok;
   instanceKlassHandle outer_klass;
-  bool inner_is_member = false;
-  int simple_name_index = 0;
+  *inner_is_member = false;
 
   // Find inner_klass attribute
   for (int i = 0; i < i_length && !found; i += inner_class_next_offset) {
@@ -1364,8 +1364,7 @@
         if (found && ooff != 0) {
           ok = i_cp->klass_at(ooff, CHECK_NULL);
           outer_klass = instanceKlassHandle(thread, ok);
-          simple_name_index = noff;
-          inner_is_member = true;
+          *inner_is_member = true;
         }
       }
     }
@@ -1377,7 +1376,7 @@
     if (encl_method_class_idx != 0) {
       ok = i_cp->klass_at(encl_method_class_idx, CHECK_NULL);
       outer_klass = instanceKlassHandle(thread, ok);
-      inner_is_member = false;
+      *inner_is_member = false;
     }
   }
 
@@ -1387,9 +1386,7 @@
   // Throws an exception if outer klass has not declared k as an inner klass
   // We need evidence that each klass knows about the other, or else
   // the system could allow a spoof of an inner class to gain access rights.
-  Reflection::check_for_inner_class(outer_klass, k, inner_is_member, CHECK_NULL);
-
-  simple_name_result = (inner_is_member ? i_cp->symbol_at(simple_name_index) : symbolOop(NULL));
+  Reflection::check_for_inner_class(outer_klass, k, *inner_is_member, CHECK_NULL);
   return outer_klass();
 }
 
--- a/src/share/vm/prims/jvmtiEnv.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,15 +32,15 @@
  // FIXLATER: hook into JvmtiTrace
 #define TraceJVMTICalls false
 
-JvmtiEnv::JvmtiEnv() : JvmtiEnvBase() {
+JvmtiEnv::JvmtiEnv(jint version) : JvmtiEnvBase(version) {
 }
 
 JvmtiEnv::~JvmtiEnv() {
 }
 
 JvmtiEnv*
-JvmtiEnv::create_a_jvmti() {
-  return new JvmtiEnv();
+JvmtiEnv::create_a_jvmti(jint version) {
+  return new JvmtiEnv(version);
 }
 
 // VM operation class to copy jni function table at safepoint.
@@ -411,8 +411,15 @@
   if (phase == JVMTI_PHASE_ONLOAD) {
     Arguments::append_sysclasspath(segment);
     return JVMTI_ERROR_NONE;
-  } else {
-    assert(phase == JVMTI_PHASE_LIVE, "sanity check");
+  } else if (use_version_1_0_semantics()) {
+    // This JvmtiEnv requested version 1.0 semantics and this function
+    // is only allowed in the ONLOAD phase in version 1.0 so we need to
+    // return an error here.
+    return JVMTI_ERROR_WRONG_PHASE;
+  } else if (phase == JVMTI_PHASE_LIVE) {
+    // The phase is checked by the wrapper that called this function,
+    // but this thread could be racing with the thread that is
+    // terminating the VM so we check one more time.
 
     // create the zip entry
     ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
@@ -433,6 +440,8 @@
     }
     ClassLoader::add_to_list(zip_entry);
     return JVMTI_ERROR_NONE;
+  } else {
+    return JVMTI_ERROR_WRONG_PHASE;
   }
 
 } /* end AddToBootstrapClassLoaderSearch */
@@ -451,11 +460,12 @@
       }
     }
     return JVMTI_ERROR_NONE;
-  } else {
+  } else if (phase == JVMTI_PHASE_LIVE) {
+    // The phase is checked by the wrapper that called this function,
+    // but this thread could be racing with the thread that is
+    // terminating the VM so we check one more time.
     HandleMark hm;
 
-    assert(phase == JVMTI_PHASE_LIVE, "sanity check");
-
     // create the zip entry (which will open the zip file and hence
     // check that the segment is indeed a zip file).
     ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
@@ -504,6 +514,8 @@
     }
 
     return JVMTI_ERROR_NONE;
+  } else {
+    return JVMTI_ERROR_WRONG_PHASE;
   }
 } /* end AddToSystemClassLoaderSearch */
 
@@ -2863,6 +2875,14 @@
 // is_obsolete_ptr - pre-checked for NULL
 jvmtiError
 JvmtiEnv::IsMethodObsolete(methodOop method_oop, jboolean* is_obsolete_ptr) {
+  if (use_version_1_0_semantics() &&
+      get_capabilities()->can_redefine_classes == 0) {
+    // This JvmtiEnv requested version 1.0 semantics and this function
+    // requires the can_redefine_classes capability in version 1.0 so
+    // we need to return an error here.
+    return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
+  }
+
   if (method_oop == NULL || method_oop->is_obsolete()) {
     *is_obsolete_ptr = true;
   } else {
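
From an agent's point of view, the 1.0-semantics checks added above mean the outcome of a live-phase AddToBootstrapClassLoaderSearch() call now depends on which version was requested from GetEnv(). A hedged agent-side sketch (the jar path and the attach scenario are hypothetical):

    // attach_agent_sketch.cpp -- illustrative attach-time agent, not part of this changeset.
    #include <jvmti.h>

    extern "C" JNIEXPORT jint JNICALL
    Agent_OnAttach(JavaVM* vm, char* options, void* reserved) {
      jvmtiEnv* jvmti = NULL;
      // Requesting JVMTI_VERSION_1_1 selects the 1.1 semantics; asking for
      // JVMTI_VERSION_1_0 instead would make the call below fail with
      // JVMTI_ERROR_WRONG_PHASE, since in 1.0 it was ONLOAD-only.
      if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_1) != JNI_OK) {
        return JNI_ERR;
      }
      jvmtiError err = jvmti->AddToBootstrapClassLoaderSearch("/tmp/patch.jar");
      return (err == JVMTI_ERROR_NONE) ? JNI_OK : JNI_ERR;
    }
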
--- a/src/share/vm/prims/jvmtiEnvBase.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/prims/jvmtiEnvBase.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -123,7 +123,26 @@
 }
 
 
-JvmtiEnvBase::JvmtiEnvBase() : _env_event_enable() {
+bool
+JvmtiEnvBase::use_version_1_0_semantics() {
+  int major, minor, micro;
+
+  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
+  return major == 1 && minor == 0;  // micro version doesn't matter here
+}
+
+
+bool
+JvmtiEnvBase::use_version_1_1_semantics() {
+  int major, minor, micro;
+
+  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
+  return major == 1 && minor == 1;  // micro version doesn't matter here
+}
+
+
+JvmtiEnvBase::JvmtiEnvBase(jint version) : _env_event_enable() {
+  _version = version;
   _env_local_storage = NULL;
   _tag_map = NULL;
   _native_method_prefix_count = 0;
--- a/src/share/vm/prims/jvmtiEnvBase.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,6 +76,7 @@
 
   jvmtiEnv _jvmti_external;
   jint _magic;
+  jint _version;  // version value passed to JNI GetEnv()
   JvmtiEnvBase* _next;
   bool _is_retransformable;
   const void *_env_local_storage;     // per env agent allocated data.
@@ -91,7 +92,7 @@
   int    _native_method_prefix_count;
 
  protected:
-  JvmtiEnvBase();
+  JvmtiEnvBase(jint version);
   ~JvmtiEnvBase();
   void dispose();
   void env_dispose();
@@ -122,6 +123,9 @@
 
   bool is_valid();
 
+  bool use_version_1_0_semantics();  // agent asked for version 1.0
+  bool use_version_1_1_semantics();  // agent asked for version 1.1
+
   bool is_retransformable()                        { return _is_retransformable; }
 
   static ByteSize jvmti_external_offset() {
--- a/src/share/vm/prims/jvmtiExport.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/prims/jvmtiExport.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -319,7 +319,27 @@
 
 jint
 JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) {
-  /* To Do: add version checks */
+  // The JVMTI_VERSION_INTERFACE_JVMTI part of the version number
+  // has already been validated in JNI GetEnv().
+  int major, minor, micro;
+
+  // micro version doesn't matter here (yet?)
+  decode_version_values(version, &major, &minor, &micro);
+  switch (major) {
+  case 1:
+      switch (minor) {
+      case 0:  // version 1.0.<micro> is recognized
+      case 1:  // version 1.1.<micro> is recognized
+          break;
+
+      default:
+          return JNI_EVERSION;  // unsupported minor version number
+      }
+      break;
+
+  default:
+      return JNI_EVERSION;  // unsupported major version number
+  }
 
   if (JvmtiEnv::get_phase() == JVMTI_PHASE_LIVE) {
     JavaThread* current_thread = (JavaThread*) ThreadLocalStorage::thread();
@@ -328,13 +348,13 @@
     __ENTRY(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread)
     debug_only(VMNativeEntryWrapper __vew;)
 
-    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
+    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti(version);
     *penv = jvmti_env->jvmti_external();  // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
     return JNI_OK;
 
   } else if (JvmtiEnv::get_phase() == JVMTI_PHASE_ONLOAD) {
     // not live, no thread to transition
-    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
+    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti(version);
     *penv = jvmti_env->jvmti_external();  // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
     return JNI_OK;
 
@@ -345,6 +365,15 @@
   }
 }
 
+
+void
+JvmtiExport::decode_version_values(jint version, int * major, int * minor,
+                                   int * micro) {
+  *major = (version & JVMTI_VERSION_MASK_MAJOR) >> JVMTI_VERSION_SHIFT_MAJOR;
+  *minor = (version & JVMTI_VERSION_MASK_MINOR) >> JVMTI_VERSION_SHIFT_MINOR;
+  *micro = (version & JVMTI_VERSION_MASK_MICRO) >> JVMTI_VERSION_SHIFT_MICRO;
+}
+
 void JvmtiExport::enter_primordial_phase() {
   JvmtiEnvBase::set_phase(JVMTI_PHASE_PRIMORDIAL);
 }
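
decode_version_values() is just mask-and-shift over the version word; a standalone sketch using the mask and shift constants that jvmti.h is assumed to provide:

    // decode_version_sketch.cpp -- prints what a JVMTI_VERSION_1_1 request decodes to.
    #include <jvmti.h>
    #include <cstdio>

    static void decode(jint version, int* major, int* minor, int* micro) {
      *major = (version & JVMTI_VERSION_MASK_MAJOR) >> JVMTI_VERSION_SHIFT_MAJOR;
      *minor = (version & JVMTI_VERSION_MASK_MINOR) >> JVMTI_VERSION_SHIFT_MINOR;
      *micro = (version & JVMTI_VERSION_MASK_MICRO) >> JVMTI_VERSION_SHIFT_MICRO;
    }

    int main() {
      int major, minor, micro;
      decode(JVMTI_VERSION_1_1, &major, &minor, &micro);
      std::printf("%d.%d.%d\n", major, minor, micro);   // expected: 1.1.0
      return 0;
    }

Only major and minor matter to get_jvmti_interface() above; the micro value is decoded but ignored for now.
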
--- a/src/share/vm/prims/jvmtiExport.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/prims/jvmtiExport.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -236,6 +236,8 @@
   static bool is_jvmti_version(jint version)                      { return (version & JVMTI_VERSION_MASK) == JVMTI_VERSION_VALUE; }
   static bool is_jvmdi_version(jint version)                      { return (version & JVMTI_VERSION_MASK) == JVMDI_VERSION_VALUE; }
   static jint get_jvmti_interface(JavaVM *jvm, void **penv, jint version);
+  static void decode_version_values(jint version, int * major, int * minor,
+                                    int * micro);
 
   // single stepping management methods
   static void at_single_stepping_point(JavaThread *thread, methodOop method, address location) KERNEL_RETURN;
--- a/src/share/vm/prims/jvmtiHpp.xsl	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/prims/jvmtiHpp.xsl	Wed Jan 06 22:21:39 2010 -0800
@@ -1,6 +1,6 @@
 <?xml version="1.0"?> 
 <!--
- Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -48,12 +48,12 @@
 
 private:
     
-    JvmtiEnv();
+    JvmtiEnv(jint version);
     ~JvmtiEnv();
 
 public:
 
-    static JvmtiEnv* create_a_jvmti();
+    static JvmtiEnv* create_a_jvmti(jint version);
 
 </xsl:text>
   <xsl:apply-templates select="functionsection"/>
--- a/src/share/vm/runtime/frame.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/runtime/frame.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -1189,9 +1189,19 @@
 
 
 void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
-         if (is_interpreted_frame())    { oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
-  } else if (is_entry_frame())          { oops_entry_do      (f, map);
-  } else if (CodeCache::contains(pc())) { oops_code_blob_do  (f, cf, map);
+#ifndef PRODUCT
+  // simulate GC crash here to dump java thread in error report
+  if (CrashGCForDumpingJavaThread) {
+    char *t = NULL;
+    *t = 'c';
+  }
+#endif
+  if (is_interpreted_frame()) {
+    oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
+  } else if (is_entry_frame()) {
+    oops_entry_do(f, map);
+  } else if (CodeCache::contains(pc())) {
+    oops_code_blob_do(f, cf, map);
   } else {
     ShouldNotReachHere();
   }
--- a/src/share/vm/runtime/globals.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/runtime/globals.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -2554,6 +2554,9 @@
           "Include miscellaneous runtime verifications in nmethod code; "   \
           "default off because it disturbs nmethod size heuristics")        \
                                                                             \
+  notproduct(bool, CrashGCForDumpingJavaThread, false,                      \
+          "Manually make GC thread crash then dump java stack trace;  "     \
+          "Test only")                                                      \
                                                                             \
   /* compilation */                                                         \
   product(bool, UseCompiler, true,                                          \
--- a/src/share/vm/runtime/os.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/runtime/os.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -60,24 +60,26 @@
 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
 
 class os: AllStatic {
- private:
+ public:
   enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
 
+ private:
   static OSThread*          _starting_thread;
   static address            _polling_page;
   static volatile int32_t * _mem_serialize_page;
   static uintptr_t          _serialize_page_mask;
+ public:
   static size_t             _page_sizes[page_sizes_max];
 
+ private:
   static void init_page_sizes(size_t default_page_size) {
     _page_sizes[0] = default_page_size;
     _page_sizes[1] = 0; // sentinel
   }
 
  public:
-
-  static void init(void);                       // Called before command line parsing
-  static jint init_2(void);                    // Called after command line parsing
+  static void init(void);   // Called before command line parsing
+  static jint init_2(void); // Called after command line parsing
 
   // File names are case-insensitive on windows only
   // Override me as needed
@@ -141,6 +143,7 @@
   static int processor_count() {
     return _processor_count;
   }
+  static void set_processor_count(int count) { _processor_count = count; }
 
   // Returns the number of CPUs this process is currently allowed to run on.
   // Note that on some OSes this can change dynamically.
--- a/src/share/vm/runtime/thread.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/runtime/thread.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -884,6 +884,22 @@
                                          vmSymbolHandles::void_method_signature(), CHECK);
 }
 
+#ifdef KERNEL
+static void set_jkernel_boot_classloader_hook(TRAPS) {
+  klassOop k = SystemDictionary::sun_jkernel_DownloadManager_klass();
+  instanceKlassHandle klass (THREAD, k);
+
+  if (k == NULL) {
+    // sun.jkernel.DownloadManager may not be present in the JDK; just return
+    return;
+  }
+
+  JavaValue result(T_VOID);
+  JavaCalls::call_static(&result, klass, vmSymbolHandles::setBootClassLoaderHook_name(),
+                                         vmSymbolHandles::void_method_signature(), CHECK);
+}
+#endif // KERNEL
+
 static void reset_vm_info_property(TRAPS) {
   // the vm info string
   ResourceMark rm(THREAD);
@@ -975,6 +991,7 @@
 // uniquely named instances should derive from this.
 NamedThread::NamedThread() : Thread() {
   _name = NULL;
+  _processed_thread = NULL;
 }
 
 NamedThread::~NamedThread() {
@@ -2317,6 +2334,27 @@
   frames_do(frame_gc_prologue);
 }
 
+// If the caller is a NamedThread, then remember, in the current scope,
+// the given JavaThread in its _processed_thread field.
+class RememberProcessedThread: public StackObj {
+  NamedThread* _cur_thr;
+public:
+  RememberProcessedThread(JavaThread* jthr) {
+    Thread* thread = Thread::current();
+    if (thread->is_Named_thread()) {
+      _cur_thr = (NamedThread *)thread;
+      _cur_thr->set_processed_thread(jthr);
+    } else {
+      _cur_thr = NULL;
+    }
+  }
+
+  ~RememberProcessedThread() {
+    if (_cur_thr) {
+      _cur_thr->set_processed_thread(NULL);
+    }
+  }
+};
 
 void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
   // Flush deferred store-barriers, if any, associated with
@@ -2333,6 +2371,8 @@
           (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
 
   if (has_last_Java_frame()) {
+    // Record JavaThread to GC thread
+    RememberProcessedThread rpt(this);
 
     // Traverse the privileged stack
     if (_privileged_stack_top != NULL) {
@@ -3108,6 +3148,12 @@
     vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
   }
 
+#ifdef KERNEL
+  if (JDK_Version::is_gte_jdk17x_version()) {
+    set_jkernel_boot_classloader_hook(THREAD);
+  }
+#endif // KERNEL
+
 #ifndef SERIALGC
   // Support for ConcurrentMarkSweep. This should be cleaned up
   // and better encapsulated. The ugly nested if test would go away
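
RememberProcessedThread is a stack-allocated guard: it publishes which JavaThread a GC worker is currently scanning and clears the field when the scope exits, which is what lets the vmError change later in this changeset print the victim's Java stack. A toy version of the same RAII pattern, with made-up names:

    // scoped_item_sketch.cpp -- RAII guard that records current work for a crash report.
    #include <cstdio>

    struct Worker {
      const char* current_item = nullptr;   // what the worker is currently walking
    };

    class ScopedItem {                      // analogous to RememberProcessedThread
      Worker* _w;
    public:
      ScopedItem(Worker* w, const char* item) : _w(w) { _w->current_item = item; }
      ~ScopedItem() { _w->current_item = nullptr; }   // cleared even on early return
    };

    static void crash_report(const Worker& w) {
      std::printf("worker was processing: %s\n",
                  w.current_item ? w.current_item : "(nothing)");
    }

    int main() {
      Worker w;
      {
        ScopedItem guard(&w, "JavaThread 0x1234");
        crash_report(w);   // prints the item while the guard is live
      }
      crash_report(w);     // cleared after the scope exits
      return 0;
    }
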
--- a/src/share/vm/runtime/thread.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/runtime/thread.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -48,7 +48,12 @@
 
 // Class hierarchy
 // - Thread
-//   - VMThread
+//   - NamedThread
+//     - VMThread
+//     - ConcurrentGCThread
+//     - WorkerThread
+//       - GangWorker
+//       - GCTaskThread
 //   - JavaThread
 //   - WatcherThread
 
@@ -249,6 +254,7 @@
   virtual bool is_GC_task_thread() const             { return false; }
   virtual bool is_Watcher_thread() const             { return false; }
   virtual bool is_ConcurrentGC_thread() const        { return false; }
+  virtual bool is_Named_thread() const               { return false; }
 
   virtual char* name() const { return (char*)"Unknown thread"; }
 
@@ -568,12 +574,18 @@
   };
  private:
   char* _name;
+  // log JavaThread being processed by oops_do
+  JavaThread* _processed_thread;
+
  public:
   NamedThread();
   ~NamedThread();
   // May only be called once per thread.
   void set_name(const char* format, ...);
+  virtual bool is_Named_thread() const { return true; }
   virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
+  JavaThread *processed_thread() { return _processed_thread; }
+  void set_processed_thread(JavaThread *thread) { _processed_thread = thread; }
 };
 
 // Worker threads are named and have an id of an assigned work.
--- a/src/share/vm/runtime/vmStructs.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/runtime/vmStructs.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -664,6 +664,7 @@
   nonstatic_field(Thread,                      _current_pending_monitor_is_from_java,         bool)                                  \
   nonstatic_field(Thread,                      _current_waiting_monitor,                      ObjectMonitor*)                        \
   nonstatic_field(NamedThread,                 _name,                                         char*)                                 \
+  nonstatic_field(NamedThread,                 _processed_thread,                             JavaThread*)                           \
   nonstatic_field(JavaThread,                  _next,                                         JavaThread*)                           \
   nonstatic_field(JavaThread,                  _threadObj,                                    oop)                                   \
   nonstatic_field(JavaThread,                  _anchor,                                       JavaFrameAnchor)                       \
--- a/src/share/vm/runtime/vmThread.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/runtime/vmThread.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -204,8 +204,8 @@
 }
 
 
-VMThread::VMThread() : Thread() {
-  // nothing to do
+VMThread::VMThread() : NamedThread() {
+  set_name("VM Thread");
 }
 
 void VMThread::destroy() {
--- a/src/share/vm/runtime/vmThread.hpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/runtime/vmThread.hpp	Wed Jan 06 22:21:39 2010 -0800
@@ -83,7 +83,7 @@
 // like scavenge, garbage_collect etc.
 //
 
-class VMThread: public Thread {
+class VMThread: public NamedThread {
  private:
   static ThreadPriority _current_priority;
 
@@ -101,8 +101,6 @@
   bool is_VM_thread() const                      { return true; }
   bool is_GC_thread() const                      { return true; }
 
-  char* name() const { return (char*)"VM Thread"; }
-
   // The ever running loop for the VMThread
   void loop();
 
--- a/src/share/vm/utilities/vmError.cpp	Wed Jan 06 14:25:03 2010 -0800
+++ b/src/share/vm/utilities/vmError.cpp	Wed Jan 06 22:21:39 2010 -0800
@@ -502,6 +502,23 @@
 #endif // ZERO
      }
 
+  STEP(135, "(printing target Java thread stack)" )
+
+     // printing Java thread stack trace if it is involved in GC crash
+     if (_verbose && (_thread->is_Named_thread())) {
+       JavaThread*  jt = ((NamedThread *)_thread)->processed_thread();
+       if (jt != NULL) {
+         st->print_cr("JavaThread " PTR_FORMAT " (nid = " UINTX_FORMAT ") was being processed", jt, jt->osthread()->thread_id());
+         if (jt->has_last_Java_frame()) {
+           st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)");
+           for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) {
+             sfs.current()->print_on_error(st, buf, sizeof(buf), true);
+             st->cr();
+           }
+         }
+       }
+     }
+
   STEP(140, "(printing VM operation)" )
 
      if (_verbose && _thread && _thread->is_VM_thread()) {