changeset 8030:f989aff6946f

Merge
author zgu
date Fri, 08 Feb 2013 16:56:03 -0800
parents 1ba5b18088a8 3f11b37f047c
children 927a311d00f9
diffstat 160 files changed, 1212 insertions(+), 975 deletions(-)
--- a/agent/src/os/linux/LinuxDebuggerLocal.c	Fri Feb 08 13:55:41 2013 -0800
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c	Fri Feb 08 16:56:03 2013 -0800
@@ -280,7 +280,7 @@
   return (err == PS_OK)? array : 0;
 }
 
-#if defined(i386) || defined(ia64) || defined(amd64) || defined(sparc) || defined(sparcv9)
+#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9)
 JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0
   (JNIEnv *env, jobject this_obj, jint lwp_id) {
 
@@ -299,9 +299,6 @@
 #ifdef i386
 #define NPRGREG sun_jvm_hotspot_debugger_x86_X86ThreadContext_NPRGREG
 #endif
-#ifdef ia64
-#define NPRGREG IA64_REG_COUNT
-#endif
 #ifdef amd64
 #define NPRGREG sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_NPRGREG
 #endif
@@ -336,13 +333,6 @@
 
 #endif /* i386 */
 
-#if ia64
-  regs = (*env)->GetLongArrayElements(env, array, &isCopy);
-  for (i = 0; i < NPRGREG; i++ ) {
-    regs[i] = 0xDEADDEAD;
-  }
-#endif /* ia64 */
-
 #ifdef amd64
 #define REG_INDEX(reg) sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_##reg
 
--- a/agent/src/os/linux/libproc.h	Fri Feb 08 13:55:41 2013 -0800
+++ b/agent/src/os/linux/libproc.h	Fri Feb 08 16:56:03 2013 -0800
@@ -79,14 +79,6 @@
 
 *************************************************************************************/
 
-#ifdef ia64
-struct user_regs_struct {
-/* copied from user.h which doesn't define this in a struct */
-
-#define IA64_REG_COUNT (EF_SIZE/8+32)   /* integer and fp regs */
-unsigned long   regs[IA64_REG_COUNT];     /* integer and fp regs */
-};
-#endif
 
 #if defined(sparc)  || defined(sparcv9)
 #define user_regs_struct  pt_regs
--- a/agent/src/os/win32/windbg/sawindbg.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/agent/src/os/win32/windbg/sawindbg.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -27,10 +27,7 @@
 
 #include "sun_jvm_hotspot_debugger_windbg_WindbgDebuggerLocal.h"
 
-#ifdef _M_IA64
-  #include "sun_jvm_hotspot_debugger_ia64_IA64ThreadContext.h"
-  #define NPRGREG sun_jvm_hotspot_debugger_ia64_IA64ThreadContext_NPRGREG
-#elif _M_IX86
+#ifdef _M_IX86
   #include "sun_jvm_hotspot_debugger_x86_X86ThreadContext.h"
   #define NPRGREG sun_jvm_hotspot_debugger_x86_X86ThreadContext_NPRGREG
 #elif _M_AMD64
@@ -491,92 +488,7 @@
      memset(&context, 0, sizeof(CONTEXT));
 
 #undef REG_INDEX
-#ifdef _M_IA64
-     #define REG_INDEX(x) sun_jvm_hotspot_debugger_ia64_IA64ThreadContext_##x
-
-     context.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG;
-     ptrIDebugAdvanced->GetThreadContext(&context, sizeof(CONTEXT));
-
-     ptrRegs[REG_INDEX(GR0)]  = 0; // always 0
-     ptrRegs[REG_INDEX(GR1)]  = context.IntGp;  // r1
-     ptrRegs[REG_INDEX(GR2)]  = context.IntT0;  // r2-r3
-     ptrRegs[REG_INDEX(GR3)]  = context.IntT1;
-     ptrRegs[REG_INDEX(GR4)]  = context.IntS0;  // r4-r7
-     ptrRegs[REG_INDEX(GR5)]  = context.IntS1;
-     ptrRegs[REG_INDEX(GR6)]  = context.IntS2;
-     ptrRegs[REG_INDEX(GR7)]  = context.IntS3;
-     ptrRegs[REG_INDEX(GR8)]  = context.IntV0;  // r8
-     ptrRegs[REG_INDEX(GR9)]  = context.IntT2;  // r9-r11
-     ptrRegs[REG_INDEX(GR10)] = context.IntT3;
-     ptrRegs[REG_INDEX(GR11)] = context.IntT4;
-     ptrRegs[REG_INDEX(GR12)] = context.IntSp;  // r12 stack pointer
-     ptrRegs[REG_INDEX(GR13)] = context.IntTeb; // r13 teb
-     ptrRegs[REG_INDEX(GR14)] = context.IntT5;  // r14-r31
-     ptrRegs[REG_INDEX(GR15)] = context.IntT6;
-     ptrRegs[REG_INDEX(GR16)] = context.IntT7;
-     ptrRegs[REG_INDEX(GR17)] = context.IntT8;
-     ptrRegs[REG_INDEX(GR18)] = context.IntT9;
-     ptrRegs[REG_INDEX(GR19)] = context.IntT10;
-     ptrRegs[REG_INDEX(GR20)] = context.IntT11;
-     ptrRegs[REG_INDEX(GR21)] = context.IntT12;
-     ptrRegs[REG_INDEX(GR22)] = context.IntT13;
-     ptrRegs[REG_INDEX(GR23)] = context.IntT14;
-     ptrRegs[REG_INDEX(GR24)] = context.IntT15;
-     ptrRegs[REG_INDEX(GR25)] = context.IntT16;
-     ptrRegs[REG_INDEX(GR26)] = context.IntT17;
-     ptrRegs[REG_INDEX(GR27)] = context.IntT18;
-     ptrRegs[REG_INDEX(GR28)] = context.IntT19;
-     ptrRegs[REG_INDEX(GR29)] = context.IntT20;
-     ptrRegs[REG_INDEX(GR30)] = context.IntT21;
-     ptrRegs[REG_INDEX(GR31)] = context.IntT22;
-
-     ptrRegs[REG_INDEX(INT_NATS)] = context.IntNats;
-     ptrRegs[REG_INDEX(PREDS)]    = context.Preds;
-
-     ptrRegs[REG_INDEX(BR_RP)] = context.BrRp;
-     ptrRegs[REG_INDEX(BR1)]   = context.BrS0;  // b1-b5
-     ptrRegs[REG_INDEX(BR2)]   = context.BrS1;
-     ptrRegs[REG_INDEX(BR3)]   = context.BrS2;
-     ptrRegs[REG_INDEX(BR4)]   = context.BrS3;
-     ptrRegs[REG_INDEX(BR5)]   = context.BrS4;
-     ptrRegs[REG_INDEX(BR6)]   = context.BrT0;  // b6-b7
-     ptrRegs[REG_INDEX(BR7)]   = context.BrT1;
-
-     ptrRegs[REG_INDEX(AP_UNAT)] = context.ApUNAT;
-     ptrRegs[REG_INDEX(AP_LC)]   = context.ApLC;
-     ptrRegs[REG_INDEX(AP_EC)]   = context.ApEC;
-     ptrRegs[REG_INDEX(AP_CCV)]  = context.ApCCV;
-     ptrRegs[REG_INDEX(AP_DCR)]  = context.ApDCR;
-
-     ptrRegs[REG_INDEX(RS_PFS)]      = context.RsPFS;
-     ptrRegs[REG_INDEX(RS_BSP)]      = context.RsBSP;
-     ptrRegs[REG_INDEX(RS_BSPSTORE)] = context.RsBSPSTORE;
-     ptrRegs[REG_INDEX(RS_RSC)]      = context.RsRSC;
-     ptrRegs[REG_INDEX(RS_RNAT)]     = context.RsRNAT;
-
-     ptrRegs[REG_INDEX(ST_IPSR)] = context.StIPSR;
-     ptrRegs[REG_INDEX(ST_IIP)]  = context.StIIP;
-     ptrRegs[REG_INDEX(ST_IFS)]  = context.StIFS;
-
-     ptrRegs[REG_INDEX(DB_I0)] = context.DbI0;
-     ptrRegs[REG_INDEX(DB_I1)] = context.DbI1;
-     ptrRegs[REG_INDEX(DB_I2)] = context.DbI2;
-     ptrRegs[REG_INDEX(DB_I3)] = context.DbI3;
-     ptrRegs[REG_INDEX(DB_I4)] = context.DbI4;
-     ptrRegs[REG_INDEX(DB_I5)] = context.DbI5;
-     ptrRegs[REG_INDEX(DB_I6)] = context.DbI6;
-     ptrRegs[REG_INDEX(DB_I7)] = context.DbI7;
-
-     ptrRegs[REG_INDEX(DB_D0)] = context.DbD0;
-     ptrRegs[REG_INDEX(DB_D1)] = context.DbD1;
-     ptrRegs[REG_INDEX(DB_D2)] = context.DbD2;
-     ptrRegs[REG_INDEX(DB_D3)] = context.DbD3;
-     ptrRegs[REG_INDEX(DB_D4)] = context.DbD4;
-     ptrRegs[REG_INDEX(DB_D5)] = context.DbD5;
-     ptrRegs[REG_INDEX(DB_D6)] = context.DbD6;
-     ptrRegs[REG_INDEX(DB_D7)] = context.DbD7;
-
-#elif _M_IX86
+#ifdef _M_IX86
      #define REG_INDEX(x) sun_jvm_hotspot_debugger_x86_X86ThreadContext_##x
 
      context.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG_REGISTERS;
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Fri Feb 08 13:55:41 2013 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Fri Feb 08 16:56:03 2013 -0800
@@ -467,7 +467,7 @@
               liveRegions.add(tlab.start());
               liveRegions.add(tlab.start());
               liveRegions.add(tlab.top());
-              liveRegions.add(tlab.end());
+              liveRegions.add(tlab.hardEnd());
             }
           }
         }
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ThreadLocalAllocBuffer.java	Fri Feb 08 13:55:41 2013 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ThreadLocalAllocBuffer.java	Fri Feb 08 16:56:03 2013 -0800
@@ -27,6 +27,7 @@
 import java.io.*;
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.types.*;
 
 /** <P> ThreadLocalAllocBuffer: a descriptor for thread-local storage
@@ -62,9 +63,22 @@
     super(addr);
   }
 
-  public Address start()                        { return startField.getValue(addr); }
-  public Address end()                          { return   endField.getValue(addr); }
-  public Address top()                          { return   topField.getValue(addr); }
+  public Address start()    { return startField.getValue(addr); }
+  public Address end()      { return   endField.getValue(addr); }
+  public Address top()      { return   topField.getValue(addr); }
+  public Address hardEnd()  { return end().addOffsetTo(alignmentReserve()); }
+
+  private long alignmentReserve() {
+    return Oop.alignObjectSize(endReserve());
+  }
+
+  private long endReserve() {
+    long minFillerArraySize = Array.baseOffsetInBytes(BasicType.T_INT);
+    long reserveForAllocationPrefetch = VM.getVM().getReserveForAllocationPrefetch();
+    long heapWordSize = VM.getVM().getHeapWordSize();
+
+    return Math.max(minFillerArraySize, reserveForAllocationPrefetch * heapWordSize);
+  }
 
   /** Support for iteration over heap -- not sure how this will
       interact with GC in reflective system, but necessary for the
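
For context, hardEnd() exposes the TLAB's true allocation limit: the VM keeps a reserve past end() so it can always place a filler object there and absorb allocation prefetching. A minimal standalone sketch of the arithmetic, with illustrative values (the real ones are read from the target VM; see the VM.java change below):

    #include <algorithm>
    #include <cstdio>

    int main() {
      // All values are illustrative assumptions, not read from a real VM.
      const long heap_word_size             = 8;   // 64-bit heap words
      const long min_filler_array_size      = 16;  // base offset of a filler int[] in bytes
      const long reserve_for_alloc_prefetch = 4;   // _reserve_for_allocation_prefetch

      // endReserve(): room for either a filler array or allocation prefetching
      long end_reserve = std::max(min_filler_array_size,
                                  reserve_for_alloc_prefetch * heap_word_size);

      // hardEnd() = end() + alignmentReserve(), where alignmentReserve() rounds
      // end_reserve up to the object alignment (8 bytes assumed here).
      const long object_alignment = 8;
      long alignment_reserve = ((end_reserve + object_alignment - 1)
                                / object_alignment) * object_alignment;
      printf("end reserve = %ld bytes, alignment reserve = %ld bytes\n",
             end_reserve, alignment_reserve);
      return 0;
    }
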
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Feb 08 13:55:41 2013 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Feb 08 16:56:03 2013 -0800
@@ -114,6 +114,7 @@
   private int          invalidOSREntryBCI;
   private ReversePtrs  revPtrs;
   private VMRegImpl    vmregImpl;
+  private int          reserveForAllocationPrefetch;
 
   // System.getProperties from debuggee VM
   private Properties   sysProps;
@@ -293,6 +294,10 @@
        vmRelease = CStringUtilities.getString(releaseAddr);
        Address vmInternalInfoAddr = vmVersion.getAddressField("_s_internal_vm_info_string").getValue();
        vmInternalInfo = CStringUtilities.getString(vmInternalInfoAddr);
+
+       CIntegerType intType = (CIntegerType) db.lookupType("int");
+       CIntegerField reserveForAllocationPrefetchField = vmVersion.getCIntegerField("_reserve_for_allocation_prefetch");
+       reserveForAllocationPrefetch = (int)reserveForAllocationPrefetchField.getCInteger(intType);
     } catch (Exception exp) {
        throw new RuntimeException("can't determine target's VM version : " + exp.getMessage());
     }
@@ -778,6 +783,10 @@
     return vmInternalInfo;
   }
 
+  public int getReserveForAllocationPrefetch() {
+    return reserveForAllocationPrefetch;
+  }
+
   public boolean isSharingEnabled() {
     if (sharingEnabled == null) {
       Flag flag = getCommandLineFlag("UseSharedSpaces");
--- a/make/bsd/makefiles/minimal1.make	Fri Feb 08 13:55:41 2013 -0800
+++ b/make/bsd/makefiles/minimal1.make	Fri Feb 08 16:56:03 2013 -0800
@@ -30,7 +30,7 @@
 INCLUDE_JNI_CHECK ?= false
 INCLUDE_SERVICES ?= false
 INCLUDE_MANAGEMENT ?= false
-INCLUDE_ALTERNATE_GCS ?= false
+INCLUDE_ALL_GCS ?= false
 INCLUDE_NMT ?= false
 INCLUDE_CDS ?= false
 
--- a/make/excludeSrc.make	Fri Feb 08 13:55:41 2013 -0800
+++ b/make/excludeSrc.make	Fri Feb 08 16:56:03 2013 -0800
@@ -72,12 +72,10 @@
       Src_Files_EXCLUDE += metaspaceShared.cpp
 endif
 
-ifeq ($(INCLUDE_ALTERNATE_GCS), false)
-      CXXFLAGS += -DINCLUDE_ALTERNATE_GCS=0
-      CFLAGS += -DINCLUDE_ALTERNATE_GCS=0
+ifeq ($(INCLUDE_ALL_GCS), false)
+      CXXFLAGS += -DINCLUDE_ALL_GCS=0
+      CFLAGS += -DINCLUDE_ALL_GCS=0
 
-      CXXFLAGS += -DSERIALGC
-      CFLAGS += -DSERIALGC
       Src_Files_EXCLUDE += \
 	cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
 	cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp cmsPermGen.cpp compactibleFreeListSpace.cpp \
--- a/make/linux/makefiles/minimal1.make	Fri Feb 08 13:55:41 2013 -0800
+++ b/make/linux/makefiles/minimal1.make	Fri Feb 08 16:56:03 2013 -0800
@@ -30,7 +30,7 @@
 INCLUDE_JNI_CHECK ?= false
 INCLUDE_SERVICES ?= false
 INCLUDE_MANAGEMENT ?= false
-INCLUDE_ALTERNATE_GCS ?= false
+INCLUDE_ALL_GCS ?= false
 INCLUDE_NMT ?= false
 INCLUDE_CDS ?= false
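
The rename from INCLUDE_ALTERNATE_GCS/SERIALGC to INCLUDE_ALL_GCS also flips the guard style from #ifndef SERIALGC to a value test, as the source diffs below show. A self-contained sketch of the value-based pattern (the default and messages here are illustrative):

    #include <cstdio>

    // In HotSpot this value comes from utilities/macros.hpp (driven by the
    // -DINCLUDE_ALL_GCS=0 flag added in excludeSrc.make); defaulted here so
    // the sketch stands alone.
    #ifndef INCLUDE_ALL_GCS
    #define INCLUDE_ALL_GCS 1
    #endif

    int main() {
    #if INCLUDE_ALL_GCS
      printf("full VM: G1/CMS/parallel collectors compiled in\n");
    #else
      printf("minimal VM: serial GC only\n");
    #endif
      return 0;
    }
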
 
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -30,10 +30,11 @@
 #include "c1/c1_Runtime1.hpp"
 #include "nativeInst_sparc.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "utilities/macros.hpp"
 #include "vmreg_sparc.inline.hpp"
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 #define __ ce->masm()->
 
@@ -420,7 +421,7 @@
 
 
 ///////////////////////////////////////////////////////////////////////////////////
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
   // At this point we know that marking is in progress.
@@ -483,7 +484,7 @@
   __ delayed()->nop();
 }
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 ///////////////////////////////////////////////////////////////////////////////////
 
 #undef __
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -35,6 +35,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/vframeArray.hpp"
+#include "utilities/macros.hpp"
 #include "vmreg_sparc.inline.hpp"
 
 // Implementation of StubAssembler
@@ -822,7 +823,7 @@
       }
       break;
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case g1_pre_barrier_slow_id:
       { // G4: previous value of memory
         BarrierSet* bs = Universe::heap()->barrier_set();
@@ -984,7 +985,7 @@
         __ delayed()->restore();
       }
       break;
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
     default:
       { __ set_info("unimplemented entry", dont_gc_arguments);
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -42,7 +42,7 @@
 #else
 define_pd_global(bool, ProfileInterpreter,           true);
 #endif // CC_INTERP
-define_pd_global(bool, TieredCompilation,            trueInTiered);
+define_pd_global(bool, TieredCompilation,            false);
 define_pd_global(intx, CompileThreshold,             10000);
 define_pd_global(intx, BackEdgeThreshold,            140000);
 
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -45,6 +45,7 @@
 #include "runtime/timer.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 #ifdef SHARK
 #include "shark/shark_globals.hpp"
 #endif
@@ -551,7 +552,7 @@
 }
 
 address InterpreterGenerator::generate_Reference_get_entry(void) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseG1GC) {
     // We need to have a routine that generates code to:
     //   * load the value in the referent field
@@ -563,7 +564,7 @@
     // field as live.
     Unimplemented();
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -36,11 +36,12 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
@@ -3867,7 +3868,7 @@
 }
 
 ///////////////////////////////////////////////////////////////////////////////////
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 static address satb_log_enqueue_with_frame = NULL;
 static u_char* satb_log_enqueue_with_frame_end = NULL;
@@ -4231,7 +4232,7 @@
   bind(filtered);
 }
 
-#endif  // SERIALGC
+#endif // INCLUDE_ALL_GCS
 ///////////////////////////////////////////////////////////////////////////////////
 
 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
--- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -26,6 +26,7 @@
 #define CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
 
 #include "asm/assembler.hpp"
+#include "utilities/macros.hpp"
 
 // <sys/trap.h> promises that the system will not use traps 16-31
 #define ST_RESERVED_FOR_USER_0 0x10
@@ -1181,13 +1182,13 @@
 
   void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // General G1 pre-barrier generator.
   void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
 
   // General G1 post-barrier generator
   void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
   void push_fTOS();
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -44,6 +44,7 @@
 #include "runtime/timer.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 
 #ifndef CC_INTERP
 #ifndef FAST_DISPATCH
@@ -734,7 +735,7 @@
 
 // Method entry for java.lang.ref.Reference.get.
 address InterpreterGenerator::generate_Reference_get_entry(void) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Code: _aload_0, _getfield, _areturn
   // parameter size = 1
   //
@@ -805,7 +806,7 @@
     (void) generate_normal_entry(false);
     return entry;
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -34,6 +34,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
+#include "utilities/macros.hpp"
 
 #ifndef CC_INTERP
 #define __ _masm->
@@ -53,7 +54,7 @@
   assert(tmp != val && tmp != base && tmp != index, "register collision");
   assert(index == noreg || offset == 0, "only one offset");
   switch (barrier) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       {
@@ -82,7 +83,7 @@
         }
       }
       break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableModRef:
     case BarrierSet::CardTableExtension:
       {
--- a/src/cpu/x86/vm/assembler_x86.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -36,11 +36,12 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -30,10 +30,11 @@
 #include "c1/c1_Runtime1.hpp"
 #include "nativeInst_x86.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "utilities/macros.hpp"
 #include "vmreg_x86.inline.hpp"
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 
 #define __ ce->masm()->
@@ -482,7 +483,7 @@
 }
 
 /////////////////////////////////////////////////////////////////////////////
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
   // At this point we know that marking is in progress.
@@ -528,7 +529,7 @@
   __ jmp(_continuation);
 }
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 /////////////////////////////////////////////////////////////////////////////
 
 #undef __
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -36,6 +36,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/vframeArray.hpp"
+#include "utilities/macros.hpp"
 #include "vmreg_x86.inline.hpp"
 
 
@@ -1607,7 +1608,7 @@
       }
       break;
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case g1_pre_barrier_slow_id:
       {
         StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
@@ -1804,7 +1805,7 @@
 
       }
       break;
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
     default:
       { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -44,7 +44,7 @@
 #else
 define_pd_global(bool, ProfileInterpreter,           true);
 #endif // CC_INTERP
-define_pd_global(bool, TieredCompilation,            trueInTiered);
+define_pd_global(bool, TieredCompilation,            false);
 define_pd_global(intx, CompileThreshold,             10000);
 define_pd_global(intx, BackEdgeThreshold,            100000);
 
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -45,6 +45,7 @@
 #include "runtime/timer.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 #ifdef SHARK
 #include "shark/shark_globals.hpp"
 #endif
@@ -938,7 +939,7 @@
 }
 
 address InterpreterGenerator::generate_Reference_get_entry(void) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseG1GC) {
     // We need to have a routine that generates code to:
     //   * load the value in the referent field
@@ -950,7 +951,7 @@
     // field as live.
     Unimplemented();
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -37,11 +37,12 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
@@ -3207,7 +3208,7 @@
 
 
 //////////////////////////////////////////////////////////////////////////////////
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 void MacroAssembler::g1_write_barrier_pre(Register obj,
                                           Register pre_val,
@@ -3417,7 +3418,7 @@
   bind(done);
 }
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 //////////////////////////////////////////////////////////////////////////////////
 
 
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -26,6 +26,7 @@
 #define CPU_X86_VM_MACROASSEMBLER_X86_HPP
 
 #include "asm/assembler.hpp"
+#include "utilities/macros.hpp"
 
 
 // MacroAssembler extends Assembler by frequently used macros.
@@ -294,7 +295,7 @@
   void store_check(Register obj);                // store check for obj - register is destroyed afterwards
   void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
   void g1_write_barrier_pre(Register obj,
                             Register pre_val,
@@ -309,7 +310,7 @@
                              Register tmp,
                              Register tmp2);
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // split store_check(Register obj) to enhance instruction interleaving
   void store_check_part_1(Register obj);
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -44,6 +44,7 @@
 #include "runtime/timer.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 
 #define __ _masm->
 
@@ -761,7 +762,7 @@
 
 // Method entry for java.lang.ref.Reference.get.
 address InterpreterGenerator::generate_Reference_get_entry(void) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Code: _aload_0, _getfield, _areturn
   // parameter size = 1
   //
@@ -844,7 +845,7 @@
 
     return entry;
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -44,6 +44,7 @@
 #include "runtime/timer.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 
 #define __ _masm->
 
@@ -742,7 +743,7 @@
 
 // Method entry for java.lang.ref.Reference.get.
 address InterpreterGenerator::generate_Reference_get_entry(void) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Code: _aload_0, _getfield, _areturn
   // parameter size = 1
   //
@@ -821,7 +822,7 @@
 
     return entry;
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -35,6 +35,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
+#include "utilities/macros.hpp"
 
 #ifndef CC_INTERP
 #define __ _masm->
@@ -125,7 +126,7 @@
                          bool precise) {
   assert(val == noreg || val == rax, "parameter is just for looks");
   switch (barrier) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       {
@@ -164,7 +165,7 @@
 
       }
       break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableModRef:
     case BarrierSet::CardTableExtension:
       {
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -35,6 +35,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
+#include "utilities/macros.hpp"
 
 #ifndef CC_INTERP
 
@@ -136,7 +137,7 @@
                          bool precise) {
   assert(val == noreg || val == rax, "parameter is just for looks");
   switch (barrier) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       {
@@ -167,7 +168,7 @@
 
       }
       break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableModRef:
     case BarrierSet::CardTableExtension:
       {
--- a/src/cpu/zero/vm/assembler_zero.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/zero/vm/assembler_zero.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -36,11 +36,12 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 int AbstractAssembler::code_fill_byte() {
   return 0;
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -47,6 +47,7 @@
 #include "runtime/vframeArray.hpp"
 #include "stack_zero.inline.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 #ifdef SHARK
 #include "shark/shark_globals.hpp"
 #endif
@@ -791,7 +792,7 @@
 }
 
 address InterpreterGenerator::generate_Reference_get_entry(void) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseG1GC) {
     // We need to have a routine that generates code to:
     //   * load the value in the referent field
@@ -803,7 +804,7 @@
     // field as live.
     Unimplemented();
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
--- a/src/os/linux/vm/os_linux.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/os/linux/vm/os_linux.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -1155,13 +1155,9 @@
   //   for initial thread if its stack size exceeds 6M. Cap it at 2M,
   //   in case other parts in glibc still assume 2M max stack size.
   // FIXME: alt signal stack is gone, maybe we can relax this constraint?
-#ifndef IA64
-  if (stack_size > 2 * K * K) stack_size = 2 * K * K;
-#else
   // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
-  if (stack_size > 4 * K * K) stack_size = 4 * K * K;
-#endif
-
+  if (stack_size > 2 * K * K IA64_ONLY(*2))
+      stack_size = 2 * K * K IA64_ONLY(*2);
   // Try to figure out where the stack base (top) is. This is harder.
   //
   // When an application is started, glibc saves the initial stack pointer in
@@ -4367,16 +4363,12 @@
    if (is_NPTL()) {
       return pthread_cond_timedwait(_cond, _mutex, _abstime);
    } else {
-#ifndef IA64
       // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
       // word back to default 64bit precision if condvar is signaled. Java
       // wants 53bit precision.  Save and restore current value.
       int fpu = get_fpu_control_word();
-#endif // IA64
       int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
-#ifndef IA64
       set_fpu_control_word(fpu);
-#endif // IA64
       return status;
    }
 }
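
With the IA64 guards gone, the FPU control-word save/restore around pthread_cond_timedwait (bug 6292965) now runs unconditionally. HotSpot's helpers are get_fpu_control_word()/set_fpu_control_word(); the sketch below shows the same save/restore idea using standard <cfenv> instead:

    #include <cfenv>
    #include <cstdio>

    int main() {
      std::fenv_t saved;
      std::fegetenv(&saved);   // save the FP environment, control word included
      // ... a call that may clobber FP precision, e.g. pthread_cond_timedwait
      //     under old LinuxThreads resetting x87 precision to 64 bits ...
      std::fesetenv(&saved);   // restore the caller's 53-bit precision setting
      printf("FP environment saved and restored\n");
      return 0;
    }
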
--- a/src/os/windows/vm/os_windows.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/os/windows/vm/os_windows.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -349,6 +349,33 @@
 
 #ifdef _M_IA64
   // IA64 has memory and register stacks
+  //
+  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
+  // at thread creation (1MB backing store growing upwards, 1MB memory stack
+  // growing downwards, 2MB summed up)
+  //
+  // ...
+  // ------- top of stack (high address) -----
+  // |
+  // |      1MB
+  // |      Backing Store (Register Stack)
+  // |
+  // |         / \
+  // |          |
+  // |          |
+  // |          |
+  // ------------------------ stack base -----
+  // |      1MB
+  // |      Memory Stack
+  // |
+  // |          |
+  // |          |
+  // |          |
+  // |         \ /
+  // |
+  // ----- bottom of stack (low address) -----
+  // ...
+
   stack_size = stack_size / 2;
 #endif
   return stack_bottom + stack_size;
@@ -2005,17 +2032,34 @@
   JavaThread* thread = JavaThread::current();
   // Save pc in thread
 #ifdef _M_IA64
-  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP);
+  // Do not blow up if no thread info available.
+  if (thread) {
+    // Saving PRECISE pc (with slot information) in thread.
+    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
+    // Convert precise PC into "Unix" format
+    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
+    thread->set_saved_exception_pc((address)precise_pc);
+  }
   // Set pc to handler
   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
+  // Clear out psr.ri (= Restart Instruction) in order to continue
+  // at the beginning of the target bundle.
+  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
+  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
 #elif _M_AMD64
-  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip);
+  // Do not blow up if no thread info available.
+  if (thread) {
+    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
+  }
   // Set pc to handler
   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
 #else
-  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Eip);
+  // Do not blow up if no thread info available.
+  if (thread) {
+    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
+  }
   // Set pc to handler
-  exceptionInfo->ContextRecord->Eip = (LONG)handler;
+  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
 #endif
 
   // Continue the execution
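
The precise-PC handling above hinges on one bit manipulation: Itanium encodes the instruction slot within a 16-byte bundle in the low 4 bits of the address, Windows-style as 0x0/0x4/0x8 and Unix-style as 0/1/2. A standalone sketch of the conversion used in the handler (the addresses are made up):

    #include <cstdint>
    #include <cstdio>

    // Windows encodes slot0/1/2 as 0x0/0x4/0x8 in the low bits; the Unix
    // convention, which HotSpot's metadata uses, encodes them as 0/1/2.
    static uint64_t to_unix_format(uint64_t precise_pc) {
      return (precise_pc & 0xFFFFFFFFFFFFFFF0ULL) | ((precise_pc & 0xF) >> 2);
    }

    int main() {
      const uint64_t bundle = 0x2000000000001230ULL;  // illustrative bundle address
      const uint64_t slots[] = {0x0, 0x4, 0x8};       // Windows slot encodings
      for (uint64_t win_slot : slots) {
        printf("windows pc 0x%016llx -> unix pc 0x%016llx\n",
               (unsigned long long)(bundle | win_slot),
               (unsigned long long)to_unix_format(bundle | win_slot));
      }
      return 0;
    }
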
@@ -2040,6 +2084,14 @@
 // included or copied here.
 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
 
+// Handle NAT Bit consumption on IA64.
+#ifdef _M_IA64
+#define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
+#endif
+
+// Windows Vista/2008 heap corruption check
+#define EXCEPTION_HEAP_CORRUPTION        0xC0000374
+
 #define def_excpt(val) #val, val
 
 struct siglabel {
@@ -2082,6 +2134,10 @@
     def_excpt(EXCEPTION_GUARD_PAGE),
     def_excpt(EXCEPTION_INVALID_HANDLE),
     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
+    def_excpt(EXCEPTION_HEAP_CORRUPTION),
+#ifdef _M_IA64
+    def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
+#endif
     NULL, 0
 };
 
@@ -2206,7 +2262,14 @@
   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
 #ifdef _M_IA64
-  address pc = (address) exceptionInfo->ContextRecord->StIIP;
+  // On Itanium, we need the "precise pc", which has the slot number coded
+  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
+  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
+  // Convert the pc to "Unix format", which has the slot number coded
+  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
+  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
+  // information is saved in the Unix format.
+  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
 #elif _M_AMD64
   address pc = (address) exceptionInfo->ContextRecord->Rip;
 #else
@@ -2321,29 +2384,40 @@
     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
       if (os::uses_stack_guard_pages()) {
 #ifdef _M_IA64
-        //
-        // If it's a legal stack address continue, Windows will map it in.
-        //
+        // Use guard page for register stack.
         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
         address addr = (address) exceptionRecord->ExceptionInformation[1];
-        if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() )
-          return EXCEPTION_CONTINUE_EXECUTION;
-
-        // The register save area is the same size as the memory stack
-        // and starts at the page just above the start of the memory stack.
-        // If we get a fault in this area, we've run out of register
-        // stack.  If we are in java, try throwing a stack overflow exception.
-        if (addr > thread->stack_base() &&
-                      addr <= (thread->stack_base()+thread->stack_size()) ) {
-          char buf[256];
-          jio_snprintf(buf, sizeof(buf),
-                       "Register stack overflow, addr:%p, stack_base:%p\n",
-                       addr, thread->stack_base() );
-          tty->print_raw_cr(buf);
-          // If not in java code, return and hope for the best.
-          return in_java ? Handle_Exception(exceptionInfo,
-            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
-            :  EXCEPTION_CONTINUE_EXECUTION;
+        // Check for a register stack overflow on Itanium
+        if (thread->addr_inside_register_stack_red_zone(addr)) {
+          // Fatal red zone violation happens if the Java program
+          // catches a StackOverflow error and does so much processing
+          // that it runs beyond the unprotected yellow guard zone. As
+          // a result, we are out of here.
+          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
+        } else if(thread->addr_inside_register_stack(addr)) {
+          // Disable the yellow zone which sets the state that
+          // we've got a stack overflow problem.
+          if (thread->stack_yellow_zone_enabled()) {
+            thread->disable_stack_yellow_zone();
+          }
+          // Give us some room to process the exception.
+          thread->disable_register_stack_guard();
+          // Tracing with +Verbose.
+          if (Verbose) {
+            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
+            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
+            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
+            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
+                          thread->register_stack_base(),
+                          thread->register_stack_base() + thread->stack_size());
+          }
+
+          // Reguard the permanent register stack red zone just to be sure.
+          // We saw Windows silently disabling this without telling us.
+          thread->enable_register_stack_red_zone();
+
+          return Handle_Exception(exceptionInfo,
+            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
         }
 #endif
         if (thread->stack_yellow_zone_enabled()) {
@@ -2418,50 +2492,33 @@
           {
             // Null pointer exception.
 #ifdef _M_IA64
-            // We catch register stack overflows in compiled code by doing
-            // an explicit compare and executing a st8(G0, G0) if the
-            // BSP enters into our guard area.  We test for the overflow
-            // condition and fall into the normal null pointer exception
-            // code if BSP hasn't overflowed.
-            if ( in_java ) {
-              if(thread->register_stack_overflow()) {
-                assert((address)exceptionInfo->ContextRecord->IntS3 ==
-                                thread->register_stack_limit(),
-                               "GR7 doesn't contain register_stack_limit");
-                // Disable the yellow zone which sets the state that
-                // we've got a stack overflow problem.
-                if (thread->stack_yellow_zone_enabled()) {
-                  thread->disable_stack_yellow_zone();
+            // Process implicit null checks in compiled code. Note: Implicit null checks
+            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
+            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
+              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
+              // Handle implicit null check in UEP method entry
+              if (cb && (cb->is_frame_complete_at(pc) ||
+                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
+                if (Verbose) {
+                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
+                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
+                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
+                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
+                                *(bundle_start + 1), *bundle_start);
                 }
-                // Give us some room to process the exception
-                thread->disable_register_stack_guard();
-                // Update GR7 with the new limit so we can continue running
-                // compiled code.
-                exceptionInfo->ContextRecord->IntS3 =
-                               (ULONGLONG)thread->register_stack_limit();
                 return Handle_Exception(exceptionInfo,
-                       SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
-              } else {
-                //
-                // Check for implicit null
-                // We only expect null pointers in the stubs (vtable)
-                // the rest are checked explicitly now.
-                //
-                if (((uintptr_t)addr) < os::vm_page_size() ) {
-                  // an access to the first page of VM--assume it is a null pointer
-                  address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
-                  if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
-                }
+                  SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
               }
-            } // in_java
-
-            // IA64 doesn't use implicit null checking yet. So we shouldn't
-            // get here.
-            tty->print_raw_cr("Access violation, possible null pointer exception");
+            }
+
+            // Implicit null checks were processed above.  Hence, we should not reach
+            // here in the usual case => die!
+            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                          exceptionInfo->ContextRecord);
             return EXCEPTION_CONTINUE_SEARCH;
-#else /* !IA64 */
+
+#else // !IA64
 
             // Windows 98 reports faulting addresses incorrectly
             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
@@ -2493,7 +2550,24 @@
       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                    exceptionInfo->ContextRecord);
       return EXCEPTION_CONTINUE_SEARCH;
-    }
+    } // /EXCEPTION_ACCESS_VIOLATION
+    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+#if defined _M_IA64
+    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
+              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
+      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
+
+      // Compiled method patched to be non entrant? Following conditions must apply:
+      // 1. must be first instruction in bundle
+      // 2. must be a break instruction with appropriate code
+      if((((uint64_t) pc & 0x0F) == 0) &&
+         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
+        return Handle_Exception(exceptionInfo,
+                                (address)SharedRuntime::get_handle_wrong_method_stub());
+      }
+    } // /EXCEPTION_ILLEGAL_INSTRUCTION
+#endif
+
 
     if (in_java) {
       switch (exception_code) {
--- a/src/share/vm/adlc/adlparse.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/adlc/adlparse.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -168,7 +168,7 @@
   // Check for block delimiter
   if ( (_curchar != '%')
        || ( next_char(),  (_curchar != '{')) ) {
-    parse_err(SYNERR, "missing '%{' in instruction definition\n");
+    parse_err(SYNERR, "missing '%%{' in instruction definition\n");
     return;
   }
   next_char();                     // Maintain the invariant
@@ -253,7 +253,7 @@
   } while(_curchar != '%');
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing '%}' in instruction definition\n");
+    parse_err(SYNERR, "missing '%%}' in instruction definition\n");
     return;
   }
   // Check for "Set" form of chain rule
@@ -423,7 +423,7 @@
   skipws();
   // Check for block delimiter
   if ((_curchar != '%') || (*(_ptr+1) != '{')) { // If not open block
-    parse_err(SYNERR, "missing '%c{' in operand definition\n","%");
+    parse_err(SYNERR, "missing '%%{' in operand definition\n");
     return;
   }
   next_char(); next_char();        // Skip over "%{" symbol
@@ -483,7 +483,7 @@
   } while(_curchar != '%');
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing '%}' in operand definition\n");
+    parse_err(SYNERR, "missing '%%}' in operand definition\n");
     return;
   }
   // Add operand to tail of operand list
@@ -1324,7 +1324,7 @@
   // Check for block delimiter
   if ( (_curchar != '%')
        || ( next_char(),  (_curchar != '{')) ) {
-    parse_err(SYNERR, "missing '%{' in pipeline definition\n");
+    parse_err(SYNERR, "missing '%%{' in pipeline definition\n");
     return;
   }
   next_char();                     // Maintain the invariant
@@ -1341,7 +1341,7 @@
       skipws();
       if ( (_curchar != '%')
            || ( next_char(),  (_curchar != '{')) ) {
-        parse_err(SYNERR, "expected '%{'\n");
+        parse_err(SYNERR, "expected '%%{'\n");
         return;
       }
       next_char(); skipws();
@@ -1397,7 +1397,7 @@
       skipws();
       if ( (_curchar != '%')
            || ( next_char(),  (_curchar != '{')) ) {
-        parse_err(SYNERR, "expected '%{'\n");
+        parse_err(SYNERR, "expected '%%{'\n");
         return;
       }
       next_char(); skipws();
@@ -1586,7 +1586,7 @@
 
       if ( (_curchar != '%')
            || ( next_char(),  (_curchar != '}')) ) {
-        parse_err(SYNERR, "expected '%}', found \"%c\"\n", _curchar);
+        parse_err(SYNERR, "expected '%%}', found \"%c\"\n", _curchar);
       }
       next_char(); skipws();
 
@@ -1612,7 +1612,7 @@
 
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing \"%}\" in pipeline definition\n");
+    parse_err(SYNERR, "missing \"%%}\" in pipeline definition\n");
     return;
   }
 
@@ -1775,7 +1775,7 @@
   // Check for block delimiter
   if ( (_curchar != '%')
        || ( next_char(),  (_curchar != '{')) ) {
-    parse_err(SYNERR, "missing \"%{\" in pipe_class definition\n");
+    parse_err(SYNERR, "missing \"%%{\" in pipe_class definition\n");
     return;
   }
   next_char();
@@ -2062,7 +2062,7 @@
 
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing \"%}\" in pipe_class definition\n");
+    parse_err(SYNERR, "missing \"%%}\" in pipe_class definition\n");
     return;
   }
 
@@ -3341,12 +3341,12 @@
   char *disp        = NULL;
 
   if (_curchar != '%') {
-    parse_err(SYNERR, "Missing '%{' for 'interface' block.\n");
+    parse_err(SYNERR, "Missing '%%{' for 'interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '%'
   if (_curchar != '{') {
-    parse_err(SYNERR, "Missing '%{' for 'interface' block.\n");
+    parse_err(SYNERR, "Missing '%%{' for 'interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '{'
@@ -3354,7 +3354,7 @@
   do {
     char *field = get_ident();
     if (field == NULL) {
-      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%}' ending interface.\n");
+      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
     }
     if ( strcmp(field,"base") == 0 ) {
@@ -3370,13 +3370,13 @@
       disp  = interface_field_parse();
     }
     else {
-      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%}' ending interface.\n");
+      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
     }
   } while( _curchar != '%' );
   next_char();                  // Skip '%'
   if ( _curchar != '}' ) {
-    parse_err(SYNERR, "Missing '%}' for 'interface' block.\n");
+    parse_err(SYNERR, "Missing '%%}' for 'interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '}'
@@ -3403,12 +3403,12 @@
   const char *greater_format = "gt";
 
   if (_curchar != '%') {
-    parse_err(SYNERR, "Missing '%{' for 'cond_interface' block.\n");
+    parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '%'
   if (_curchar != '{') {
-    parse_err(SYNERR, "Missing '%{' for 'cond_interface' block.\n");
+    parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '{'
@@ -3416,7 +3416,7 @@
   do {
     char *field = get_ident();
     if (field == NULL) {
-      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%}' ending interface.\n");
+      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
     }
     if ( strcmp(field,"equal") == 0 ) {
@@ -3438,13 +3438,13 @@
       greater = interface_field_parse(&greater_format);
     }
     else {
-      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%}' ending interface.\n");
+      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
     }
   } while( _curchar != '%' );
   next_char();                  // Skip '%'
   if ( _curchar != '}' ) {
-    parse_err(SYNERR, "Missing '%}' for 'interface' block.\n");
+    parse_err(SYNERR, "Missing '%%}' for 'interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '}'
@@ -3543,7 +3543,7 @@
   }
   else if ((cnstr = find_cpp_block("match constructor")) == NULL ) {
     parse_err(SYNERR, "invalid construction of match rule\n"
-              "Missing ';' or invalid '%{' and '%}' constructor\n");
+              "Missing ';' or invalid '%%{' and '%%}' constructor\n");
     return NULL;                  // No MatchRule to return
   }
   if (_AD._adl_debug > 1)
@@ -3646,7 +3646,7 @@
       // Check for closing '"' and '%}' in format description
       skipws();                   // Move to closing '%}'
       if ( _curchar != '%' ) {
-        parse_err(SYNERR, "non-blank characters between closing '\"' and '%' in format");
+        parse_err(SYNERR, "non-blank characters between closing '\"' and '%%' in format");
         return NULL;
       }
     } // Done with format description inside
@@ -3654,7 +3654,7 @@
     skipws();
     // Past format description, at '%'
     if ( _curchar != '%' || *(_ptr+1) != '}' ) {
-      parse_err(SYNERR, "missing '%}' at end of format block");
+      parse_err(SYNERR, "missing '%%}' at end of format block");
       return NULL;
     }
     next_char();                  // Move past the '%'
@@ -3785,7 +3785,7 @@
   skipws();
   // Past format description, at '%'
   if ( _curchar != '%' || *(_ptr+1) != '}' ) {
-    parse_err(SYNERR, "missing '%}' at end of format block");
+    parse_err(SYNERR, "missing '%%}' at end of format block");
     return NULL;
   }
   next_char();                  // Move past the '%'
@@ -3834,7 +3834,7 @@
   skipws();                        // Skip leading whitespace
   if ((_curchar != '%')
       || (next_char(), (_curchar != '{')) ) { // If not open block
-    parse_err(SYNERR, "missing '%{' in expand definition\n");
+    parse_err(SYNERR, "missing '%%{' in expand definition\n");
     return(NULL);
   }
   next_char();                     // Maintain the invariant
@@ -3933,7 +3933,7 @@
   } while(_curchar != '%');
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing '%}' in expand rule definition\n");
+    parse_err(SYNERR, "missing '%%}' in expand rule definition\n");
     return(NULL);
   }
   next_char();
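
All the adlparse.cpp changes fix the same defect: parse_err() forwards its message to a printf-style formatter, so a literal '%' in a diagnostic must be escaped as '%%'. A standalone illustration (parse_err here is a stand-in, not adlc's real implementation):

    #include <cstdarg>
    #include <cstdio>

    // Stand-in with the same printf-style contract as adlc's parse_err().
    static void parse_err(const char* fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      vfprintf(stderr, fmt, ap);
      va_end(ap);
    }

    int main() {
      // '%%{' prints a literal '%{'; an unescaped '%{' would be interpreted
      // as a conversion specification and corrupt (or crash) the message.
      parse_err("missing '%%{' in instruction definition\n");
      return 0;
    }
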
--- a/src/share/vm/c1/c1_CodeStubs.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/c1/c1_CodeStubs.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -31,6 +31,7 @@
 #include "c1/c1_LIR.hpp"
 #include "c1/c1_Runtime1.hpp"
 #include "utilities/array.hpp"
+#include "utilities/macros.hpp"
 
 class CodeEmitInfo;
 class LIR_Assembler;
@@ -515,7 +516,7 @@
 };
 
 //////////////////////////////////////////////////////////////////////////////////////////
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 // Code stubs for Garbage-First barriers.
 class G1PreBarrierStub: public CodeStub {
@@ -608,7 +609,7 @@
 #endif // PRODUCT
 };
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 //////////////////////////////////////////////////////////////////////////////////////////
 
 #endif // SHARE_VM_C1_C1_CODESTUBS_HPP
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -3667,11 +3667,12 @@
   }
 
   // now perform tests that are based on flag settings
-  if (callee->force_inline() || callee->should_inline()) {
-    // ignore heuristic controls on inlining
-    if (callee->force_inline())
-      print_inlining(callee, "force inline by annotation");
+  if (callee->force_inline()) {
+    print_inlining(callee, "force inline by annotation");
+  } else if (callee->should_inline()) {
+    print_inlining(callee, "force inline by CompileOracle");
   } else {
+    // use heuristic controls on inlining
     if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("inlining too deep");
     if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
     if (callee->code_size_for_inlining() > max_inline_size()    ) INLINE_BAILOUT("callee is too large");
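
Restructured, the decision reads as a ladder: a force_inline annotation wins, then a CompileOracle inline directive, and only then the size/depth heuristics, with each overriding path now reported distinctly. A toy sketch of the resulting control flow (types and limits are illustrative):

    #include <cstdio>

    struct Callee {          // toy stand-in for ciMethod
      bool force_inline;     // @ForceInline annotation
      bool should_inline;    // CompileOracle "inline" directive
      int  inline_level;     // current inlining depth
    };

    static const char* inline_decision(const Callee& c, int max_inline_level) {
      if (c.force_inline)  return "force inline by annotation";
      if (c.should_inline) return "force inline by CompileOracle";
      // heuristic controls apply only when neither override is set
      if (c.inline_level > max_inline_level) return "bailout: inlining too deep";
      return "inline";
    }

    int main() {
      Callee c = {false, true, 12};
      printf("%s\n", inline_decision(c, 9));  // directive overrides the depth limit
      return 0;
    }
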
--- a/src/share/vm/c1/c1_Instruction.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/c1/c1_Instruction.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -188,7 +188,7 @@
 
 ciType* LoadIndexed::declared_type() const {
   ciType* array_type = array()->declared_type();
-  if (array_type == NULL) {
+  if (array_type == NULL || !array_type->is_loaded()) {
     return NULL;
   }
   assert(array_type->is_array_klass(), "what else?");
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -35,9 +35,10 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/bitMap.inline.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/heapRegion.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 #ifdef ASSERT
 #define __ gen()->lir(__FILE__, __LINE__)->
@@ -1417,12 +1418,12 @@
                                bool do_load, bool patch, CodeEmitInfo* info) {
   // Do the pre-write barrier, if any.
   switch (_bs->kind()) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
       break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableModRef:
     case BarrierSet::CardTableExtension:
       // No pre barriers
@@ -1439,12 +1440,12 @@
 
 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
   switch (_bs->kind()) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       G1SATBCardTableModRef_post_barrier(addr,  new_val);
       break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableModRef:
     case BarrierSet::CardTableExtension:
       CardTableModRef_post_barrier(addr,  new_val);
@@ -1459,7 +1460,7 @@
 }
 
 ////////////////////////////////////////////////////////////////////////
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                      bool do_load, bool patch, CodeEmitInfo* info) {
@@ -1575,7 +1576,7 @@
   __ branch_destination(slow->continuation());
 }
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 ////////////////////////////////////////////////////////////////////////
 
 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
@@ -2181,7 +2182,7 @@
 
   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // We might be reading the value of the referent field of a
   // Reference object in order to attach it back to the live
   // object graph. If G1 is enabled then we need to record
@@ -2311,7 +2312,7 @@
       __ branch_destination(Lcont->label());
     }
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   if (x->is_volatile() && os::is_MP()) __ membar_acquire();
 }
--- a/src/share/vm/ci/ciEnv.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/ci/ciEnv.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -52,6 +52,7 @@
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/dtrace.hpp"
+#include "utilities/macros.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
 #endif
@@ -1168,7 +1169,7 @@
 
 void ciEnv::dump_replay_data(outputStream* out) {
   ASSERT_IN_VM;
-
+  ResourceMark rm;
 #if INCLUDE_JVMTI
   out->print_cr("JvmtiExport can_access_local_variables %d",     _jvmti_can_access_local_variables);
   out->print_cr("JvmtiExport can_hotswap_or_post_breakpoint %d", _jvmti_can_hotswap_or_post_breakpoint);
--- a/src/share/vm/ci/ciInstanceKlass.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/ci/ciInstanceKlass.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -580,6 +580,7 @@
   }
   void do_field(fieldDescriptor* fd) {
     if (fd->is_final() && !fd->has_initial_value()) {
+      ResourceMark rm;
       oop mirror = fd->field_holder()->java_mirror();
       _out->print("staticfield %s %s %s ", _holder, fd->name()->as_quoted_ascii(), fd->signature()->as_quoted_ascii());
       switch (fd->field_type()) {
@@ -643,6 +644,8 @@
 
 void ciInstanceKlass::dump_replay_data(outputStream* out) {
   ASSERT_IN_VM;
+  ResourceMark rm;
+
   InstanceKlass* ik = get_instanceKlass();
   ConstantPool*  cp = ik->constants();
 
--- a/src/share/vm/ci/ciMethod.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/ci/ciMethod.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -977,7 +977,7 @@
 // ciMethod::set_not_compilable
 //
 // Tell the VM that this method cannot be compiled at all.
-void ciMethod::set_not_compilable() {
+void ciMethod::set_not_compilable(const char* reason) {
   check_is_loaded();
   VM_ENTRY_MARK;
   ciEnv* env = CURRENT_ENV;
@@ -986,7 +986,7 @@
   } else {
     _is_c2_compilable = false;
   }
-  get_Method()->set_not_compilable(env->comp_level());
+  get_Method()->set_not_compilable(env->comp_level(), true, reason);
 }
 
 // ------------------------------------------------------------------
@@ -1178,6 +1178,7 @@
 
 void ciMethod::dump_replay_data(outputStream* st) {
   ASSERT_IN_VM;
+  ResourceMark rm;
   Method* method = get_Method();
   Klass*  holder = method->method_holder();
   st->print_cr("ciMethod %s %s %s %d %d %d %d %d",
--- a/src/share/vm/ci/ciMethod.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/ci/ciMethod.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -252,7 +252,7 @@
   bool has_option(const char *option);
   bool can_be_compiled();
   bool can_be_osr_compiled(int entry_bci);
-  void set_not_compilable();
+  void set_not_compilable(const char* reason = NULL);
   bool has_compiled_code();
   void log_nmethod_identity(xmlStream* log);
   bool is_not_reached(int bci);
--- a/src/share/vm/ci/ciMethodData.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/ci/ciMethodData.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -374,6 +374,7 @@
 
 void ciMethodData::dump_replay_data(outputStream* out) {
   ASSERT_IN_VM;
+  ResourceMark rm;
   MethodData* mdo = get_MethodData();
   Method* method = mdo->method();
   Klass* holder = method->method_holder();
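
The ResourceMark additions in the ci*::dump_replay_data() methods above share one purpose: the Symbol-to-string conversions done while printing allocate from the current thread's resource area, and the mark releases those temporaries when the method returns. A toy sketch of the idiom (simplified mark over a global pool; not HotSpot's allocator):

#include <cstdio>
#include <cstring>
#include <vector>

// Toy resource area: allocations are recorded, and a mark rolls them
// back when it leaves scope - the contract ResourceMark provides.
static std::vector<char*> g_resource_area;

struct ResourceMark {
  size_t _saved;
  ResourceMark() : _saved(g_resource_area.size()) {}
  ~ResourceMark() {
    while (g_resource_area.size() > _saved) {
      delete[] g_resource_area.back();
      g_resource_area.pop_back();
    }
  }
};

static char* resource_strdup(const char* s) {   // as_quoted_ascii() stand-in
  char* copy = new char[std::strlen(s) + 1];
  std::strcpy(copy, s);
  g_resource_area.push_back(copy);
  return copy;
}

void dump_replay_data() {
  ResourceMark rm;                    // everything below is freed on return
  std::puts(resource_strdup("ciMethod java/lang/Object <init> ..."));
}
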
--- a/src/share/vm/ci/ciReplay.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/ci/ciReplay.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -30,6 +30,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "utilities/copy.hpp"
+#include "utilities/macros.hpp"
 
 #ifndef PRODUCT
 
--- a/src/share/vm/compiler/compileBroker.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/compiler/compileBroker.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -1398,7 +1398,7 @@
       method->print_short_name(tty);
       tty->cr();
     }
-    method->set_not_compilable_quietly();
+    method->set_not_compilable(CompLevel_all, !quietly, "excluded by CompilerOracle");
   }
 
   return false;
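
Two call shapes now exist: ciMethod::set_not_compilable() keeps working through the NULL default, while sites like the CompileBroker one above attach a reason for the log. A minimal sketch of the wrapper's shape (the printf body is a hypothetical stand-in for Method's logging):

#include <cstdio>

struct ciMethodSketch {
  // Same shape as the patched declaration: an optional reason string,
  // NULL meaning "no reason recorded".
  void set_not_compilable(const char* reason = NULL) {
    std::printf("made not compilable%s%s\n",
                reason != NULL ? ": " : "",
                reason != NULL ? reason : "");
  }
};

int main() {
  ciMethodSketch m;
  m.set_not_compilable();                              // pre-patch call sites
  m.set_not_compilable("excluded by CompilerOracle");  // reason from the hunk
  return 0;
}
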
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -102,7 +102,7 @@
   // temporarily disabled).
   switch (dictionaryChoice) {
     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
-      _dictionary = new BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>(mr);
+      _dictionary = new AFLBinaryTreeDictionary(mr);
       break;
     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
@@ -122,7 +122,8 @@
   // moved to its new location before the klass is moved.
   // Set the _refillSize for the linear allocation blocks
   if (!use_adaptive_freelists) {
-    FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
+    FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
+                                           FreeBlockDictionary<FreeChunk>::atLeast);
     // The small linAB initially has all the space and will allocate
     // a chunk of any size.
     HeapWord* addr = (HeapWord*) fc;
@@ -1647,7 +1648,8 @@
 FreeChunk*
 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
   assert_locked();
-  FreeChunk* fc = _dictionary->get_chunk(size);
+  FreeChunk* fc = _dictionary->get_chunk(size,
+                                         FreeBlockDictionary<FreeChunk>::atLeast);
   if (fc == NULL) {
     return NULL;
   }
@@ -1664,7 +1666,8 @@
 FreeChunk*
 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
   assert_locked();
-  FreeChunk* fc = _dictionary->get_chunk(size);
+  FreeChunk* fc = _dictionary->get_chunk(size,
+                                         FreeBlockDictionary<FreeChunk>::atLeast);
   if (fc == NULL) {
     return fc;
   }
@@ -1677,7 +1680,8 @@
   if (fc->size() < size + MinChunkSize) {
     // Return the chunk to the dictionary and go get a bigger one.
     returnChunkToDictionary(fc);
-    fc = _dictionary->get_chunk(size + MinChunkSize);
+    fc = _dictionary->get_chunk(size + MinChunkSize,
+                                FreeBlockDictionary<FreeChunk>::atLeast);
     if (fc == NULL) {
       return NULL;
     }
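
Every get_chunk() call in this space now names its matching mode explicitly rather than relying on an implicit default. FreeBlockDictionary<FreeChunk>::atLeast, as used above, accepts any chunk of at least the requested size; here is a linear stand-in for the real binary-tree search (atLeast comes from the hunks, the other enumerator is an illustrative assumption):

#include <cstddef>

struct FreeChunk {
  size_t _size;
  size_t size() const { return _size; }
};

struct FreeBlockDictionarySketch {
  enum Dither { atLeast, exactly };

  FreeChunk* _chunks;
  size_t     _count;

  // Linear stand-in for the tree search: the Dither argument decides
  // whether a larger chunk may satisfy the request.
  FreeChunk* get_chunk(size_t size, Dither dither) {
    for (size_t i = 0; i < _count; i++) {
      bool match = (dither == exactly) ? _chunks[i].size() == size
                                       : _chunks[i].size() >= size;
      if (match) return &_chunks[i];
    }
    return NULL;
  }
};
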
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -131,7 +131,7 @@
   LinearAllocBlock _smallLinearAllocBlock;
 
   FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
-  FreeBlockDictionary<FreeChunk>* _dictionary;    // ptr to dictionary for large size blocks
+  AFLBinaryTreeDictionary* _dictionary;    // ptr to dictionary for large size blocks
 
   AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
                                        // indexed array for small size blocks
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,8 +25,6 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
 
-typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
-
 #define VM_STRUCTS_CMS(nonstatic_field, \
                    volatile_nonstatic_field, \
                    static_field) \
@@ -42,9 +40,9 @@
      static_field(ConcurrentMarkSweepThread,   _collector,                                    CMSCollector*)                         \
   nonstatic_field(LinearAllocBlock,            _word_size,                                    size_t)                                \
   nonstatic_field(AFLBinaryTreeDictionary,     _total_size,                                   size_t)                                \
+  nonstatic_field(CompactibleFreeListSpace,    _dictionary,                                   AFLBinaryTreeDictionary*)              \
   nonstatic_field(CompactibleFreeListSpace,    _indexedFreeList[0],                           FreeList<FreeChunk>)                   \
-  nonstatic_field(CompactibleFreeListSpace,    _smallLinearAllocBlock,                        LinearAllocBlock)                      \
-  nonstatic_field(CompactibleFreeListSpace,    _dictionary,                                   FreeBlockDictionary<FreeChunk>*)
+  nonstatic_field(CompactibleFreeListSpace,    _smallLinearAllocBlock,                        LinearAllocBlock)
 
 
 #define VM_TYPES_CMS(declare_type,                                        \
@@ -63,10 +61,9 @@
   declare_toplevel_type(SurrogateLockerThread*)                           \
   declare_toplevel_type(CompactibleFreeListSpace*)                        \
   declare_toplevel_type(CMSCollector*)                                    \
-  declare_toplevel_type(AFLBinaryTreeDictionary*)                         \
+  declare_toplevel_type(AFLBinaryTreeDictionary)                          \
   declare_toplevel_type(LinearAllocBlock)                                 \
-  declare_toplevel_type(FreeBlockDictionary<FreeChunk>)                   \
-           declare_type(AFLBinaryTreeDictionary, FreeBlockDictionary<FreeChunk>)
+  declare_toplevel_type(FreeBlockDictionary<FreeChunk>)
 
 #define VM_INT_CONSTANTS_CMS(declare_constant)                            \
   declare_constant(Generation::ConcurrentMarkSweep)                       \
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -571,19 +571,14 @@
     _sleep_factor             =     0.0;
     _marking_task_overhead    =     1.0;
   } else {
-    if (ConcGCThreads > 0) {
-      // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
+    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
+      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
       // if both are set
-
-      _parallel_marking_threads = (uint) ConcGCThreads;
-      _max_parallel_marking_threads = _parallel_marking_threads;
       _sleep_factor             = 0.0;
       _marking_task_overhead    = 1.0;
     } else if (G1MarkingOverheadPercent > 0) {
-      // we will calculate the number of parallel marking threads
-      // based on a target overhead with respect to the soft real-time
-      // goal
-
+      // We will calculate the number of parallel marking threads based
+      // on a target overhead with respect to the soft real-time goal
       double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
       double overall_cm_overhead =
         (double) MaxGCPauseMillis * marking_overhead /
@@ -596,17 +591,22 @@
       double sleep_factor =
                          (1.0 - marking_task_overhead) / marking_task_overhead;
 
-      _parallel_marking_threads = (uint) marking_thread_num;
-      _max_parallel_marking_threads = _parallel_marking_threads;
+      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
       _sleep_factor             = sleep_factor;
       _marking_task_overhead    = marking_task_overhead;
     } else {
-      _parallel_marking_threads = scale_parallel_threads((uint)ParallelGCThreads);
-      _max_parallel_marking_threads = _parallel_marking_threads;
+      // Calculate the number of parallel marking threads by scaling
+      // the number of parallel GC threads.
+      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
+      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
       _sleep_factor             = 0.0;
       _marking_task_overhead    = 1.0;
     }
 
+    assert(ConcGCThreads > 0, "Should have been set");
+    _parallel_marking_threads = (uint) ConcGCThreads;
+    _max_parallel_marking_threads = _parallel_marking_threads;
+
     if (parallel_marking_threads() > 1) {
       _cleanup_task_overhead = 1.0;
     } else {
@@ -1190,7 +1190,7 @@
     uint active_workers = MAX2(1U, parallel_marking_threads());
 
     CMRootRegionScanTask task(this);
-    if (parallel_marking_threads() > 0) {
+    if (use_parallel_marking_threads()) {
       _parallel_workers->set_active_workers((int) active_workers);
       _parallel_workers->run_task(&task);
     } else {
@@ -1226,7 +1226,7 @@
   set_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
-  if (parallel_marking_threads() > 0) {
+  if (use_parallel_marking_threads()) {
     _parallel_workers->set_active_workers((int)active_workers);
     // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
     // and the decisions on that MT processing is made elsewhere.
@@ -2167,7 +2167,8 @@
   assert(tmp_free_list.is_empty(), "post-condition");
 }
 
-// Support closures for reference procssing in G1
+// Supporting Object and Oop closures for reference discovery
+// and processing during marking
 
 bool G1CMIsAliveClosure::do_object_b(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
@@ -2175,73 +2176,26 @@
          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
 }
 
-class G1CMKeepAliveClosure: public ExtendedOopClosure {
-  G1CollectedHeap* _g1;
-  ConcurrentMark*  _cm;
- public:
-  G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm) :
-    _g1(g1), _cm(cm) {
-    assert(Thread::current()->is_VM_thread(), "otherwise fix worker id");
-  }
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-
-  template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    HeapWord* addr = (HeapWord*)obj;
-
-    if (_cm->verbose_high()) {
-      gclog_or_tty->print_cr("\t[0] we're looking at location "
-                             "*"PTR_FORMAT" = "PTR_FORMAT,
-                             p, (void*) obj);
-    }
-
-    if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
-      _cm->mark_and_count(obj);
-      _cm->mark_stack_push(obj);
-    }
-  }
-};
-
-class G1CMDrainMarkingStackClosure: public VoidClosure {
-  ConcurrentMark*               _cm;
-  CMMarkStack*                  _markStack;
-  G1CMKeepAliveClosure*         _oopClosure;
- public:
-  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMMarkStack* markStack,
-                               G1CMKeepAliveClosure* oopClosure) :
-    _cm(cm),
-    _markStack(markStack),
-    _oopClosure(oopClosure) { }
-
-  void do_void() {
-    _markStack->drain(_oopClosure, _cm->nextMarkBitMap(), false);
-  }
-};
-
-// 'Keep Alive' closure used by parallel reference processing.
-// An instance of this closure is used in the parallel reference processing
-// code rather than an instance of G1CMKeepAliveClosure. We could have used
-// the G1CMKeepAliveClosure as it is MT-safe. Also reference objects are
-// placed on to discovered ref lists once so we can mark and push with no
-// need to check whether the object has already been marked. Using the
-// G1CMKeepAliveClosure would mean, however, having all the worker threads
-// operating on the global mark stack. This means that an individual
-// worker would be doing lock-free pushes while it processes its own
-// discovered ref list followed by drain call. If the discovered ref lists
-// are unbalanced then this could cause interference with the other
-// workers. Using a CMTask (and its embedded local data structures)
-// avoids that potential interference.
-class G1CMParKeepAliveAndDrainClosure: public OopClosure {
+// 'Keep Alive' oop closure used by both serial and parallel reference processing.
+// Uses the CMTask associated with a worker thread (for serial reference
+// processing the CMTask for worker 0 is used) to preserve (mark) and
+// trace referent objects.
+//
+// Using the CMTask and embedded local queues avoids having the worker
+// threads operating on the global mark stack. This reduces the risk
+// of overflowing the stack - which we would rather avoid at this late
+// stage. Also using the tasks' local queues removes the potential
+// for the workers to interfere with each other, which could occur when
+// operating on the global stack.
+
+class G1CMKeepAliveAndDrainClosure: public OopClosure {
   ConcurrentMark*  _cm;
   CMTask*          _task;
   int              _ref_counter_limit;
   int              _ref_counter;
  public:
-  G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
-    _cm(cm), _task(task),
-    _ref_counter_limit(G1RefProcDrainInterval) {
+  G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
+    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval) {
     assert(_ref_counter_limit > 0, "sanity");
     _ref_counter = _ref_counter_limit;
   }
@@ -2262,18 +2216,22 @@
       _ref_counter--;
 
       if (_ref_counter == 0) {
-        // We have dealt with _ref_counter_limit references, pushing them and objects
-        // reachable from them on to the local stack (and possibly the global stack).
-        // Call do_marking_step() to process these entries. We call the routine in a
-        // loop, which we'll exit if there's nothing more to do (i.e. we're done
-        // with the entries that we've pushed as a result of the deal_with_reference
-        // calls above) or we overflow.
-        // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
-        // while there may still be some work to do. (See the comment at the
-        // beginning of CMTask::do_marking_step() for those conditions - one of which
-        // is reaching the specified time target.) It is only when
-        // CMTask::do_marking_step() returns without setting the has_aborted() flag
-        // that the marking has completed.
+        // We have dealt with _ref_counter_limit references, pushing them
+        // and objects reachable from them on to the local stack (and
+        // possibly the global stack). Call CMTask::do_marking_step() to
+        // process these entries.
+        //
+        // We call CMTask::do_marking_step() in a loop, which we'll exit if
+        // there's nothing more to do (i.e. we're done with the entries that
+        // were pushed as a result of the CMTask::deal_with_reference() calls
+        // above) or we overflow.
+        //
+        // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
+        // flag while there may still be some work to do. (See the comment at
+        // the beginning of CMTask::do_marking_step() for those conditions -
+        // one of which is reaching the specified time target.) It is only
+        // when CMTask::do_marking_step() returns without setting the
+        // has_aborted() flag that the marking step has completed.
         do {
           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
           _task->do_marking_step(mark_step_duration_ms,
@@ -2290,36 +2248,59 @@
   }
 };
 
-class G1CMParDrainMarkingStackClosure: public VoidClosure {
+// 'Drain' oop closure used by both serial and parallel reference processing.
+// Uses the CMTask associated with a given worker thread (for serial
+// reference processing the CMTask for worker 0 is used). Calls the
+// do_marking_step routine, with an unbelievably large timeout value,
+// to drain the marking data structures of the remaining entries
+// added by the 'keep alive' oop closure above.
+
+class G1CMDrainMarkingStackClosure: public VoidClosure {
   ConcurrentMark* _cm;
-  CMTask* _task;
+  CMTask*         _task;
+  bool            _do_stealing;
+  bool            _do_termination;
  public:
-  G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) :
-    _cm(cm), _task(task) { }
+  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_par) :
+    _cm(cm), _task(task) {
+    assert(is_par || _task->worker_id() == 0,
+           "Only task for worker 0 should be used if ref processing is single threaded");
+    // We only allow stealing and only enter the termination protocol
+    // in CMTask::do_marking_step() if this closure is being instantiated
+    // for parallel reference processing.
+    _do_stealing = _do_termination = is_par;
+  }
 
   void do_void() {
     do {
       if (_cm->verbose_high()) {
-        gclog_or_tty->print_cr("\t[%u] Drain: Calling do marking_step",
-                               _task->worker_id());
+        gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - "
+                               "stealing: %s, termination: %s",
+                               _task->worker_id(),
+                               BOOL_TO_STR(_do_stealing),
+                               BOOL_TO_STR(_do_termination));
       }
 
-      // We call CMTask::do_marking_step() to completely drain the local and
-      // global marking stacks. The routine is called in a loop, which we'll
-      // exit if there's nothing more to do (i.e. we'completely drained the
-      // entries that were pushed as a result of applying the
-      // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref
-      // lists above) or we overflow the global marking stack.
-      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
-      // while there may still be some work to do. (See the comment at the
-      // beginning of CMTask::do_marking_step() for those conditions - one of which
-      // is reaching the specified time target.) It is only when
-      // CMTask::do_marking_step() returns without setting the has_aborted() flag
-      // that the marking has completed.
+      // We call CMTask::do_marking_step() to completely drain the local
+      // and global marking stacks of entries pushed by the 'keep alive'
+      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
+      //
+      // CMTask::do_marking_step() is called in a loop, which we'll exit
+      // if there's nothing more to do (i.e. we've completely drained the
+      // entries that were pushed as a result of applying the 'keep alive'
+      // closure to the entries on the discovered ref lists) or we overflow
+      // the global marking stack.
+      //
+      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
+      // flag while there may still be some work to do. (See the comment at
+      // the beginning of CMTask::do_marking_step() for those conditions -
+      // one of which is reaching the specified time target.) It is only
+      // when CMTask::do_marking_step() returns without setting the
+      // has_aborted() flag that the marking step has completed.
 
       _task->do_marking_step(1000000000.0 /* something very large */,
-                             true /* do_stealing    */,
-                             true /* do_termination */);
+                             _do_stealing,
+                             _do_termination);
     } while (_task->has_aborted() && !_cm->has_overflown());
   }
 };
@@ -2352,19 +2333,23 @@
   ProcessTask&     _proc_task;
   G1CollectedHeap* _g1h;
   ConcurrentMark*  _cm;
+  bool             _processing_is_mt;
 
 public:
   G1CMRefProcTaskProxy(ProcessTask& proc_task,
                      G1CollectedHeap* g1h,
                      ConcurrentMark* cm) :
     AbstractGangTask("Process reference objects in parallel"),
-    _proc_task(proc_task), _g1h(g1h), _cm(cm) { }
+    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
+      ReferenceProcessor* rp = _g1h->ref_processor_cm();
+      _processing_is_mt = rp->processing_is_mt();
+    }
 
   virtual void work(uint worker_id) {
     CMTask* marking_task = _cm->task(worker_id);
     G1CMIsAliveClosure g1_is_alive(_g1h);
-    G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
-    G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
+    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
+    G1CMDrainMarkingStackClosure g1_par_drain(_cm, marking_task, _processing_is_mt);
 
     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
   }
@@ -2372,6 +2357,7 @@
 
 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
+  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
 
   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
 
@@ -2399,6 +2385,7 @@
 
 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
+  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
 
   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
 
@@ -2429,59 +2416,58 @@
     // See the comment in G1CollectedHeap::ref_processing_init()
     // about how reference processing currently works in G1.
 
-    // Process weak references.
+    // Set the soft reference policy
     rp->setup_policy(clear_all_soft_refs);
     assert(_markStack.isEmpty(), "mark stack should be empty");
 
-    G1CMKeepAliveClosure g1_keep_alive(g1h, this);
-    G1CMDrainMarkingStackClosure
-      g1_drain_mark_stack(this, &_markStack, &g1_keep_alive);
-
-    // We use the work gang from the G1CollectedHeap and we utilize all
-    // the worker threads.
-    uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U;
+    // Non-MT instances of the 'Keep Alive' and 'Complete GC' oop closures.
+    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0));
+    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), false);
+
+    // We need at least one active thread. If reference processing is
+    // not multi-threaded we use the current (ConcurrentMarkThread) thread,
+    // otherwise we use the work gang from the G1CollectedHeap and we
+    // utilize all the worker threads we can.
+    uint active_workers = (rp->processing_is_mt() && g1h->workers() != NULL
+                                ? g1h->workers()->active_workers()
+                                : 1U);
+
     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
 
     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                               g1h->workers(), active_workers);
 
-    if (rp->processing_is_mt()) {
-      // Set the degree of MT here.  If the discovery is done MT, there
-      // may have been a different number of threads doing the discovery
-      // and a different number of discovered lists may have Ref objects.
-      // That is OK as long as the Reference lists are balanced (see
-      // balance_all_queues() and balance_queues()).
-      rp->set_active_mt_degree(active_workers);
-
-      rp->process_discovered_references(&g1_is_alive,
+    AbstractRefProcTaskExecutor* executor = (rp->processing_is_mt()
+                                                ? &par_task_executor
+                                                : NULL);
+
+    // Set the degree of MT processing here.  If the discovery was done MT,
+    // the number of threads involved during discovery could differ from
+    // the number of active workers.  This is OK as long as the discovered
+    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
+    rp->set_active_mt_degree(active_workers);
+
+    // Process the weak references.
+    rp->process_discovered_references(&g1_is_alive,
                                       &g1_keep_alive,
                                       &g1_drain_mark_stack,
-                                      &par_task_executor);
-
-      // The work routines of the parallel keep_alive and drain_marking_stack
-      // will set the has_overflown flag if we overflow the global marking
-      // stack.
-    } else {
-      rp->process_discovered_references(&g1_is_alive,
-                                        &g1_keep_alive,
-                                        &g1_drain_mark_stack,
-                                        NULL);
-    }
+                                      executor);
+
+    // The do_oop work routines of the keep_alive and drain_marking_stack
+    // oop closures will set the has_overflown flag if we overflow the
+    // global marking stack.
 
     assert(_markStack.overflow() || _markStack.isEmpty(),
             "mark stack should be empty (unless it overflowed)");
     if (_markStack.overflow()) {
-      // Should have been done already when we tried to push an
+      // This should have been done already when we tried to push an
       // entry on to the global mark stack. But let's do it again.
       set_has_overflown();
     }
 
-    if (rp->processing_is_mt()) {
-      assert(rp->num_q() == active_workers, "why not");
-      rp->enqueue_discovered_references(&par_task_executor);
-    } else {
-      rp->enqueue_discovered_references();
-    }
+    assert(rp->num_q() == active_workers, "why not");
+
+    rp->enqueue_discovered_references(executor);
 
     rp->verify_no_references_recorded();
     assert(!rp->discovery_enabled(), "Post condition");
@@ -3242,7 +3228,9 @@
 }
 
 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
-  _parallel_workers->print_worker_threads_on(st);
+  if (use_parallel_marking_threads()) {
+    _parallel_workers->print_worker_threads_on(st);
+  }
 }
 
 // We take a break if someone is trying to stop the world.
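
The net effect of the weak-reference rewrite above: one pair of closures serves both modes, and the choice between MT and serial processing is carried entirely by the executor argument, where NULL means "run on the calling thread". A sketch of that dispatch convention (simplified names, not the ReferenceProcessor API):

#include <cstdio>

struct WorkGangExecutor {
  void run_task() { std::puts("closures applied by the parallel work gang"); }
};

// NULL executor == serial processing on the current thread, the same
// convention process_discovered_references() relies on above.
static void process_discovered_references(WorkGangExecutor* executor) {
  if (executor != NULL) {
    executor->run_task();
  } else {
    std::puts("closures applied on the calling thread");
  }
}

int main() {
  WorkGangExecutor gang;
  bool processing_is_mt = false;          // rp->processing_is_mt() stand-in
  process_discovered_references(processing_is_mt ? &gang : NULL);
  return 0;
}
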
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -371,8 +371,8 @@
   friend class CalcLiveObjectsClosure;
   friend class G1CMRefProcTaskProxy;
   friend class G1CMRefProcTaskExecutor;
-  friend class G1CMParKeepAliveAndDrainClosure;
-  friend class G1CMParDrainMarkingStackClosure;
+  friend class G1CMKeepAliveAndDrainClosure;
+  friend class G1CMDrainMarkingStackClosure;
 
 protected:
   ConcurrentMarkThread* _cmThread;   // the thread doing the work
@@ -499,17 +499,26 @@
   }
 
   // accessor methods
-  uint parallel_marking_threads() { return _parallel_marking_threads; }
-  uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
-  double sleep_factor()             { return _sleep_factor; }
-  double marking_task_overhead()    { return _marking_task_overhead;}
-  double cleanup_sleep_factor()     { return _cleanup_sleep_factor; }
-  double cleanup_task_overhead()    { return _cleanup_task_overhead;}
+  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
+  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
+  double sleep_factor()                     { return _sleep_factor; }
+  double marking_task_overhead()            { return _marking_task_overhead;}
+  double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
+  double cleanup_task_overhead()            { return _cleanup_task_overhead;}
 
-  HeapWord*               finger()        { return _finger;   }
-  bool                    concurrent()    { return _concurrent; }
-  uint                    active_tasks()  { return _active_tasks; }
-  ParallelTaskTerminator* terminator()    { return &_terminator; }
+  bool use_parallel_marking_threads() const {
+    assert(parallel_marking_threads() <=
+           max_parallel_marking_threads(), "sanity");
+    assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
+           parallel_marking_threads() > 0,
+           "parallel workers not set up correctly");
+    return _parallel_workers != NULL;
+  }
+
+  HeapWord*               finger()          { return _finger;   }
+  bool                    concurrent()      { return _concurrent; }
+  uint                    active_tasks()    { return _active_tasks; }
+  ParallelTaskTerminator* terminator()      { return &_terminator; }
 
   // It claims the next available region to be scanned by a marking
   // task/thread. It might return NULL if the next region is empty or
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -28,8 +28,9 @@
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oop.inline.hpp"
+#include "utilities/macros.hpp"
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 class DirtyCardQueueSet;
 
@@ -120,6 +121,6 @@
 };
 
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -32,8 +32,9 @@
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/space.inline.hpp"
 #include "memory/watermark.hpp"
+#include "utilities/macros.hpp"
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 // A HeapRegion is the smallest piece of a G1CollectedHeap that
 // can be collected independently.
@@ -837,6 +838,6 @@
   bool complete() { return _complete; }
 };
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
--- a/src/share/vm/gc_implementation/shared/allocationStats.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/allocationStats.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -23,10 +23,11 @@
  */
 
 #include "precompiled.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/allocationStats.hpp"
 #include "utilities/ostream.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // Technically this should be derived from machine speed, and
 // ideally it would be dynamically adjusted.
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,11 +25,12 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_ALLOCATIONSTATS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_ALLOCATIONSTATS_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/gcUtil.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 class AllocationStats VALUE_OBJ_CLASS_SPEC {
   // A duration threshold (in ms) used to filter
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,9 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_CONCURRENTGCTHREAD_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_CONCURRENTGCTHREAD_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "runtime/thread.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 class VoidClosure;
 
--- a/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -23,11 +23,12 @@
  */
 
 #include "precompiled.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/gSpaceCounters.hpp"
 #include "memory/generation.hpp"
 #include "memory/resourceArea.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 GSpaceCounters::GSpaceCounters(const char* name, int ordinal, size_t max_size,
                                Generation* g, GenerationCounters* gc,
--- a/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,11 +25,12 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GSPACECOUNTERS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_GSPACECOUNTERS_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/generation.hpp"
 #include "runtime/perfData.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // A GSpaceCounter is a holder class for performance counters
 // that track a space;
--- a/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,10 +25,11 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCADAPTIVEPOLICYCOUNTERS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCADAPTIVEPOLICYCOUNTERS_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/shared/gcPolicyCounters.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // This class keeps statistical information and computes the
 // size of the heap.
--- a/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,11 +25,12 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/generation.hpp"
 #include "runtime/perfData.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // A HSpaceCounter is a holder class for performance counters
 // that track a collections (logical spaces) in a heap;
--- a/src/share/vm/gc_implementation/shared/immutableSpace.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/immutableSpace.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -23,11 +23,12 @@
  */
 
 #include "precompiled.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/immutableSpace.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 void ImmutableSpace::initialize(MemRegion mr) {
   HeapWord* bottom = mr.start();
--- a/src/share/vm/gc_implementation/shared/isGCActiveMark.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/isGCActiveMark.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,9 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_ISGCACTIVEMARK_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_ISGCACTIVEMARK_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // This class provides a method for block structured setting of the
 // _is_gc_active state without requiring accessors in CollectedHeap
--- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -28,9 +28,10 @@
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "utilities/stack.inline.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 inline void MarkSweep::mark_object(oop obj) {
   // some marks may contain information we need to preserve so we store them away
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,10 +25,11 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLENUMASPACE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLENUMASPACE_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/gcUtil.hpp"
 #include "gc_implementation/shared/mutableSpace.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 /*
  *    The NUMA-aware allocator (MutableNUMASpace) is basically a modification
--- a/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -23,13 +23,14 @@
  */
 
 #include "precompiled.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/mutableSpace.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 MutableSpace::MutableSpace(size_t alignment): ImmutableSpace(), _top(NULL), _alignment(alignment) {
   assert(MutableSpace::alignment() >= 0 &&
--- a/src/share/vm/gc_implementation/shared/spaceCounters.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/spaceCounters.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -23,10 +23,11 @@
  */
 
 #include "precompiled.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/spaceCounters.hpp"
 #include "memory/resourceArea.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 SpaceCounters::SpaceCounters(const char* name, int ordinal, size_t max_size,
                              MutableSpace* m, GenerationCounters* gc) :
--- a/src/share/vm/gc_implementation/shared/spaceCounters.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/spaceCounters.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,12 +25,13 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_SPACECOUNTERS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_SPACECOUNTERS_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "gc_implementation/shared/immutableSpace.hpp"
 #include "gc_implementation/shared/mutableSpace.hpp"
 #include "runtime/perfData.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // A SpaceCounter is a holder class for performance counters
 // that track a space;
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -36,9 +36,10 @@
 #include "runtime/interfaceSupport.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/preserveException.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 #ifndef USDT2
 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -3099,9 +3099,9 @@
   tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
   tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
 #endif
-#if defined(IA64) && !defined(ZERO)
+#if !defined(ZERO)
   tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
-#endif // IA64 && !ZERO
+#endif // !ZERO
   tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
 }
 
--- a/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "utilities/macros.hpp"
 #include "gc_implementation/shared/allocationStats.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/freeList.hpp"
@@ -31,12 +32,13 @@
 #include "memory/metachunk.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/ostream.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 ////////////////////////////////////////////////////////////////////////////////
 // A binary tree based search structure for free blocks.
@@ -118,7 +120,7 @@
 }
 
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 // Specialize for AdaptiveFreeList which tries to avoid
 // splitting a chunk of a size that is under populated in favor of
 // an over populated size.  The general get_better_list() just returns
@@ -160,7 +162,7 @@
   }
   return curTL;
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 template <class Chunk_t, template <class> class FreeList_t>
 TreeList<Chunk_t, FreeList_t>*
@@ -871,9 +873,9 @@
 template <class Chunk_t, template <class> class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::dict_census_update(size_t size, bool split, bool birth){}
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 template <>
-void BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>::dict_census_update(size_t size, bool split, bool birth){
+void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth){
   TreeList<FreeChunk, AdaptiveFreeList>* nd = find_list(size);
   if (nd) {
     if (split) {
@@ -900,7 +902,7 @@
   //   This is a birth associated with a LinAB.  The chunk
   //     for the LinAB is not in the dictionary.
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 template <class Chunk_t, template <class> class FreeList_t>
 bool BinaryTreeDictionary<Chunk_t, FreeList_t>::coal_dict_over_populated(size_t size) {
@@ -909,9 +911,9 @@
   return true;
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 template <>
-bool BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>::coal_dict_over_populated(size_t size) {
+bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
   if (FLSAlwaysCoalesceLarge) return true;
 
   TreeList<FreeChunk, AdaptiveFreeList>* list_of_size = find_list(size);
@@ -919,7 +921,7 @@
   return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
          list_of_size->count() > list_of_size->coal_desired();
 }
-#endif  // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 // Closures for walking the binary tree.
 //   do_list() walks the free list in a node applying the closure
@@ -979,7 +981,7 @@
 
   void do_list(FreeList<Chunk_t>* fl) {}
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   void do_list(AdaptiveFreeList<Chunk_t>* fl) {
     double coalSurplusPercent = _percentage;
     fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
@@ -987,7 +989,7 @@
     fl->set_before_sweep(fl->count());
     fl->set_bfr_surp(fl->surplus());
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 };
 
 // Used to search the tree until a condition is met.
@@ -1134,13 +1136,13 @@
   setTreeSurplusClosure(double v) { percentage = v; }
   void do_list(FreeList<Chunk_t>* fl) {}
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   void do_list(AdaptiveFreeList<Chunk_t>* fl) {
     double splitSurplusPercent = percentage;
     fl->set_surplus(fl->count() -
                    (ssize_t)((double)fl->desired() * splitSurplusPercent));
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 };
 
 template <class Chunk_t, template <class> class FreeList_t>
@@ -1157,7 +1159,7 @@
   setTreeHintsClosure(size_t v) { hint = v; }
   void do_list(FreeList<Chunk_t>* fl) {}
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   void do_list(AdaptiveFreeList<Chunk_t>* fl) {
     fl->set_hint(hint);
     assert(fl->hint() == 0 || fl->hint() > fl->size(),
@@ -1166,7 +1168,7 @@
       hint = fl->size();
     }
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 };
 
 template <class Chunk_t, template <class> class FreeList_t>
@@ -1180,7 +1182,7 @@
 class clearTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   void do_list(FreeList<Chunk_t>* fl) {}
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   void do_list(AdaptiveFreeList<Chunk_t>* fl) {
     fl->set_prev_sweep(fl->count());
     fl->set_coal_births(0);
@@ -1188,7 +1190,7 @@
     fl->set_split_births(0);
     fl->set_split_deaths(0);
   }
-#endif  // SERIALGC
+#endif // INCLUDE_ALL_GCS
 };
 
 template <class Chunk_t, template <class> class FreeList_t>
@@ -1252,7 +1254,7 @@
     total()->set_count(      total()->count()       + fl->count()      );
   }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   void do_list(AdaptiveFreeList<Chunk_t>* fl) {
     if (++_print_line >= 40) {
       FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
@@ -1271,7 +1273,7 @@
     total()->set_split_births(total()->split_births() + fl->split_births());
     total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
   }
-#endif  // SERIALGC
+#endif // INCLUDE_ALL_GCS
 };
 
 template <class Chunk_t, template <class> class FreeList_t>
@@ -1286,9 +1288,9 @@
   FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, " ");
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 template <>
-void BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>::print_dict_census(void) const {
+void AFLBinaryTreeDictionary::print_dict_census(void) const {
 
   gclog_or_tty->print("\nBinaryTree\n");
   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
@@ -1308,7 +1310,7 @@
              (double)(total->desired() - total->count())
              /(total->desired() != 0 ? (double)total->desired() : 1.0));
 }
-#endif  // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 template <class Chunk_t, template <class> class FreeList_t>
 class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
@@ -1414,10 +1416,10 @@
 template class TreeChunk<Metachunk, FreeList>;
 
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 // Explicitly instantiate these types for FreeChunk.
 template class TreeList<FreeChunk, AdaptiveFreeList>;
 template class BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>;
 template class TreeChunk<FreeChunk, AdaptiveFreeList>;
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
--- a/src/share/vm/memory/binaryTreeDictionary.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/binaryTreeDictionary.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -43,6 +43,10 @@
 template <class Chunk_t, template <class> class FreeList_t> class DescendTreeCensusClosure;
 template <class Chunk_t, template <class> class FreeList_t> class DescendTreeSearchClosure;
 
+class FreeChunk;
+template <class> class AdaptiveFreeList;
+typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
+
 template <class Chunk_t, template <class> class FreeList_t>
 class TreeList : public FreeList_t<Chunk_t> {
   friend class TreeChunk<Chunk_t, FreeList_t>;
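
Moving the typedef here, next to the primary template, lets compactibleFreeListSpace.hpp and vmStructs_cms.hpp name the alias without pulling in AdaptiveFreeList's definition; the forward declarations above suffice because declaring the alias instantiates nothing. A minimal sketch of the pattern:

// Forward declarations are enough: aliasing an instantiation of a class
// template does not require its arguments to be complete types.
class FreeChunk;
template <class> class AdaptiveFreeList;
template <class Chunk_t, template <class> class FreeList_t>
class BinaryTreeDictionary;

typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;

// Pointers through the alias also need no definitions, which is exactly
// how the CompactibleFreeListSpace::_dictionary field uses it.
struct SpaceSketch {
  AFLBinaryTreeDictionary* _dictionary;
};
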
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -34,6 +34,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/virtualspace.hpp"
 #include "services/memTracker.hpp"
+#include "utilities/macros.hpp"
 #ifdef COMPILER1
 #include "c1/c1_LIR.hpp"
 #include "c1/c1_LIRGenerator.hpp"
@@ -499,13 +500,13 @@
     int n_threads =  SharedHeap::heap()->n_par_threads();
     bool is_par = n_threads > 0;
     if (is_par) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
       assert(SharedHeap::heap()->n_par_threads() ==
              SharedHeap::heap()->workers()->active_workers(), "Mismatch");
       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
       fatal("Parallel gc not supported here.");
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     } else {
       // We do not call the non_clean_card_iterate_serial() version below because
       // we want to clear the cards (which non_clean_card_iterate_serial() does not
--- a/src/share/vm/memory/cardTableRS.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/cardTableRS.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -31,10 +31,11 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 CardTableRS::CardTableRS(MemRegion whole_heap,
                          int max_covered_regions) :
@@ -42,7 +43,7 @@
   _cur_youngergen_card_val(youngergenP1_card),
   _regions_to_iterate(max_covered_regions - 1)
 {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseG1GC) {
       _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap,
                                                   max_covered_regions);
--- a/src/share/vm/memory/collectorPolicy.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/collectorPolicy.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,11 @@
 #include "runtime/java.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // CollectorPolicy methods.
 
@@ -235,6 +236,18 @@
   if (NewSize + OldSize > MaxHeapSize) {
     MaxHeapSize = NewSize + OldSize;
   }
+
+  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
+    // NewRatio will be used later to set the young generation size so we use
+    // it to calculate how big the heap should be based on the requested OldSize
+    // and NewRatio.
+    assert(NewRatio > 0, "NewRatio should have been set up earlier");
+    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
+
+    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
+    MaxHeapSize = calculated_heapsize;
+    InitialHeapSize = calculated_heapsize;
+  }
   MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
 
   always_do_update_barrier = UseConcMarkSweepGC;
@@ -384,14 +397,15 @@
 // keeping it simple also seems a worthwhile goal.
 bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                      size_t* gen1_size_ptr,
-                                                     size_t heap_size,
-                                                     size_t min_gen0_size) {
+                                                     const size_t heap_size,
+                                                     const size_t min_gen1_size) {
   bool result = false;
+
   if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
-    if (((*gen0_size_ptr + OldSize) > heap_size) &&
-       (heap_size - min_gen0_size) >= min_alignment()) {
-      // Adjust gen0 down to accomodate OldSize
-      *gen0_size_ptr = heap_size - min_gen0_size;
+    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
+        (heap_size >= min_gen1_size + min_alignment())) {
+      // Adjust gen0 down to accommodate min_gen1_size
+      *gen0_size_ptr = heap_size - min_gen1_size;
       *gen0_size_ptr =
         MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
              min_alignment());
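
The new OldSize-driven branch computes the whole heap from NewRatio: with young:old sized 1:NewRatio, heap = OldSize * (NewRatio + 1) / NewRatio, which the patch writes as (OldSize / NewRatio) * (NewRatio + 1) to stay in integer arithmetic. A worked example under a hypothetical command line (the alignment step is omitted):

#include <cstdio>

int main() {
  // Assumed for illustration: -XX:OldSize=1536m with NewRatio left at a
  // default of 2, i.e. the old generation is twice the young generation.
  const size_t M  = 1024 * 1024;
  size_t OldSize  = 1536 * M;
  size_t NewRatio = 2;

  // Same expression as the hunk: divide first, then scale up.
  size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

  std::printf("heap = %zu MB (young %zu MB + old %zu MB)\n",
              calculated_heapsize / M,
              (calculated_heapsize - OldSize) / M,
              OldSize / M);
  // Prints: heap = 2304 MB (young 768 MB + old 1536 MB)
  return 0;
}
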
--- a/src/share/vm/memory/collectorPolicy.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/collectorPolicy.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "memory/barrierSet.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/genRemSet.hpp"
+#include "utilities/macros.hpp"
 
 // This class (or more correctly, subtypes of this class)
 // are used to define global garbage collector attributes.
@@ -48,10 +49,10 @@
 class GenCollectorPolicy;
 class TwoGenerationCollectorPolicy;
 class AdaptiveSizePolicy;
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 class ConcurrentMarkSweepPolicy;
 class G1CollectorPolicy;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 class GCPolicyCounters;
 class MarkSweepPolicy;
@@ -134,21 +135,21 @@
   virtual GenCollectorPolicy*           as_generation_policy()            { return NULL; }
   virtual TwoGenerationCollectorPolicy* as_two_generation_policy()        { return NULL; }
   virtual MarkSweepPolicy*              as_mark_sweep_policy()            { return NULL; }
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   virtual ConcurrentMarkSweepPolicy*    as_concurrent_mark_sweep_policy() { return NULL; }
   virtual G1CollectorPolicy*            as_g1_policy()                    { return NULL; }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
   // Note that these are not virtual.
   bool is_generation_policy()            { return as_generation_policy() != NULL; }
   bool is_two_generation_policy()        { return as_two_generation_policy() != NULL; }
   bool is_mark_sweep_policy()            { return as_mark_sweep_policy() != NULL; }
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; }
   bool is_g1_policy()                    { return as_g1_policy() != NULL; }
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
   bool is_concurrent_mark_sweep_policy() { return false; }
   bool is_g1_policy()                    { return false; }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 
   virtual BarrierSet::Name barrier_set_name() = 0;
@@ -321,7 +322,7 @@
 
  // Returns true if gen0 sizes were adjusted
   bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
-                               size_t heap_size, size_t min_gen1_size);
+                         const size_t heap_size, const size_t min_gen1_size);
 };
 
 class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
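
For reference, the INCLUDE_ALL_GCS guard that replaces SERIALGC throughout this changeset is an always-defined 0/1 macro from utilities/macros.hpp. A sketch of its conventional shape (the authoritative definition lives in that header):

    #ifndef INCLUDE_ALL_GCS
    #define INCLUDE_ALL_GCS 1  // full VM: CMS, G1, ParNew, ParallelScavenge compiled in
    #endif
    // A minimal-VM build defines it as 0 instead. Testing "#if INCLUDE_ALL_GCS"
    // rather than "#ifndef SERIALGC" means a misspelled macro name can be caught
    // under -Wundef instead of silently selecting the serial-only branch.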
--- a/src/share/vm/memory/freeBlockDictionary.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/freeBlockDictionary.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -23,13 +23,15 @@
  */
 
 #include "precompiled.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 #include "memory/freeBlockDictionary.hpp"
 #include "memory/metablock.hpp"
 #include "memory/metachunk.hpp"
 #include "runtime/thread.inline.hpp"
+#include "utilities/macros.hpp"
 
 #ifndef PRODUCT
 template <class Chunk> Mutex* FreeBlockDictionary<Chunk>::par_lock() const {
@@ -56,7 +58,7 @@
 template class FreeBlockDictionary<Metablock>;
 template class FreeBlockDictionary<Metachunk>;
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 // Explicitly instantiate for FreeChunk
 template class FreeBlockDictionary<FreeChunk>;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
--- a/src/share/vm/memory/freeList.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/freeList.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -31,10 +31,11 @@
 #include "runtime/globals.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/vmThread.hpp"
+#include "utilities/macros.hpp"
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 // Free list.  A FreeList is used to access a linked list of chunks
 // of space in the heap.  The head and tail are maintained so that
@@ -341,6 +342,6 @@
 
 template class FreeList<Metablock>;
 template class FreeList<Metachunk>;
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 template class FreeList<FreeChunk>;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
--- a/src/share/vm/memory/genCollectedHeap.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -51,10 +51,11 @@
 #include "services/memoryService.hpp"
 #include "utilities/vmError.hpp"
 #include "utilities/workgroup.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 GenCollectedHeap* GenCollectedHeap::_gch;
 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
@@ -141,14 +142,14 @@
   }
   clear_incremental_collection_failed();
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // If we are running CMS, create the collector responsible
   // for collecting the CMS generations.
   if (collector_policy()->is_concurrent_mark_sweep_policy()) {
     bool success = create_cms_collector();
     if (!success) return JNI_ENOMEM;
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   return JNI_OK;
 }
@@ -686,12 +687,12 @@
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
   if (should_do_concurrent_full_gc(cause)) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     // mostly concurrent full collection
     collect_mostly_concurrent(cause);
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
     ShouldNotReachHere();
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
   } else {
 #ifdef ASSERT
     if (cause == GCCause::_scavenge_alot) {
@@ -736,7 +737,7 @@
   }
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 bool GenCollectedHeap::create_cms_collector() {
 
   assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
@@ -772,7 +773,7 @@
     VMThread::execute(&op);
   }
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
    do_full_collection(clear_all_soft_refs, _n_gens - 1);
@@ -1116,22 +1117,22 @@
   if (workers() != NULL) {
     workers()->threads_do(tc);
   }
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseConcMarkSweepGC) {
     ConcurrentMarkSweepThread::threads_do(tc);
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 }
 
 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseParNewGC) {
     workers()->print_worker_threads_on(st);
   }
   if (UseConcMarkSweepGC) {
     ConcurrentMarkSweepThread::print_all_on(st);
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 }
 
 void GenCollectedHeap::print_tracing_info() const {
--- a/src/share/vm/memory/generationSpec.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/generationSpec.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -30,11 +30,12 @@
 #include "memory/generationSpec.hpp"
 #include "memory/tenuredGeneration.hpp"
 #include "runtime/java.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parNew/asParNewGeneration.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 Generation* GenerationSpec::init(ReservedSpace rs, int level,
                                  GenRemSet* remset) {
@@ -45,7 +46,7 @@
     case Generation::MarkSweepCompact:
       return new TenuredGeneration(rs, init_size(), level, remset);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case Generation::ParNew:
       return new ParNewGeneration(rs, init_size(), level);
 
@@ -94,7 +95,7 @@
 
       return g;
     }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
     default:
       guarantee(false, "unrecognized GenerationName");
--- a/src/share/vm/memory/heapInspection.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/heapInspection.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -30,9 +30,10 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalDefinitions.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // HeapInspection
 
--- a/src/share/vm/memory/heapInspection.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/heapInspection.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -28,6 +28,7 @@
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/annotations.hpp"
+#include "utilities/macros.hpp"
 
 #if INCLUDE_SERVICES
 
--- a/src/share/vm/memory/metaspace.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/metaspace.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -1737,10 +1737,10 @@
     *class_chunk_word_size = ClassSmallChunk;
     break;
   }
-  assert(chunk_word_size != 0 && class_chunk_word_size != 0,
+  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
     err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
             " class " SIZE_FORMAT,
-            chunk_word_size, class_chunk_word_size));
+            *chunk_word_size, *class_chunk_word_size));
 }
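
The assert fix above is easy to misread: the old condition tested the two out-parameter pointers, which are never NULL at this point, so the check could never fire. Dereferencing tests the sizes that were just computed.

    assert(chunk_word_size != 0,  "old form: tests the pointer, vacuously true");
    assert(*chunk_word_size != 0, "new form: tests the size written in the switch");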
 
 size_t SpaceManager::sum_free_in_chunks_in_use() const {
@@ -2040,7 +2040,7 @@
            align_size_up(humongous_chunks->word_size(),
                              HumongousChunkGranularity),
            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
-                   " granularity " SIZE_FORMAT,
+                   " granularity %d",
                    humongous_chunks->word_size(), HumongousChunkGranularity));
     Metachunk* next_humongous_chunks = humongous_chunks->next();
     chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
@@ -2264,7 +2264,8 @@
   }
   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
   assert(allocation_total() == sum_used_in_chunks_in_use(),
-    err_msg("allocation total is not consistent %d vs %d",
+    err_msg("allocation total is not consistent " SIZE_FORMAT
+            " vs " SIZE_FORMAT,
             allocation_total(), sum_used_in_chunks_in_use()));
 }
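
The format-string fixes in these hunks follow one rule: err_msg() is printf-like, so size_t and uintx arguments must be paired with SIZE_FORMAT / UINTX_FORMAT; "%d" expects an int and truncates (or misreads the varargs) on LP64. A sketch, with the expansions assumed to come from globalDefinitions.hpp:

    size_t used = allocation_total();            // 64-bit on LP64
    err_msg("inconsistent %d", used);            // wrong: int-sized read of a size_t
    err_msg("inconsistent " SIZE_FORMAT, used);  // right: matches size_t width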
 
@@ -2578,7 +2579,8 @@
 // argument passed in is at the top of the compressed space
 void Metaspace::initialize_class_space(ReservedSpace rs) {
   // The reserved space size may be bigger because of alignment, esp with UseLargePages
-  assert(rs.size() >= ClassMetaspaceSize, err_msg("%d != %d", rs.size(), ClassMetaspaceSize));
+  assert(rs.size() >= ClassMetaspaceSize,
+         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
   _class_space_list = new VirtualSpaceList(rs);
 }
 
--- a/src/share/vm/memory/space.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/space.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -40,6 +40,7 @@
 #include "runtime/safepoint.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 
 void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
 void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
@@ -658,7 +659,7 @@
   }
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                             \
   void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
@@ -673,7 +674,7 @@
   ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)
 
 #undef ContigSpace_PAR_OOP_ITERATE_DEFN
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
   if (is_empty()) return;
--- a/src/share/vm/memory/space.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/space.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -34,6 +34,7 @@
 #include "oops/markOop.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/prefetch.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/workgroup.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
@@ -884,14 +885,14 @@
   }
 
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // In support of parallel oop_iterate.
   #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
     void par_oop_iterate(MemRegion mr, OopClosureType* blk);
 
     ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
   #undef ContigSpace_PAR_OOP_ITERATE_DECL
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // Compaction support
   virtual void reset_after_compaction() {
--- a/src/share/vm/memory/specialized_oop_closures.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/specialized_oop_closures.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -26,9 +26,10 @@
 #define SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_HPP
 
 #include "runtime/atomic.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // The following OopClosure types get specialized versions of
 // "oop_oop_iterate" that invoke the closures' do_oop methods
@@ -80,20 +81,20 @@
   f(FastScanClosure,_nv)                                \
   f(FilteringClosure,_nv)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)       \
   f(ParScanWithBarrierClosure,_nv)                      \
   f(ParScanWithoutBarrierClosure,_nv)
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(f)       \
   f(NoHeaderExtendedOopClosure,_nv)                     \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f)             \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)       \
   f(MarkRefsIntoAndScanClosure,_nv)                     \
   f(Par_MarkRefsIntoAndScanClosure,_nv)                 \
@@ -104,9 +105,9 @@
   f(CMSKeepAliveClosure,_nv)                            \
   f(CMSInnerParMarkAndPushClosure,_nv)                  \
   FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f)
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 
 // We separate these out, because sometime the general one has
@@ -120,7 +121,7 @@
 #define ALL_OOP_OOP_ITERATE_CLOSURES_2(f)               \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 // This macro applies an argument macro to all OopClosures for which we
 // want specialized bodies of a family of methods related to
 // "par_oop_iterate".  The arguments to f are the same as above.
@@ -136,7 +137,7 @@
 #define ALL_PAR_OOP_ITERATE_CLOSURES(f)                \
   f(ExtendedOopClosure,_v)                             \
   SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 // This macro applies an argument macro to all OopClosures for which we
 // want specialized bodies of a family of methods related to
@@ -155,14 +156,14 @@
   f(ScanClosure,_nv)                                     \
   f(FastScanClosure,_nv)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f) \
   f(ParScanWithBarrierClosure,_nv)                       \
   f(ParScanWithoutBarrierClosure,_nv)                    \
   FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f)
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
 #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(f)  \
   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_S(f)      \
--- a/src/share/vm/memory/tenuredGeneration.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/tenuredGeneration.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -33,6 +33,7 @@
 #include "memory/tenuredGeneration.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
+#include "utilities/macros.hpp"
 
 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                      size_t initial_byte_size, int level,
@@ -61,7 +62,7 @@
   _space_counters = new CSpaceCounters(gen_name, 0,
                                        _virtual_space.reserved_size(),
                                        _the_space, _gen_counters);
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseParNewGC) {
     typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
     _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
@@ -77,7 +78,7 @@
   } else {
     _alloc_buffers = NULL;
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 }
 
 
@@ -339,7 +340,7 @@
 }
 
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 oop TenuredGeneration::par_promote(int thread_num,
                                    oop old, markOop m, size_t word_sz) {
 
@@ -423,10 +424,10 @@
   }
 }
 
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
 void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
 void TenuredGeneration::verify_alloc_buffers_clean() {}
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   size_t available = max_contiguous_available();
--- a/src/share/vm/memory/tenuredGeneration.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/tenuredGeneration.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -29,6 +29,7 @@
 #include "gc_implementation/shared/gcStats.hpp"
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/generation.hpp"
+#include "utilities/macros.hpp"
 
 // TenuredGeneration models the heap containing old (promoted/tenured) objects.
 
@@ -45,11 +46,11 @@
   size_t _capacity_at_prologue;
   size_t _used_at_prologue;
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // To support parallel promotion: an array of parallel allocation
   // buffers, one per thread, initially NULL.
   ParGCAllocBufferWithBOT** _alloc_buffers;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // Retire all alloc buffers before a full GC, so that they will be
   // re-allocated at the start of the next young GC.
@@ -93,14 +94,14 @@
                        size_t size,
                        bool is_tlab);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Overrides.
   virtual oop par_promote(int thread_num,
                           oop obj, markOop m, size_t word_sz);
   virtual void par_promote_alloc_undo(int thread_num,
                                       HeapWord* obj, size_t word_sz);
   virtual void par_promote_alloc_done(int thread_num);
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // Performance Counter support
   void update_counters();
--- a/src/share/vm/memory/universe.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/memory/universe.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -70,13 +70,14 @@
 #include "utilities/events.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/preserveException.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // Known objects
 Klass* Universe::_boolArrayKlassObj                 = NULL;
@@ -758,20 +759,20 @@
 jint Universe::initialize_heap() {
 
   if (UseParallelGC) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     Universe::_collectedHeap = new ParallelScavengeHeap();
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
     fatal("UseParallelGC not supported in this VM.");
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   } else if (UseG1GC) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     G1CollectorPolicy* g1p = new G1CollectorPolicy();
     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
     Universe::_collectedHeap = g1h;
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
     fatal("UseG1GC not supported in java kernel vm.");
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   } else {
     GenCollectorPolicy *gc_policy;
@@ -779,15 +780,15 @@
     if (UseSerialGC) {
       gc_policy = new MarkSweepPolicy();
     } else if (UseConcMarkSweepGC) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
       if (UseAdaptiveSizePolicy) {
         gc_policy = new ASConcurrentMarkSweepPolicy();
       } else {
         gc_policy = new ConcurrentMarkSweepPolicy();
       }
-#else   // SERIALGC
+#else  // INCLUDE_ALL_GCS
     fatal("UseConcMarkSweepGC not supported in this VM.");
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     } else { // default old generation
       gc_policy = new MarkSweepPolicy();
     }
--- a/src/share/vm/oops/cpCache.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/cpCache.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -33,9 +33,10 @@
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/handles.inline.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 # include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 
 // Implementation of ConstantPoolCacheEntry
--- a/src/share/vm/oops/instanceClassLoaderKlass.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/instanceClassLoaderKlass.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -36,12 +36,13 @@
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 #define if_do_metadata_checked(closure, nv_suffix)                    \
   /* Make sure the non-virtual and the virtual versions match. */     \
@@ -73,7 +74,7 @@
   return size;                                                                  \
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
                                                                                 \
 int InstanceClassLoaderKlass::                                                  \
@@ -83,7 +84,7 @@
   int size = InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \
   return size;                                                                  \
 }
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 
 #define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)      \
@@ -111,10 +112,10 @@
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN)
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m)
 
@@ -129,7 +130,7 @@
   }
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void InstanceClassLoaderKlass::oop_follow_contents(ParCompactionManager* cm,
         oop obj) {
   InstanceKlass::oop_follow_contents(cm, obj);
@@ -155,5 +156,5 @@
   }
   return size_helper();
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
--- a/src/share/vm/oops/instanceClassLoaderKlass.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/instanceClassLoaderKlass.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_HPP
 
 #include "oops/instanceKlass.hpp"
+#include "utilities/macros.hpp"
 
 // An InstanceClassLoaderKlass is a specialization of the InstanceKlass. It does
 // not add any field.  It is added to walk the dependencies for the class loader
@@ -61,13 +62,13 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)      \
   int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
     // Garbage collection
   void oop_follow_contents(oop obj);
--- a/src/share/vm/oops/instanceKlass.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/instanceKlass.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -56,7 +56,8 @@
 #include "runtime/thread.inline.hpp"
 #include "services/threadService.hpp"
 #include "utilities/dtrace.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
@@ -67,7 +68,7 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #endif
@@ -2043,7 +2044,7 @@
     assert_is_in_closed_subset)
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void InstanceKlass::oop_follow_contents(ParCompactionManager* cm,
                                         oop obj) {
   assert(obj != NULL, "can't follow the content of NULL object");
@@ -2055,7 +2056,7 @@
     PSParallelCompact::mark_and_push(cm, p), \
     assert_is_in)
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 // closure's do_metadata() method dictates whether the given closure should be
 // applied to the klass ptr in the object header.
@@ -2083,7 +2084,7 @@
   return size_helper();                                                 \
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
                                                                                 \
 int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                \
@@ -2101,7 +2102,7 @@
     assert_is_in_closed_subset)                                                 \
    return size_helper();                                                        \
 }
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
                                                                         \
@@ -2125,10 +2126,10 @@
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 int InstanceKlass::oop_adjust_pointers(oop obj) {
   int size = size_helper();
@@ -2140,7 +2141,7 @@
   return size;
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void InstanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
     obj, \
@@ -2160,7 +2161,7 @@
   return size;
 }
 
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
   assert(is_loader_alive(is_alive), "this klass should be live");
--- a/src/share/vm/oops/instanceKlass.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/instanceKlass.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -36,6 +36,7 @@
 #include "runtime/os.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "utilities/macros.hpp"
 
 // An InstanceKlass is the VM level representation of a Java class.
 // It contains all information needed for a class at execution runtime.
@@ -936,13 +937,13 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DECL)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
   int  oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
 private:
--- a/src/share/vm/oops/instanceMirrorKlass.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/instanceMirrorKlass.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -35,7 +35,8 @@
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
@@ -45,7 +46,7 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 int InstanceMirrorKlass::_offset_of_static_fields = 0;
 
@@ -168,7 +169,7 @@
     assert_is_in_closed_subset)
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void InstanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm,
                                               oop obj) {
   InstanceKlass::oop_follow_contents(cm, obj);
@@ -189,7 +190,7 @@
     PSParallelCompact::mark_and_push(cm, p),                                          \
     assert_is_in)
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 int InstanceMirrorKlass::oop_adjust_pointers(oop obj) {
   int size = oop_size(obj);
@@ -262,7 +263,7 @@
   }                                                                                   \
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
                                                                                       \
 int InstanceMirrorKlass::                                                             \
@@ -278,7 +279,7 @@
     InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix);                 \
   }                                                                                   \
 }
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 
 #define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)         \
@@ -310,14 +311,14 @@
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN)
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void InstanceMirrorKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   // Note that we don't have to follow the mirror -> klass pointer, since all
   // klasses that are dirty will be scavenged when we iterate over the
@@ -353,7 +354,7 @@
     assert_nothing)
   return size;
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 int InstanceMirrorKlass::instance_size(KlassHandle k) {
   if (k() != NULL && k->oop_is_instance()) {
--- a/src/share/vm/oops/instanceMirrorKlass.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/instanceMirrorKlass.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -28,6 +28,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "oops/instanceKlass.hpp"
 #include "runtime/handles.hpp"
+#include "utilities/macros.hpp"
 
 // An InstanceMirrorKlass is a specialized InstanceKlass for
 // java.lang.Class instances.  These instances are special because
@@ -107,13 +108,13 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
   int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 };
 
 #endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP
--- a/src/share/vm/oops/instanceRefKlass.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -33,7 +33,8 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/preserveException.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
@@ -42,7 +43,7 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 template <class T>
 void specialized_oop_follow_contents(InstanceRefKlass* ref, oop obj) {
@@ -120,7 +121,7 @@
   }
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 template <class T>
 void specialized_oop_follow_contents(InstanceRefKlass* ref,
                                      ParCompactionManager* cm,
@@ -194,7 +195,7 @@
     specialized_oop_follow_contents<oop>(this, cm, obj);
   }
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #ifdef ASSERT
 template <class T> void trace_reference_gc(const char *s, oop obj,
@@ -317,7 +318,7 @@
   }                                                                             \
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
                                                                                 \
 int InstanceRefKlass::                                                          \
@@ -333,7 +334,7 @@
     InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains);         \
   }                                                                             \
 }
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 
 #define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)      \
@@ -354,14 +355,14 @@
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 template <class T>
 void specialized_oop_push_contents(InstanceRefKlass *ref,
                                    PSPromotionManager* pm, oop obj) {
@@ -444,7 +445,7 @@
   }
   return size_helper();
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 void InstanceRefKlass::update_nonstatic_oop_maps(Klass* k) {
   // Clear the nonstatic oop-map entries corresponding to referent
--- a/src/share/vm/oops/instanceRefKlass.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/instanceRefKlass.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_OOPS_INSTANCEREFKLASS_HPP
 
 #include "oops/instanceKlass.hpp"
+#include "utilities/macros.hpp"
 
 // An InstanceRefKlass is a specialized InstanceKlass for Java
 // classes that are subclasses of java/lang/ref/Reference.
@@ -83,13 +84,13 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DECL)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)      \
   int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   static void release_and_notify_pending_list_lock(BasicLock *pending_list_basic_lock);
   static void acquire_pending_list_lock(BasicLock *pending_list_basic_lock);
--- a/src/share/vm/oops/klass.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/klass.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -38,11 +38,12 @@
 #include "oops/oop.inline2.hpp"
 #include "runtime/atomic.hpp"
 #include "utilities/stack.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 void Klass::set_name(Symbol* n) {
   _name = n;
--- a/src/share/vm/oops/klass.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/klass.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -35,11 +35,12 @@
 #include "runtime/orderAccess.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/accessFlags.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
 #include "gc_implementation/g1/g1OopClosures.hpp"
 #include "gc_implementation/parNew/parOopClosures.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 //
 // A Klass provides:
@@ -629,13 +630,13 @@
     return oop_oop_iterate(obj, blk);
   }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // In case we don't have a specialized backward scanner use forward
   // iteration.
   virtual int oop_oop_iterate_backwards_v(oop obj, ExtendedOopClosure* blk) {
     return oop_oop_iterate_v(obj, blk);
   }
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // Iterates "blk" over all the oops in "obj" (of type "this") within "mr".
   // (I don't see why the _m should be required, but without it the Solaris
@@ -667,7 +668,7 @@
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define Klass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)      \
   virtual int oop_oop_iterate_backwards##nv_suffix(oop obj,                  \
                                                    OopClosureType* blk) {    \
@@ -677,7 +678,7 @@
 
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   virtual void array_klasses_do(void f(Klass* k)) {}
   virtual void with_array_klasses_do(void f(Klass* k));
--- a/src/share/vm/oops/klassPS.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/klassPS.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -27,7 +27,9 @@
 
   // Expands to Parallel Scavenge and Parallel Old declarations
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+
+#if INCLUDE_ALL_GCS
 #define PARALLEL_GC_DECLS \
   virtual void oop_push_contents(PSPromotionManager* pm, oop obj);          \
   /* Parallel Old GC support                                                \
@@ -44,9 +46,9 @@
   virtual void oop_push_contents(PSPromotionManager* pm, oop obj) = 0;      \
   virtual void oop_follow_contents(ParCompactionManager* cm, oop obj) = 0;  \
   virtual int  oop_update_pointers(ParCompactionManager* cm, oop obj) = 0;
-#else  // SERIALGC
+#else  // INCLUDE_ALL_GCS
 #define PARALLEL_GC_DECLS
 #define PARALLEL_GC_DECLS_PV
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #endif // SHARE_VM_OOPS_KLASSPS_HPP
--- a/src/share/vm/oops/method.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/method.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -700,7 +700,7 @@
 }
 
 
-void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report) {
+void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason) {
   if (PrintCompilation && report) {
     ttyLocker ttyl;
     tty->print("made not %scompilable on ", is_osr ? "OSR " : "");
@@ -714,14 +714,21 @@
     }
     this->print_short_name(tty);
     int size = this->code_size();
-    if (size > 0)
+    if (size > 0) {
       tty->print(" (%d bytes)", size);
+    }
+    if (reason != NULL) {
+      tty->print("   %s", reason);
+    }
     tty->cr();
   }
   if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
     ttyLocker ttyl;
     xtty->begin_elem("make_not_%scompilable thread='" UINTX_FORMAT "'",
                      is_osr ? "osr_" : "", os::current_thread_id());
+    if (reason != NULL) {
+      xtty->print(" reason=\'%s\'", reason);
+    }
     xtty->method(this);
     xtty->stamp();
     xtty->end_elem();
@@ -743,8 +750,8 @@
 }
 
 // call this when the compiler finds that this method is not compilable
-void Method::set_not_compilable(int comp_level, bool report) {
-  print_made_not_compilable(comp_level, /*is_osr*/ false, report);
+void Method::set_not_compilable(int comp_level, bool report, const char* reason) {
+  print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
   if (comp_level == CompLevel_all) {
     set_not_c1_compilable();
     set_not_c2_compilable();
@@ -769,8 +776,8 @@
   return false;
 }
 
-void Method::set_not_osr_compilable(int comp_level, bool report) {
-  print_made_not_compilable(comp_level, /*is_osr*/ true, report);
+void Method::set_not_osr_compilable(int comp_level, bool report, const char* reason) {
+  print_made_not_compilable(comp_level, /*is_osr*/ true, report, reason);
   if (comp_level == CompLevel_all) {
     set_not_c1_osr_compilable();
     set_not_c2_osr_compilable();
--- a/src/share/vm/oops/method.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/method.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -764,18 +764,18 @@
   // whether it is not compilable for another reason like having a
   // breakpoint set in it.
   bool  is_not_compilable(int comp_level = CompLevel_any) const;
-  void set_not_compilable(int comp_level = CompLevel_all, bool report = true);
+  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
   void set_not_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_compilable(comp_level, false);
   }
   bool  is_not_osr_compilable(int comp_level = CompLevel_any) const;
-  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true);
+  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
   void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_osr_compilable(comp_level, false);
   }
 
  private:
-  void print_made_not_compilable(int comp_level, bool is_osr, bool report);
+  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
 
  public:
   bool  is_not_c1_compilable() const          { return access_flags().is_not_c1_compilable(); }
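
A hypothetical caller of the widened API (a sketch; the one caller added by this changeset is the methodData.hpp hunk below):

    // 'm' is a Method*. The reason string only needs to outlive the call.
    m->set_not_compilable(CompLevel_full_optimization,
                          true /* report */,
                          "profiling shows no benefit" /* hypothetical reason */);
    // With -XX:+PrintCompilation the reason is appended to the "made not
    // compilable" line; with -XX:+LogCompilation it is emitted as reason='...'.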
--- a/src/share/vm/oops/methodData.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/methodData.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -1469,7 +1469,7 @@
   void inc_decompile_count() {
     _nof_decompiles += 1;
     if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
-      method()->set_not_compilable(CompLevel_full_optimization);
+      method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff");
     }
   }
 
--- a/src/share/vm/oops/objArrayKlass.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/objArrayKlass.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -43,7 +43,8 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/copy.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
@@ -54,7 +55,7 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 ObjArrayKlass* ObjArrayKlass::allocate(ClassLoaderData* loader_data, int n, KlassHandle klass_handle, Symbol* name, TRAPS) {
   assert(ObjArrayKlass::header_size() <= InstanceKlass::header_size(),
@@ -461,7 +462,7 @@
   }
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void ObjArrayKlass::oop_follow_contents(ParCompactionManager* cm,
                                         oop obj) {
   assert(obj->is_array(), "obj must be array");
@@ -472,7 +473,7 @@
     objarray_follow_contents<oop>(cm, obj, 0);
   }
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #define if_do_metadata_checked(closure, nv_suffix)                    \
   /* Make sure the non-virtual and the virtual versions match. */     \
@@ -573,7 +574,7 @@
   return size;
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void ObjArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   assert(obj->is_objArray(), "obj must be obj array");
   ObjArrayKlass_OOP_ITERATE( \
@@ -591,7 +592,7 @@
   ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p))
   return size;
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 // JVM support
 
--- a/src/share/vm/oops/objArrayKlass.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/objArrayKlass.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -28,6 +28,7 @@
 #include "classfile/classLoaderData.hpp"
 #include "memory/specialized_oop_closures.hpp"
 #include "oops/arrayKlass.hpp"
+#include "utilities/macros.hpp"
 
 // ObjArrayKlass is the klass for objArrays
 
@@ -111,11 +112,11 @@
 
   // Parallel Scavenge and Parallel Old
   PARALLEL_GC_DECLS
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   inline void oop_follow_contents(ParCompactionManager* cm, oop obj, int index);
   template <class T> inline void
     objarray_follow_contents(ParCompactionManager* cm, oop obj, int index);
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // Iterators
   int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
--- a/src/share/vm/oops/objArrayKlass.inline.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/objArrayKlass.inline.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -27,10 +27,11 @@
 
 #include "gc_implementation/shared/markSweep.inline.hpp"
 #include "oops/objArrayKlass.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 void ObjArrayKlass::oop_follow_contents(oop obj, int index) {
   if (UseCompressedOops) {
@@ -63,7 +64,7 @@
   }
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void ObjArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj,
                                         int index) {
   if (UseCompressedOops) {
@@ -96,6 +97,6 @@
     cm->push_objarray(a, end_index); // Push the continuation.
   }
 }
-#endif // #ifndef SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #endif // SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP
--- a/src/share/vm/oops/oop.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/oop.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -29,6 +29,7 @@
 #include "memory/memRegion.hpp"
 #include "memory/specialized_oop_closures.hpp"
 #include "oops/metadata.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/top.hpp"
 
 // oopDesc is the top baseclass for object classes.  The {name}Desc classes describe
@@ -298,7 +299,7 @@
   // reference field in "this".
   void follow_contents(void);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Parallel Scavenge
   void push_contents(PSPromotionManager* pm);
 
@@ -306,7 +307,7 @@
   void update_contents(ParCompactionManager* cm);
 
   void follow_contents(ParCompactionManager* cm);
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   bool is_scavengable() const;
 
@@ -316,13 +317,13 @@
   void forward_to(oop p);
   bool cas_forward_to(oop p, markOop compare);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Like "forward_to", but inserts the forwarding pointer atomically.
   // Exactly one thread succeeds in inserting the forwarding pointer, and
   // this call returns "NULL" for that thread; any other thread has the
   // value of the forwarding pointer returned and does not modify "this".
   oop forward_to_atomic(oop p);
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   oop forwardee() const;
 
@@ -334,10 +335,10 @@
   // return the size of this oop.  This is used by the MarkSweep collector.
   int adjust_pointers();
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Parallel old
   void update_header(ParCompactionManager* cm);
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // mark-sweep support
   void follow_body(int begin, int end);
@@ -354,7 +355,7 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DECL)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 #define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)            \
   int oop_iterate_backwards(OopClosureType* blk);
--- a/src/share/vm/oops/oop.inline.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/oop.inline.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -40,6 +40,7 @@
 #include "oops/oop.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
+#include "utilities/macros.hpp"
 #ifdef TARGET_ARCH_x86
 # include "bytes_x86.hpp"
 #endif
@@ -760,7 +761,7 @@
 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)              \
                                                                            \
 inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {           \
@@ -770,6 +771,6 @@
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #endif // SHARE_VM_OOPS_OOP_INLINE_HPP
--- a/src/share/vm/oops/oop.pcgc.inline.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/oop.pcgc.inline.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,14 +25,15 @@
 #ifndef SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
 #define SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 inline void oopDesc::update_contents(ParCompactionManager* cm) {
   // The klass field must be updated before anything else
--- a/src/share/vm/oops/oop.psgc.inline.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/oop.psgc.inline.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,11 +25,12 @@
 #ifndef SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP
 #define SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // ParallelScavengeHeap methods
 
--- a/src/share/vm/oops/symbol.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/symbol.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -152,6 +152,7 @@
 }
 
 void Symbol::print_symbol_on(outputStream* st) const {
+  ResourceMark rm;
   st = st ? st : tty;
   st->print("%s", as_quoted_ascii());
 }
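
The added ResourceMark matters because as_quoted_ascii() builds its character buffer in the current thread's resource area; without a mark in this frame the buffer lives on until some caller's mark unwinds. The general pattern, as a sketch with a hypothetical Symbol* sym:

    {
      ResourceMark rm;                         // record the resource-area watermark
      const char* s = sym->as_quoted_ascii();  // temporary buffer, allocated past rm
      tty->print("%s", s);
    }                                          // rm's destructor frees back to the mark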
--- a/src/share/vm/oops/typeArrayKlass.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -39,6 +39,7 @@
 #include "oops/typeArrayKlass.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "runtime/handles.inline.hpp"
+#include "utilities/macros.hpp"
 
 bool TypeArrayKlass::compute_is_subtype_of(Klass* k) {
   if (!k->oop_is_typeArray()) {
@@ -208,13 +209,13 @@
   // know that Universe::TypeArrayKlass never moves.
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void TypeArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj) {
   assert(obj->is_typeArray(),"must be a type array");
   // Performance tweak: We skip iterating over the klass pointer since we
   // know that Universe::TypeArrayKlass never moves.
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 int TypeArrayKlass::oop_adjust_pointers(oop obj) {
   assert(obj->is_typeArray(),"must be a type array");
@@ -240,7 +241,7 @@
   return t->object_size();
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void TypeArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   ShouldNotReachHere();
   assert(obj->is_typeArray(),"must be a type array");
@@ -251,7 +252,7 @@
   assert(obj->is_typeArray(),"must be a type array");
   return typeArrayOop(obj)->object_size();
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 void TypeArrayKlass::initialize(TRAPS) {
   // Nothing to do. Having this function is handy since objArrayKlasses can be
--- a/src/share/vm/opto/bytecodeInfo.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -420,14 +420,24 @@
 }
 
 //------------------------------print_inlining---------------------------------
-// Really, the failure_msg can be a success message also.
-void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
-  C->print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
-  if (callee_method == NULL)  tty->print(" callee not monotonic or profiled");
-  if (Verbose && callee_method) {
-    const InlineTree *top = this;
-    while( top->caller_tree() != NULL ) { top = top->caller_tree(); }
-    //tty->print("  bcs: %d+%d  invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
+void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
+                                const char* msg, bool success) const {
+  assert(msg != NULL, "just checking");
+  if (C->log() != NULL) {
+    if (success) {
+      C->log()->inline_success(msg);
+    } else {
+      C->log()->inline_fail(msg);
+    }
+  }
+  if (PrintInlining) {
+    C->print_inlining(callee_method, inline_level(), caller_bci, msg);
+    if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
+    if (Verbose && callee_method) {
+      const InlineTree *top = this;
+      while( top->caller_tree() != NULL ) { top = top->caller_tree(); }
+      //tty->print("  bcs: %d+%d  invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
+    }
   }
 }
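
After this refactor every inline decision reaches the compile log, while the console line stays gated on PrintInlining. A sketch of a call site, mirroring the hunks below:

    print_inlining(callee_method, caller_bci, "failed initial checks",
                   false /* !success */);
    // With -XX:+LogCompilation this records the message via
    // C->log()->inline_fail(); a success flows through inline_success() instead.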
 
@@ -451,23 +461,23 @@
 
   // Do some initial checks.
   if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
-    if (PrintInlining)  print_inlining(callee_method, caller_bci, "failed initial checks");
+    print_inlining(callee_method, caller_bci, "failed initial checks",
+                   false /* !success */);
     return NULL;
   }
 
   // Do some parse checks.
   failure_msg = check_can_parse(callee_method);
   if (failure_msg != NULL) {
-    if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
+    print_inlining(callee_method, caller_bci, failure_msg,
+                   false /* !success */);
     return NULL;
   }
 
   // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
-  failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci, should_delay);
-  if (failure_msg != NULL && C->log() != NULL) {
-    C->log()->inline_fail(failure_msg);
-  }
+  failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile,
+                              &wci, should_delay);
 
 #ifndef PRODUCT
   if (UseOldInlining && InlineWarmCalls
@@ -487,7 +497,7 @@
       wci = *(WarmCallInfo::always_hot());
     else
       wci = *(WarmCallInfo::always_cold());
-  }
+    }
   if (!InlineWarmCalls) {
     if (!wci.is_cold() && !wci.is_hot()) {
       // Do not inline the warm calls.
@@ -496,11 +506,10 @@
   }
 
   if (!wci.is_cold()) {
-    // In -UseOldInlining, the failure_msg may also be a success message.
-    if (failure_msg == NULL)  failure_msg = "inline (hot)";
-
     // Inline!
-    if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
+    print_inlining(callee_method, caller_bci,
+                   failure_msg ? failure_msg : "inline (hot)",
+                   true /* success */);
     if (UseOldInlining)
       build_inline_tree_for_callee(callee_method, jvms, caller_bci);
     if (InlineWarmCalls && !wci.is_hot())
@@ -509,8 +518,9 @@
   }
 
   // Do not inline
-  if (failure_msg == NULL)  failure_msg = "too cold to inline";
-  if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
+  print_inlining(callee_method, caller_bci,
+                 failure_msg ? failure_msg : "too cold to inline",
+                 false /* !success */);
   return NULL;
 }
 
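The print_inlining rewrite above separates the verdict from the message: the compile
log now gets an inline_success or inline_fail record unconditionally, while console
output stays behind -XX:+PrintInlining. A simplified, self-contained sketch of that
contract (Log, PrintInlining and main are stand-ins, not the real HotSpot types):

    #include <cassert>
    #include <cstdio>

    struct Log {
      void inline_success(const char* msg) { std::printf("<inline_success reason='%s'/>\n", msg); }
      void inline_fail(const char* msg)    { std::printf("<inline_fail reason='%s'/>\n", msg); }
    };

    static bool PrintInlining = false;

    void print_inlining(Log* log, const char* msg, bool success) {
      assert(msg != NULL && "just checking");
      if (log != NULL) {               // logged whether or not printing is enabled
        if (success) log->inline_success(msg);
        else         log->inline_fail(msg);
      }
      if (PrintInlining) std::printf("  %s\n", msg);  // console output stays opt-in
    }

    int main() {
      Log log;
      print_inlining(&log, "inline (hot)", true);
      print_inlining(&log, "too cold to inline", false);
      return 0;
    }
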
--- a/src/share/vm/opto/callGenerator.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/opto/callGenerator.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -305,11 +305,13 @@
 void LateInlineCallGenerator::do_late_inline() {
   // Can't inline it
   if (call_node() == NULL || call_node()->outcnt() == 0 ||
-      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
+      call_node()->in(0) == NULL || call_node()->in(0)->is_top()) {
     return;
+  }
 
+  const TypeTuple *r = call_node()->tf()->domain();
   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
-    if (call_node()->in(TypeFunc::Parms + i1)->is_top()) {
+    if (call_node()->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
       return;
     }
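
The added Type::HALF test above matters because a long or double argument occupies
two JVM argument slots; C2 types the second slot HALF and feeds it top by
construction, so a top input there is expected rather than a sign of a dead
argument. The distinction as a toy (enum and names invented for illustration):

    #include <cstdio>

    enum SlotType { INT_SLOT, LONG_SLOT, HALF };  // HALF: high half of a long/double

    // Only a top input on a non-HALF slot indicates a genuinely dead argument.
    static bool is_dead_argument(bool input_is_top, SlotType declared) {
      return input_is_top && declared != HALF;
    }

    int main() {
      std::printf("%d\n", is_dead_argument(true, HALF));      // 0: expected, fine
      std::printf("%d\n", is_dead_argument(true, INT_SLOT));  // 1: bail out
      return 0;
    }
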
--- a/src/share/vm/opto/generateOptoStub.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/opto/generateOptoStub.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -88,12 +88,12 @@
                                             thread,
                                             in_bytes(JavaThread::frame_anchor_offset()) +
                                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
-#if defined(SPARC) || defined(IA64)
+#if defined(SPARC)
   Node* adr_flags = basic_plus_adr(top(),
                                    thread,
                                    in_bytes(JavaThread::frame_anchor_offset()) +
                                    in_bytes(JavaFrameAnchor::flags_offset()));
-#endif /* defined(SPARC) || defined(IA64) */
+#endif /* defined(SPARC) */
 
 
   // Drop in the last_Java_sp.  last_Java_fp is not touched.
@@ -102,10 +102,8 @@
   // users will look at the other fields.
   //
   Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
-#ifndef IA64
   Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
   store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias);
-#endif
 
   // Set _thread_in_native
   // The order of stores into TLS is critical!  Setting _thread_in_native MUST
@@ -210,19 +208,12 @@
   //-----------------------------
 
   // Clear last_Java_sp
-#ifdef IA64
-  if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease);
-#endif
-
   store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias);
-#ifdef IA64
-  if (os::is_MP() && UseMembar) insert_mem_bar(new MemBarVolatileNode());
-#endif // def IA64
   // Clear last_Java_pc and (optionally)_flags
   store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias);
-#if defined(SPARC) || defined(IA64)
+#if defined(SPARC)
   store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias);
-#endif /* defined(SPARC) || defined(IA64) */
+#endif /* defined(SPARC) */
 #ifdef IA64
   Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
   if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease);
--- a/src/share/vm/opto/parse.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/opto/parse.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -73,7 +73,8 @@
   const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result, bool& should_delay);
   const char* should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
   const char* should_not_inline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
-  void        print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const;
+  void        print_inlining(ciMethod* callee_method, int caller_bci,
+                             const char* msg, bool success) const;
 
   InlineTree *caller_tree()       const { return _caller_tree;  }
   InlineTree* callee_at(int bci, ciMethod* m) const;
--- a/src/share/vm/opto/parse3.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/opto/parse3.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -487,7 +487,8 @@
                           fun, NULL, TypeRawPtr::BOTTOM,
                           makecon(TypeKlassPtr::make(array_klass)),
                           length[0], length[1], length[2],
-                          length[3], length[4]);
+                          (ndimensions > 2) ? length[3] : NULL,
+                          (ndimensions > 3) ? length[4] : NULL);
   } else {
     // Create a java array for dimension sizes
     Node* dims = NULL;
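
Guarding length[3] and length[4] with ndimensions keeps the runtime call from
reading slots of the length array that were never written for lower-dimensional
allocations. The same shape in isolation (stand-in runtime entry; the indexing is
simplified relative to the hunk above):

    #include <cstdio>

    static void runtime_multianewarray(int ndim, const int* d0, const int* d1,
                                       const int* d2, const int* d3, const int* d4) {
      const int* dims[5] = { d0, d1, d2, d3, d4 };
      for (int i = 0; i < ndim; i++) std::printf("dim %d = %d\n", i, *dims[i]);
    }

    int main() {
      int length[5];                      // only the first ndimensions entries are set
      const int ndimensions = 3;
      length[0] = 2; length[1] = 3; length[2] = 4;
      runtime_multianewarray(ndimensions,
                             &length[0], &length[1], &length[2],
                             (ndimensions > 3) ? &length[3] : NULL,  // never read here
                             (ndimensions > 4) ? &length[4] : NULL);
      return 0;
    }
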
--- a/src/share/vm/precompiled/precompiled.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/precompiled/precompiled.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -24,6 +24,7 @@
 
 // Precompiled headers are turned off for Sun Studio,
 // or if the user passes USE_PRECOMPILED_HEADER=0 to the makefiles.
+
 #ifndef DONT_USE_PRECOMPILED_HEADER
 
 # include "asm/assembler.hpp"
@@ -285,7 +286,7 @@
 # include "c1/c1_ValueType.hpp"
 # include "c1/c1_globals.hpp"
 #endif // COMPILER1
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 # include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
 # include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 # include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
@@ -314,6 +315,6 @@
 # include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
 # include "gc_implementation/shared/gcPolicyCounters.hpp"
 # include "gc_implementation/shared/parGCAllocBuffer.hpp"
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #endif // !DONT_USE_PRECOMPILED_HEADER
--- a/src/share/vm/prims/jni.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/prims/jni.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -32,9 +32,10 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "interpreter/linkResolver.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/gcLocker.inline.hpp"
@@ -2641,7 +2642,7 @@
     o = JvmtiExport::jni_GetField_probe(thread, obj, o, k, fieldID, false);
   }
   jobject ret = JNIHandles::make_local(env, o->obj_field(offset));
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // If G1 is enabled and we are accessing the value of the referent
   // field in a reference object then we need to register a non-null
   // referent with the SATB barrier.
@@ -2660,7 +2661,7 @@
       G1SATBCardTableModRefBS::enqueue(referent);
     }
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetObjectField__return, ret);
 #else /* USDT2 */
--- a/src/share/vm/prims/jvmtiEnvBase.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -35,6 +35,7 @@
 #include "runtime/thread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
 
 //
 // Forward Declarations
--- a/src/share/vm/prims/jvmtiExport.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/prims/jvmtiExport.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -50,9 +50,10 @@
 #include "runtime/vframe.hpp"
 #include "services/attachListener.hpp"
 #include "services/serviceUtil.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 #ifdef JVMTI_TRACE
 #define EVT_TRACE(evt,out) if ((JvmtiTrace::event_trace_flags(evt) & JvmtiTrace::SHOW_EVENT_SENT) != 0) { SafeResourceMark rm; tty->print_cr out; }
--- a/src/share/vm/prims/jvmtiExport.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/prims/jvmtiExport.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -34,6 +34,7 @@
 #include "runtime/handles.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
 
 // Must be included after jvmti.h.
 #include "code/jvmticmlr.h"
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -45,9 +45,10 @@
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/serviceUtil.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // JvmtiTagHashmapEntry
 //
--- a/src/share/vm/prims/nativeLookup.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/prims/nativeLookup.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -40,6 +40,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
+#include "utilities/macros.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 #endif
--- a/src/share/vm/prims/unsafe.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/prims/unsafe.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -24,9 +24,10 @@
 
 #include "precompiled.hpp"
 #include "classfile/vmSymbols.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 #include "memory/allocation.inline.hpp"
 #include "prims/jni.h"
 #include "prims/jvm.h"
@@ -189,7 +190,7 @@
   if (obj == NULL)  THROW_0(vmSymbols::java_lang_NullPointerException());
   GET_OOP_FIELD(obj, offset, v)
   jobject ret = JNIHandles::make_local(env, v);
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // We could be accessing the referent field in a reference
   // object. If G1 is enabled then we need to register a non-null
   // referent with the SATB barrier.
@@ -212,7 +213,7 @@
       G1SATBCardTableModRefBS::enqueue(referent);
     }
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
   return ret;
 UNSAFE_END
 
@@ -247,7 +248,7 @@
   UnsafeWrapper("Unsafe_GetObject");
   GET_OOP_FIELD(obj, offset, v)
   jobject ret = JNIHandles::make_local(env, v);
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // We could be accessing the referent field in a reference
   // object. If G1 is enabled then we need to register non-null
   // referent with the SATB barrier.
@@ -270,7 +271,7 @@
       G1SATBCardTableModRefBS::enqueue(referent);
     }
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
   return ret;
 UNSAFE_END
 
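The Unsafe_GetObject change mirrors the JNI GetObjectField one earlier: under G1's
snapshot-at-the-beginning marking, a load of Reference.referent may be the only
remaining path to the referent, so the load has to announce the object to the
marker. A toy model of the pre-barrier (types and globals are stand-ins for the
per-thread SATB machinery; the real call is G1SATBCardTableModRefBS::enqueue):

    #include <cstdio>
    #include <vector>

    struct Obj { bool is_reference; Obj* referent; };

    static std::vector<Obj*> satb_queue;   // per-thread buffer in the real VM
    static bool marking_active = true;     // true during concurrent marking

    static Obj* read_referent(Obj* ref) {
      Obj* v = ref->referent;              // the raw field load
      // SATB invariant: the marker must see everything live at mark start. This
      // thread may now hold the only reference, so record it before proceeding.
      if (marking_active && ref->is_reference && v != NULL) {
        satb_queue.push_back(v);           // pre-barrier enqueue
      }
      return v;
    }

    int main() {
      Obj target = { false, NULL };
      Obj ref    = { true, &target };
      read_referent(&ref);
      std::printf("enqueued %zu referent(s)\n", satb_queue.size());
      return 0;
    }
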
--- a/src/share/vm/prims/whitebox.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/prims/whitebox.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -36,12 +36,13 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #ifdef INCLUDE_NMT
 #include "services/memTracker.hpp"
@@ -89,7 +90,7 @@
   return closure.found();
 WB_END
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
   oop result = JNIHandles::resolve(obj);
@@ -112,7 +113,7 @@
 WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
   return (jint)HeapRegion::GrainBytes;
 WB_END
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 #ifdef INCLUDE_NMT
 // Keep track of the 3 allocations in NMTAllocTest so we can free them later
@@ -229,12 +230,12 @@
       CC "(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
       (void*) &WB_ParseCommandLine
   },
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
   {CC"g1IsHumongous",      CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous     },
   {CC"g1NumFreeRegions",   CC"()J",                   (void*)&WB_G1NumFreeRegions  },
   {CC"g1RegionSize",       CC"()I",                   (void*)&WB_G1RegionSize      },
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 #ifdef INCLUDE_NMT
   {CC"NMTAllocTest",       CC"()Z",                   (void*)&WB_NMTAllocTest      },
   {CC"NMTFreeTestMemory",  CC"()Z",                   (void*)&WB_NMTFreeTestMemory },
--- a/src/share/vm/runtime/arguments.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/arguments.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -38,6 +38,7 @@
 #include "services/management.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/defaultStream.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/taskqueue.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
@@ -51,9 +52,9 @@
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // Note: This is a special bug reporting site for the JVM
 #define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp"
@@ -1089,7 +1090,7 @@
   }
 }
 
-#if INCLUDE_ALTERNATE_GCS
+#if INCLUDE_ALL_GCS
 static void disable_adaptive_size_policy(const char* collector_name) {
   if (UseAdaptiveSizePolicy) {
     if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
@@ -1301,7 +1302,7 @@
     tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
   }
 }
-#endif // INCLUDE_ALTERNATE_GCS
+#endif // INCLUDE_ALL_GCS
 
 void set_object_alignment() {
   // Object alignment.
@@ -1318,10 +1319,10 @@
   // Oop encoding heap max
   OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
 
-#if INCLUDE_ALTERNATE_GCS
+#if INCLUDE_ALL_GCS
   // Set CMS global values
   CompactibleFreeListSpace::set_cms_values();
-#endif // INCLUDE_ALTERNATE_GCS
+#endif // INCLUDE_ALL_GCS
 }
 
 bool verify_object_alignment() {
@@ -1998,7 +1999,7 @@
 
   status = status && verify_min_value(ParGCArrayScanChunk, 1, "ParGCArrayScanChunk");
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseG1GC) {
     status = status && verify_percentage(InitiatingHeapOccupancyPercent,
                                          "InitiatingHeapOccupancyPercent");
@@ -2007,7 +2008,7 @@
     status = status && verify_min_value((intx)G1ConcMarkStepDurationMillis, 1,
                                         "G1ConcMarkStepDurationMillis");
   }
-#endif
+#endif // INCLUDE_ALL_GCS
 
   status = status && verify_interval(RefDiscoveryPolicy,
                                      ReferenceProcessor::DiscoveryPolicyMin,
@@ -3176,7 +3177,7 @@
   UNSUPPORTED_OPTION(UseLargePages, "-XX:+UseLargePages");
 #endif
 
-#if !INCLUDE_ALTERNATE_GCS
+#if !INCLUDE_ALL_GCS
   if (UseParallelGC) {
     warning("Parallel GC is not supported in this VM.  Using Serial GC.");
   }
@@ -3189,7 +3190,7 @@
   if (UseParNewGC) {
     warning("Par New GC is not supported in this VM.  Using Serial GC.");
   }
-#endif // INCLUDE_ALTERNATE_GCS
+#endif // INCLUDE_ALL_GCS
 
 #ifndef PRODUCT
   if (TraceBytecodesAt != 0) {
@@ -3236,9 +3237,9 @@
   // Set object alignment values.
   set_object_alignment();
 
-#ifdef SERIALGC
+#if !INCLUDE_ALL_GCS
   force_serial_gc();
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 #if !INCLUDE_CDS
   no_shared_spaces();
 #endif // INCLUDE_CDS
@@ -3266,7 +3267,7 @@
   // Set heap size based on available physical memory
   set_heap_size();
 
-#if INCLUDE_ALTERNATE_GCS
+#if INCLUDE_ALL_GCS
   // Set per-collector flags
   if (UseParallelGC || UseParallelOldGC) {
     set_parallel_gc_flags();
@@ -3278,11 +3279,9 @@
     set_g1_gc_flags();
   }
   check_deprecated_gcs();
-#endif // INCLUDE_ALTERNATE_GCS
-
-#ifdef SERIALGC
+#else // INCLUDE_ALL_GCS
   assert(verify_serial_gc_flags(), "SerialGC unset");
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // Set bytecode rewriting flags
   set_bytecode_flags();
@@ -3376,7 +3375,7 @@
 }
 
 jint Arguments::adjust_after_os() {
-#if INCLUDE_ALTERNATE_GCS
+#if INCLUDE_ALL_GCS
   if (UseParallelGC || UseParallelOldGC) {
     if (UseNUMA) {
       if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
@@ -3387,7 +3386,7 @@
       UseNUMAInterleaving = true;
     }
   }
-#endif
+#endif // INCLUDE_ALL_GCS
   return JNI_OK;
 }
 
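When INCLUDE_ALL_GCS is off, the argument processing above warns about unsupported
collector selections and forces the serial collector. The warn-and-fall-back shape,
compressed into a standalone sketch (the flag variables are stand-ins):

    #include <cstdio>

    #define INCLUDE_ALL_GCS 0   // pretend this is a minimal build

    static bool UseParallelGC = true, UseG1GC = false, UseSerialGC = false;

    static void force_serial_gc() {
    #if !INCLUDE_ALL_GCS
      if (UseParallelGC) std::puts("warning: Parallel GC is not supported in this VM.  Using Serial GC.");
      if (UseG1GC)       std::puts("warning: G1 GC is not supported in this VM.  Using Serial GC.");
      UseParallelGC = UseG1GC = false;
      UseSerialGC   = true;
    #endif
    }

    int main() {
      force_serial_gc();
      std::printf("UseSerialGC = %d\n", UseSerialGC);
      return 0;
    }
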
--- a/src/share/vm/runtime/deoptimization.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/deoptimization.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -1559,7 +1559,7 @@
         if (trap_method() == nm->method()) {
           make_not_compilable = true;
         } else {
-          trap_method->set_not_compilable(CompLevel_full_optimization);
+          trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff");
           // But give grace to the enclosing nm->method().
         }
       }
--- a/src/share/vm/runtime/fprofiler.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/fprofiler.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_RUNTIME_FPROFILER_HPP
 #define SHARE_VM_RUNTIME_FPROFILER_HPP
 
+#include "utilities/macros.hpp"
 #include "runtime/timer.hpp"
 
 // a simple flat profiler for Java
--- a/src/share/vm/runtime/globals.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/globals.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -29,10 +29,11 @@
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
 #include "utilities/ostream.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/top.hpp"
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1_globals.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
@@ -256,9 +257,9 @@
 static Flag flagTable[] = {
  RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT, RUNTIME_LP64_PRODUCT_FLAG_STRUCT)
  RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT)
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
  C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
--- a/src/share/vm/runtime/globals_extension.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/globals_extension.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_RUNTIME_GLOBALS_EXTENSION_HPP
 
 #include "runtime/globals.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/top.hpp"
 
 // Construct enum of Flag_<cmdline-arg> constants.
@@ -94,9 +95,9 @@
 typedef enum {
  RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER, RUNTIME_LP64_PRODUCT_FLAG_MEMBER)
  RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER)
-#if INCLUDE_ALTERNATE_GCS
+#if INCLUDE_ALL_GCS
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
-#endif // INCLUDE_ALTERNATE_GCS
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
  C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
@@ -187,7 +188,7 @@
                   RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
                   RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
                   RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
-#if INCLUDE_ALTERNATE_GCS
+#if INCLUDE_ALL_GCS
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE,
           RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
           RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE,
@@ -197,7 +198,7 @@
           RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE,
           RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE,
           RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE)
-#endif // INCLUDE_ALTERNATE_GCS
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
  C1_FLAGS(C1_DEVELOP_FLAG_MEMBER_WITH_TYPE,
           C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
--- a/src/share/vm/runtime/init.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/init.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -34,6 +34,7 @@
 #include "runtime/init.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "utilities/macros.hpp"
 
 // Initialization done by VM thread in vm_init_globals()
 void check_ThreadShadow();
--- a/src/share/vm/runtime/java.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/java.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -64,6 +64,7 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/histogram.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 #ifdef TARGET_ARCH_x86
 # include "vm_version_x86.hpp"
@@ -80,11 +81,11 @@
 #ifdef TARGET_ARCH_ppc
 # include "vm_version_ppc.hpp"
 #endif
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #include "c1/c1_Runtime1.hpp"
--- a/src/share/vm/runtime/os.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/os.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -985,15 +985,28 @@
 // if C stack is walkable beyond current frame. The check for fp() is not
 // necessary on Sparc, but it's harmless.
 bool os::is_first_C_frame(frame* fr) {
-#ifdef IA64
-  // In order to walk native frames on Itanium, we need to access the unwind
-  // table, which is inside ELF. We don't want to parse ELF after fatal error,
-  // so return true for IA64. If we need to support C stack walking on IA64,
-  // this function needs to be moved to CPU specific files, as fp() on IA64
-  // is register stack, which grows towards higher memory address.
+#if defined(IA64) && !defined(_WIN32)
+  // On IA64 we have to check if the caller's bsp is still valid
+  // (i.e. within the register stack bounds).
+  // Notice: this only works for threads created by the VM and only if
+  // we walk the current stack!!! If we want to be able to walk
+  // arbitrary other threads, we'll have to somehow store the thread
+  // object in the frame.
+  Thread *thread = Thread::current();
+  if ((address)fr->fp() <=
+      thread->register_stack_base() HPUX_ONLY(+ 0x0) LINUX_ONLY(+ 0x50)) {
+    // This check is a little hacky, because on Linux the first C
+    // frame's ('start_thread') register stack frame starts at
+    // "register_stack_base + 0x48" while on HPUX, the first C frame's
+    // ('__pthread_bound_body') register stack frame seems to really
+    // start at "register_stack_base".
+    return true;
+  } else {
+    return false;
+  }
+#elif defined(IA64) && defined(_WIN32)
   return true;
-#endif
-
+#else
   // Load up sp, fp, sender sp and sender fp, check for reasonable values.
   // Check usp first, because if that's bad the other accessors may fault
   // on some architectures.  Ditto ufp second, etc.
@@ -1023,6 +1036,7 @@
   if (old_fp - ufp > 64 * K) return true;
 
   return false;
+#endif
 }
 
 #ifdef ASSERT
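
With the IA64 cases handled separately above, the portable tail of
os::is_first_C_frame still decides walkability purely from plausibility checks on
the saved sp/fp values. Those checks, rendered standalone and simplified (the real
code also validates alignment and reads the values through frame accessors):

    #include <cstdint>
    #include <cstdio>

    // Returns true when the sender frame looks unwalkable, i.e. treat the
    // current frame as the first C frame and stop.
    static bool is_first_C_frame_sketch(uintptr_t usp, uintptr_t ufp,
                                        uintptr_t old_sp, uintptr_t old_fp) {
      const uintptr_t K = 1024;
      if (usp == 0 || usp == uintptr_t(-1)) return true;       // nonsense sp
      if (ufp == 0 || ufp == uintptr_t(-1)) return true;       // nonsense fp
      if (old_sp < usp || old_sp - usp > 64 * K) return true;  // sender sp too far
      if (old_fp < ufp || old_fp - ufp > 64 * K) return true;  // sender fp too far
      return false;
    }

    int main() {
      std::printf("%d\n", is_first_C_frame_sketch(0x1000, 0x1010, 0x1100, 0x1110));
      std::printf("%d\n", is_first_C_frame_sketch(0x1000, 0x1010, 0x1000 + 65 * 1024, 0x1110));
      return 0;
    }
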
--- a/src/share/vm/runtime/safepoint.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/safepoint.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -52,6 +52,7 @@
 #include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "utilities/events.hpp"
+#include "utilities/macros.hpp"
 #ifdef TARGET_ARCH_x86
 # include "nativeInst_x86.hpp"
 # include "vmreg_x86.inline.hpp"
@@ -72,10 +73,10 @@
 # include "nativeInst_ppc.hpp"
 # include "vmreg_ppc.inline.hpp"
 #endif
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 #include "gc_implementation/shared/concurrentGCThread.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
@@ -103,7 +104,7 @@
     _ts_of_current_safepoint = tty->time_stamp().seconds();
   }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseConcMarkSweepGC) {
     // In the future we should investigate whether CMS can use the
     // more-general mechanism below.  DLD (01/05).
@@ -111,7 +112,7 @@
   } else if (UseG1GC) {
     ConcurrentGCThread::safepoint_synchronize();
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // By getting the Threads_lock, we assure that no threads are about to start or
   // exit. It is released again in SafepointSynchronize::end().
@@ -480,14 +481,14 @@
     Threads_lock->unlock();
 
   }
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // If there are any concurrent GC threads resume them.
   if (UseConcMarkSweepGC) {
     ConcurrentMarkSweepThread::desynchronize(false);
   } else if (UseG1GC) {
     ConcurrentGCThread::safepoint_desynchronize();
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
   // record this time so VMThread can keep track of how much time has elapsed
   // since the last safepoint.
   _end_of_last_safepoint = os::javaTimeMillis();
--- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -56,6 +56,7 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/xmlstream.hpp"
 #ifdef TARGET_ARCH_x86
 # include "nativeInst_x86.hpp"
@@ -212,7 +213,7 @@
 }
 #endif // PRODUCT
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 
 // G1 write-barrier pre: executed before a pointer store.
 JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
@@ -230,7 +231,7 @@
   thread->dirty_card_queue().enqueue(card_addr);
 JRT_END
 
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 
 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
@@ -2816,10 +2817,6 @@
 
 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
 
-#ifdef IA64
-  ShouldNotReachHere(); // NYI
-#endif /* IA64 */
-
   //
   // This code is dependent on the memory layout of the interpreter local
   // array and the monitors. On all of our platforms the layout is identical
--- a/src/share/vm/runtime/sharedRuntime.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -32,6 +32,7 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #include "utilities/hashtable.hpp"
+#include "utilities/macros.hpp"
 
 class AdapterHandlerEntry;
 class AdapterHandlerTable;
@@ -168,11 +169,11 @@
   static address raw_exception_handler_for_return_address(JavaThread* thread, address return_address);
   static address exception_handler_for_return_address(JavaThread* thread, address return_address);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // G1 write barriers
   static void g1_wb_pre(oopDesc* orig, JavaThread *thread);
   static void g1_wb_post(void* card_addr, JavaThread* thread);
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // exception handling and implicit exceptions
   static address compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
--- a/src/share/vm/runtime/synchronizer.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/synchronizer.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -53,7 +53,7 @@
 # include "os_bsd.inline.hpp"
 #endif
 
-#if defined(__GNUC__) && !defined(IA64)
+#if defined(__GNUC__)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define ATTR __attribute__((noinline))
 #else
--- a/src/share/vm/runtime/thread.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/thread.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -82,6 +82,7 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/preserveException.hpp"
+#include "utilities/macros.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 #endif
@@ -94,11 +95,11 @@
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #endif
@@ -1482,17 +1483,17 @@
   pd_initialize();
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 SATBMarkQueueSet JavaThread::_satb_mark_queue_set;
 DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 JavaThread::JavaThread(bool is_attaching_via_jni) :
   Thread()
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   , _satb_mark_queue(&_satb_mark_queue_set),
   _dirty_card_queue(&_dirty_card_queue_set)
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 {
   initialize();
   if (is_attaching_via_jni) {
@@ -1500,7 +1501,7 @@
   } else {
     _jni_attach_state = _not_attaching_via_jni;
   }
-  assert(_deferred_card_mark.is_empty(), "Default MemRegion ctor");
+  assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
   _safepoint_visible = false;
 }
 
@@ -1547,10 +1548,10 @@
 
 JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
   Thread()
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   , _satb_mark_queue(&_satb_mark_queue_set),
   _dirty_card_queue(&_dirty_card_queue_set)
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 {
   if (TraceThreadEvents) {
     tty->print_cr("creating thread %p", this);
@@ -1896,19 +1897,26 @@
     JvmtiExport::cleanup_thread(this);
   }
 
-#ifndef SERIALGC
-  // We must flush G1-related buffers before removing a thread from
+  // We must flush any deferred card marks before removing a thread from
   // the list of active threads.
+  Universe::heap()->flush_deferred_store_barrier(this);
+  assert(deferred_card_mark().is_empty(), "Should have been flushed");
+
+#if INCLUDE_ALL_GCS
+  // We must flush the G1-related buffers before removing a thread
+  // from the list of active threads. We must do this after any deferred
+  // card marks have been flushed (above) so that any entries that are
+  // added to the thread's dirty card queue as a result are not lost.
   if (UseG1GC) {
     flush_barrier_queues();
   }
-#endif
+#endif // INCLUDE_ALL_GCS
 
   // Remove from the list of active threads, and notify the VM thread if we are the last non-daemon thread
   Threads::remove(this);
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 // Flush G1-related queues.
 void JavaThread::flush_barrier_queues() {
   satb_mark_queue().flush();
@@ -1936,7 +1944,7 @@
   // active field set to true.
   assert(dirty_queue.is_active(), "dirty card queue should be active");
 }
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 void JavaThread::cleanup_failed_attach_current_thread() {
   if (get_thread_profiler() != NULL) {
@@ -1964,11 +1972,11 @@
     tlab().make_parsable(true);  // retire TLAB, if any
   }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   if (UseG1GC) {
     flush_barrier_queues();
   }
-#endif
+#endif // INCLUDE_ALL_GCS
 
   Threads::remove(this);
   delete this;
@@ -3600,7 +3608,7 @@
     vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
   }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Support for ConcurrentMarkSweep. This should be cleaned up
   // and better encapsulated. The ugly nested if test would go away
   // once things are properly refactored. XXX YSR
@@ -3614,7 +3622,7 @@
       vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
     }
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // Always call even when there are no JVMTI environments yet, since environments
   // may be attached late and JVMTI must track phases of VM execution
@@ -4187,7 +4195,7 @@
   }
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 // Used by ParallelScavenge
 void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
   ALL_JAVA_THREADS(p) {
@@ -4203,7 +4211,7 @@
   }
   q->enqueue(new ThreadRootsMarkingTask(VMThread::vm_thread()));
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 void Threads::nmethods_do(CodeBlobClosure* cf) {
   ALL_JAVA_THREADS(p) {
@@ -4311,13 +4319,13 @@
                );
   st->cr();
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Dump concurrent locks
   ConcurrentLocksDump concurrent_locks;
   if (print_concurrent_locks) {
     concurrent_locks.dump_at_safepoint();
   }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   ALL_JAVA_THREADS(p) {
     ResourceMark rm;
@@ -4330,11 +4338,11 @@
       }
     }
     st->cr();
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     if (print_concurrent_locks) {
       concurrent_locks.print_locks_on(p, st);
     }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
   }
 
   VMThread::vm_thread()->print_on(st);
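
The reordering in JavaThread::exit above encodes a dependency: flushing a deferred
card mark can itself append to the thread's dirty card queue, so the G1 barrier
queues may only be flushed afterwards or those entries would be lost. The
constraint in miniature (toy queue, not the real DirtyCardQueue):

    #include <cstdio>
    #include <vector>

    static std::vector<const char*> dirty_card_queue;

    static void flush_deferred_store_barrier() {
      // Completing a deferred card mark may enqueue onto the dirty card queue...
      dirty_card_queue.push_back("card for deferred mark");
    }

    static void flush_barrier_queues() {
      // ...so the G1 queues must be drained only after that has happened.
      std::printf("flushing %zu dirty card(s)\n", dirty_card_queue.size());
      dirty_card_queue.clear();
    }

    int main() {
      flush_deferred_store_barrier();  // first: may add entries
      flush_barrier_queues();          // then: nothing is lost
      return 0;
    }
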
--- a/src/share/vm/runtime/thread.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/thread.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -41,6 +41,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/unhandledOops.hpp"
+#include "utilities/macros.hpp"
 
 #if INCLUDE_NMT
 #include "services/memRecorder.hpp"
@@ -49,10 +50,10 @@
 #include "trace/tracing.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/top.hpp"
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/dirtyCardQueue.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 #ifdef ZERO
 #ifdef TARGET_ARCH_zero
 # include "stack_zero.hpp"
@@ -929,7 +930,7 @@
   }   _jmp_ring[ jump_ring_buffer_size ];
 #endif /* PRODUCT */
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // Support for G1 barriers
 
   ObjPtrQueue _satb_mark_queue;          // Thread-local log for SATB barrier.
@@ -941,7 +942,7 @@
   static DirtyCardQueueSet _dirty_card_queue_set;
 
   void flush_barrier_queues();
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   friend class VMThread;
   friend class ThreadWaitTransition;
@@ -1345,10 +1346,10 @@
     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
   }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
   static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // Returns the jni environment for this thread
   JNIEnv* jni_environment()                      { return &_jni_environment; }
@@ -1637,7 +1638,7 @@
     _stack_size_at_create = value;
   }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // SATB marking queue support
   ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; }
   static SATBMarkQueueSet& satb_mark_queue_set() {
@@ -1649,7 +1650,7 @@
   static DirtyCardQueueSet& dirty_card_queue_set() {
     return _dirty_card_queue_set;
   }
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // This method initializes the SATB and dirty card queues before a
   // JavaThread is added to the Java thread list. Right now, we don't
@@ -1668,11 +1669,11 @@
   // might happen between the JavaThread constructor being called and the
   // thread being added to the Java thread list (an example of this is
   // when the structure for the DestroyJavaVM thread is created).
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   void initialize_queues();
-#else // !SERIALGC
+#else  // INCLUDE_ALL_GCS
   void initialize_queues() { }
-#endif // !SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   // Machine dependent stuff
 #ifdef TARGET_OS_ARCH_linux_x86
--- a/src/share/vm/runtime/vframeArray.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/vframeArray.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -233,8 +233,6 @@
       // Force early return from top frame after deoptimization
 #ifndef CC_INTERP
       pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
-#else
-     // TBD: Need to implement ForceEarlyReturn for CC_INTERP (ia64)
 #endif
     } else {
       // Possibly override the previous pc computation of the top (youngest) frame
--- a/src/share/vm/runtime/vmStructs.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/runtime/vmStructs.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -101,6 +101,7 @@
 #include "utilities/array.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/hashtable.hpp"
+#include "utilities/macros.hpp"
 #ifdef TARGET_ARCH_x86
 # include "vmStructs_x86.hpp"
 #endif
@@ -146,7 +147,7 @@
 #ifdef TARGET_OS_ARCH_bsd_zero
 # include "vmStructs_bsd_zero.hpp"
 #endif
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
@@ -161,7 +162,7 @@
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp"
 #include "gc_implementation/g1/vmStructs_g1.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER2
 #include "opto/addnode.hpp"
 #include "opto/block.hpp"
@@ -1161,6 +1162,7 @@
   static_field(Abstract_VM_Version,            _vm_major_version,                             int)                                   \
   static_field(Abstract_VM_Version,            _vm_minor_version,                             int)                                   \
   static_field(Abstract_VM_Version,            _vm_build_number,                              int)                                   \
+  static_field(Abstract_VM_Version,            _reserve_for_allocation_prefetch,              int)                                   \
                                                                                                                                      \
   static_field(JDK_Version,                    _current,                                      JDK_Version)                           \
   nonstatic_field(JDK_Version,                 _partially_initialized,                        bool)                                  \
@@ -2087,8 +2089,7 @@
   declare_toplevel_type(FreeBlockDictionary<Metablock>*)                  \
   declare_toplevel_type(FreeList<Metablock>*)                             \
   declare_toplevel_type(FreeList<Metablock>)                              \
-  declare_toplevel_type(MetablockTreeDictionary*)                         \
-           declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>)
+  declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>)
 
 
 //--------------------------------------------------------------------------------
@@ -2786,7 +2787,7 @@
              GENERATE_C1_UNCHECKED_STATIC_VM_STRUCT_ENTRY,
              GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   VM_STRUCTS_PARALLELGC(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
                         GENERATE_STATIC_VM_STRUCT_ENTRY)
 
@@ -2796,7 +2797,7 @@
 
   VM_STRUCTS_G1(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
                 GENERATE_STATIC_VM_STRUCT_ENTRY)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
                  GENERATE_STATIC_VM_STRUCT_ENTRY,
@@ -2830,7 +2831,7 @@
            GENERATE_C2_VM_TYPE_ENTRY,
            GENERATE_C2_TOPLEVEL_VM_TYPE_ENTRY)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   VM_TYPES_PARALLELGC(GENERATE_VM_TYPE_ENTRY,
                       GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
 
@@ -2841,7 +2842,7 @@
 
   VM_TYPES_G1(GENERATE_VM_TYPE_ENTRY,
               GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
                GENERATE_TOPLEVEL_VM_TYPE_ENTRY,
@@ -2872,11 +2873,11 @@
                    GENERATE_C2_VM_INT_CONSTANT_ENTRY,
                    GENERATE_C2_PREPROCESSOR_VM_INT_CONSTANT_ENTRY)
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   VM_INT_CONSTANTS_CMS(GENERATE_VM_INT_CONSTANT_ENTRY)
 
   VM_INT_CONSTANTS_PARNEW(GENERATE_VM_INT_CONSTANT_ENTRY)
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   VM_INT_CONSTANTS_CPU(GENERATE_VM_INT_CONSTANT_ENTRY,
                        GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY,
@@ -2930,7 +2931,7 @@
              CHECK_NO_OP,
              CHECK_NO_OP);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   VM_STRUCTS_PARALLELGC(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
              CHECK_STATIC_VM_STRUCT_ENTRY);
 
@@ -2940,7 +2941,7 @@
 
   VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
                 CHECK_STATIC_VM_STRUCT_ENTRY);
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
                  CHECK_STATIC_VM_STRUCT_ENTRY,
@@ -2969,7 +2970,7 @@
            CHECK_C2_VM_TYPE_ENTRY,
            CHECK_C2_TOPLEVEL_VM_TYPE_ENTRY);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   VM_TYPES_PARALLELGC(CHECK_VM_TYPE_ENTRY,
                       CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
 
@@ -2980,7 +2981,7 @@
 
   VM_TYPES_G1(CHECK_VM_TYPE_ENTRY,
               CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
   VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
                CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
@@ -3035,7 +3036,7 @@
                         ENSURE_C2_FIELD_TYPE_PRESENT,
                         CHECK_NO_OP,
                         CHECK_NO_OP));
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   debug_only(VM_STRUCTS_PARALLELGC(ENSURE_FIELD_TYPE_PRESENT,
                                    ENSURE_FIELD_TYPE_PRESENT));
   debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT,
@@ -3043,7 +3044,7 @@
                             ENSURE_FIELD_TYPE_PRESENT));
   debug_only(VM_STRUCTS_G1(ENSURE_FIELD_TYPE_PRESENT,
                            ENSURE_FIELD_TYPE_PRESENT));
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
   debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT,
                             ENSURE_FIELD_TYPE_PRESENT,
                             CHECK_NO_OP,
--- a/src/share/vm/services/attachListener.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/attachListener.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/ostream.hpp"
+#include "utilities/macros.hpp"
 
 // The AttachListener thread services a queue of operations that are enqueued
 // by client tools. Each operation is identified by a name and has up to 3
--- a/src/share/vm/services/classLoadingService.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/classLoadingService.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -31,6 +31,7 @@
 #include "services/classLoadingService.hpp"
 #include "services/memoryService.hpp"
 #include "utilities/dtrace.hpp"
+#include "utilities/macros.hpp"
 
 #ifdef DTRACE_ENABLED
 
--- a/src/share/vm/services/classLoadingService.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/classLoadingService.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -28,6 +28,7 @@
 #include "runtime/handles.hpp"
 #include "runtime/perfData.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
 
 class InstanceKlass;
 
--- a/src/share/vm/services/diagnosticCommand.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/diagnosticCommand.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -30,6 +30,7 @@
 #include "services/diagnosticFramework.hpp"
 #include "services/heapDumper.hpp"
 #include "services/management.hpp"
+#include "utilities/macros.hpp"
 
 void DCmdRegistrant::register_dcmds(){
   // Registration of the diagnostic commands
--- a/src/share/vm/services/diagnosticCommand.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/diagnosticCommand.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -35,6 +35,7 @@
 #include "services/diagnosticCommand.hpp"
 #include "services/diagnosticFramework.hpp"
 #include "services/diagnosticCommand_ext.hpp"
+#include "utilities/macros.hpp"
 
 class HelpDCmd : public DCmdWithParser {
 protected:
--- a/src/share/vm/services/g1MemoryPool.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/g1MemoryPool.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,11 +25,12 @@
 #ifndef SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
 #define SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "services/memoryPool.hpp"
 #include "services/memoryUsage.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // This file contains the three classes that represent the memory
 // pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
--- a/src/share/vm/services/heapDumper.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/heapDumper.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -40,9 +40,10 @@
 #include "services/heapDumper.hpp"
 #include "services/threadService.hpp"
 #include "utilities/ostream.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 /*
  * HPROF binary format - description copied from:
--- a/src/share/vm/services/management.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/management.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -53,6 +53,7 @@
 #include "services/memoryService.hpp"
 #include "services/runtimeService.hpp"
 #include "services/threadService.hpp"
+#include "utilities/macros.hpp"
 
 PerfVariable* Management::_begin_vm_creation_time = NULL;
 PerfVariable* Management::_end_vm_creation_time = NULL;
--- a/src/share/vm/services/memReporter.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/memReporter.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -29,6 +29,7 @@
 #include "services/memBaseline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/ostream.hpp"
+#include "utilities/macros.hpp"
 
 #if INCLUDE_NMT
 
--- a/src/share/vm/services/memoryPool.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/memoryPool.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -32,6 +32,7 @@
 #include "services/management.hpp"
 #include "services/memoryManager.hpp"
 #include "services/memoryPool.hpp"
+#include "utilities/macros.hpp"
 
 MemoryPool::MemoryPool(const char* name,
                        PoolType type,
@@ -208,7 +209,7 @@
   return MemoryUsage(initial_size(), used, committed, maxSize);
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 CompactibleFreeListSpacePool::CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
                                                            const char* name,
                                                            PoolType type,
@@ -225,7 +226,7 @@
 
   return MemoryUsage(initial_size(), used, committed, maxSize);
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 GenerationPool::GenerationPool(Generation* gen,
                                const char* name,
--- a/src/share/vm/services/memoryPool.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/memoryPool.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -30,9 +30,10 @@
 #include "memory/heap.hpp"
 #include "memory/space.hpp"
 #include "services/memoryUsage.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // A memory pool represents the memory area that the VM manages.
 // The Java virtual machine has at least one memory pool
@@ -185,7 +186,7 @@
   }
 };
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 class CompactibleFreeListSpacePool : public CollectedMemoryPool {
 private:
   CompactibleFreeListSpace* _space;
@@ -199,7 +200,7 @@
   MemoryUsage get_memory_usage();
   size_t used_in_bytes()            { return _space->used(); }
 };
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 
 class GenerationPool : public CollectedMemoryPool {
--- a/src/share/vm/services/memoryService.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/memoryService.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -43,7 +43,8 @@
 #include "services/memoryPool.hpp"
 #include "services/memoryService.hpp"
 #include "utilities/growableArray.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
@@ -52,7 +53,7 @@
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "services/g1MemoryPool.hpp"
 #include "services/psMemoryPool.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 GrowableArray<MemoryPool*>* MemoryService::_pools_list =
   new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_pools_list_size, true);
@@ -83,7 +84,7 @@
       add_gen_collected_heap_info(GenCollectedHeap::heap());
       break;
     }
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case CollectedHeap::ParallelScavengeHeap : {
       add_parallel_scavenge_heap_info(ParallelScavengeHeap::heap());
       break;
@@ -92,7 +93,7 @@
       add_g1_heap_info(G1CollectedHeap::heap());
       break;
     }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     default: {
       guarantee(false, "Unrecognized kind of heap");
     }
@@ -130,22 +131,22 @@
       case Generation::DefNew:
         _minor_gc_manager = MemoryManager::get_copy_memory_manager();
         break;
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
       case Generation::ParNew:
       case Generation::ASParNew:
         _minor_gc_manager = MemoryManager::get_parnew_memory_manager();
         break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
       default:
         guarantee(false, "Unrecognized generation spec");
         break;
     }
     if (policy->is_mark_sweep_policy()) {
       _major_gc_manager = MemoryManager::get_msc_memory_manager();
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     } else if (policy->is_concurrent_mark_sweep_policy()) {
       _major_gc_manager = MemoryManager::get_cms_memory_manager();
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     } else {
       guarantee(false, "Unknown two-gen policy");
     }
@@ -159,7 +160,7 @@
   add_generation_memory_pool(heap->get_gen(major), _major_gc_manager);
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 // Add memory pools for ParallelScavengeHeap
 // This function currently only supports two generations collected heap.
 // The collector for ParallelScavengeHeap will have two memory managers.
@@ -185,7 +186,7 @@
   add_g1YoungGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager);
   add_g1OldGen_memory_pool(g1h, _major_gc_manager);
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 MemoryPool* MemoryService::add_gen(Generation* gen,
                                    const char* name,
@@ -222,7 +223,7 @@
   return (MemoryPool*) pool;
 }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 MemoryPool* MemoryService::add_cms_space(CompactibleFreeListSpace* space,
                                          const char* name,
                                          bool is_heap,
@@ -233,7 +234,7 @@
   _pools_list->append(pool);
   return (MemoryPool*) pool;
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 // Add memory pool(s) for one generation
 void MemoryService::add_generation_memory_pool(Generation* gen,
@@ -261,7 +262,7 @@
       break;
     }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case Generation::ParNew:
     case Generation::ASParNew:
     {
@@ -282,7 +283,7 @@
 
       break;
     }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
     case Generation::MarkSweepCompact: {
       assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
@@ -293,7 +294,7 @@
       break;
     }
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case Generation::ConcurrentMarkSweep:
     case Generation::ASConcurrentMarkSweep:
     {
@@ -306,7 +307,7 @@
                                        true  /* support_usage_threshold */);
       break;
     }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
     default:
       assert(false, "should not reach here");
@@ -326,7 +327,7 @@
 }
 
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 void MemoryService::add_psYoung_memory_pool(PSYoungGen* gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) {
   assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
 
@@ -384,7 +385,7 @@
   mgr->add_pool(old_gen);
   _pools_list->append(old_gen);
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
   _code_heap_pool = new CodeHeapPool(heap,
@@ -534,17 +535,17 @@
 TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause) {
   switch (kind) {
     case Generation::DefNew:
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case Generation::ParNew:
     case Generation::ASParNew:
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
       _fullGC=false;
       break;
     case Generation::MarkSweepCompact:
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case Generation::ConcurrentMarkSweep:
     case Generation::ASConcurrentMarkSweep:
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
       _fullGC=true;
       break;
     default:
--- a/src/share/vm/services/psMemoryPool.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/psMemoryPool.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,7 +25,8 @@
 #ifndef SHARE_VM_SERVICES_PSMEMORYPOOL_HPP
 #define SHARE_VM_SERVICES_PSMEMORYPOOL_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "gc_implementation/shared/mutableSpace.hpp"
@@ -34,7 +35,7 @@
 #include "memory/space.hpp"
 #include "services/memoryPool.hpp"
 #include "services/memoryUsage.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 class PSGenerationPool : public CollectedMemoryPool {
 private:
--- a/src/share/vm/services/runtimeService.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/services/runtimeService.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -29,6 +29,7 @@
 #include "services/runtimeService.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/exceptions.hpp"
+#include "utilities/macros.hpp"
 
 #ifndef USDT2
 HS_DTRACE_PROBE_DECL(hs_private, safepoint__begin);
--- a/src/share/vm/utilities/macros.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/utilities/macros.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -130,23 +130,23 @@
 #endif // INCLUDE_MANAGEMENT
 
 /*
- * When INCLUDE_ALTERNATE_GCS is false the only garbage collectors
+ * When INCLUDE_ALL_GCS is false, the only garbage collectors
  * included in the JVM are defaultNewGeneration and markCompact.
  *
- * When INCLUDE_ALTERNATE_GCS is true all garbage collectors are
+ * When INCLUDE_ALL_GCS is true, all garbage collectors are
  * included in the JVM.
  */
-#ifndef INCLUDE_ALTERNATE_GCS
-#define INCLUDE_ALTERNATE_GCS 1
-#endif // INCLUDE_ALTERNATE_GCS
+#ifndef INCLUDE_ALL_GCS
+#define INCLUDE_ALL_GCS 1
+#endif // INCLUDE_ALL_GCS
 
-#if INCLUDE_ALTERNATE_GCS
-#define NOT_ALTERNATE_GCS_RETURN        /* next token must be ; */
-#define NOT_ALTERNATE_GCS_RETURN_(code) /* next token must be ; */
+#if INCLUDE_ALL_GCS
+#define NOT_ALL_GCS_RETURN        /* next token must be ; */
+#define NOT_ALL_GCS_RETURN_(code) /* next token must be ; */
 #else
-#define NOT_ALTERNATE_GCS_RETURN        {}
-#define NOT_ALTERNATE_GCS_RETURN_(code) { return code; }
-#endif // INCLUDE_ALTERNATE_GCS
+#define NOT_ALL_GCS_RETURN        {}
+#define NOT_ALL_GCS_RETURN_(code) { return code; }
+#endif // INCLUDE_ALL_GCS
 
 #ifndef INCLUDE_NMT
 #define INCLUDE_NMT 1
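
The renamed NOT_ALL_GCS_RETURN helpers above follow the usual HotSpot stub idiom (compare NOT_PRODUCT_RETURN in the same header): append the macro to a member declaration, and the same line is a plain declaration when all GCs are built in but an empty inline stub when they are not. A hedged sketch with a made-up class, not code from this changeset:

    #include "utilities/macros.hpp"

    class ExampleService {   // hypothetical class, for illustration only
     public:
      // With INCLUDE_ALL_GCS == 1 the macro expands to nothing, leaving a
      // declaration whose body lives in a .cpp file that is itself guarded
      // by #if INCLUDE_ALL_GCS; otherwise it expands to "{}", an empty stub.
      void notify_gcs() NOT_ALL_GCS_RETURN;

      // The _(code) variant supplies the stub's return value: in a minimal
      // build this expands to "{ return false; }".
      bool gcs_supported() NOT_ALL_GCS_RETURN_(false);
    };
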
--- a/src/share/vm/utilities/top.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/utilities/top.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -33,9 +33,9 @@
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/sizes.hpp"
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1_globals.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
--- a/src/share/vm/utilities/vmError.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/utilities/vmError.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -917,7 +917,7 @@
         bool copy_ok =
           Arguments::copy_expand_pid(ErrorFile, strlen(ErrorFile), buffer, sizeof(buffer));
         if (copy_ok) {
-          fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0666);
+          fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0600);
         }
       }
 
@@ -928,7 +928,7 @@
         // so use the default name in the current directory
         jio_snprintf(&buffer[len], sizeof(buffer)-len, "%shs_err_pid%u.log",
                      os::file_separator(), os::current_process_id());
-        fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0666);
+        fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0600);
       }
 
       if (fd == -1) {
@@ -937,7 +937,7 @@
         if (tmpdir != NULL && tmpdir[0] != '\0') {
           jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log",
                        tmpdir, os::file_separator(), os::current_process_id());
-          fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0666);
+          fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0600);
         }
       }
 
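The vmError.cpp hunks above are a permissions fix rather than part of the rename: the hs_err crash log is now created owner-read/write only. The requested mode is still filtered through the process umask, so 0666 typically yielded a world-readable file (0644 under the common umask of 022), whereas 0600 is owner-only regardless of umask. A standalone sketch of the effect; this is not HotSpot code:

    #include <fcntl.h>

    int open_error_log(const char* path) {
      // 0666 & ~022 == 0644: group and others could read the crash log,
      // which can expose register contents and environment strings.
      // 0600 & ~umask is at most 0600: only the owner can read it.
      return open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
    }
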
--- a/src/share/vm/utilities/yieldingWorkgroup.cpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/utilities/yieldingWorkgroup.cpp	Fri Feb 08 16:56:03 2013 -0800
@@ -23,9 +23,10 @@
  */
 
 #include "precompiled.hpp"
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "utilities/yieldingWorkgroup.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 // Forward declaration of classes declared here.
 
--- a/src/share/vm/utilities/yieldingWorkgroup.hpp	Fri Feb 08 13:55:41 2013 -0800
+++ b/src/share/vm/utilities/yieldingWorkgroup.hpp	Fri Feb 08 16:56:03 2013 -0800
@@ -25,9 +25,10 @@
 #ifndef SHARE_VM_UTILITIES_YIELDINGWORKGROUP_HPP
 #define SHARE_VM_UTILITIES_YIELDINGWORKGROUP_HPP
 
-#ifndef SERIALGC
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
 #include "utilities/workgroup.hpp"
-#endif
+#endif // INCLUDE_ALL_GCS
 
 
 // Forward declarations
--- a/test/compiler/8004741/Test8004741.java	Fri Feb 08 13:55:41 2013 -0800
+++ b/test/compiler/8004741/Test8004741.java	Fri Feb 08 16:56:03 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,70 +25,160 @@
  * @test Test8004741.java
  * @bug 8004741
  * @summary Missing compiled exception handle table entry for multidimensional array allocation
+ * @run main/othervm -Xmx64m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers -XX:+SafepointALot -XX:GuaranteedSafepointInterval=100 Test8004741
  * @run main/othervm -Xmx64m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers Test8004741
- *
  */
 
 import java.util.*;
 
 public class Test8004741 extends Thread {
 
+  static int passed = 0;
+
+  /**
+   * Allocates a 2-d array; the test thread calls this in a loop.
+   * Catches ThreadDeath, increments passed, and rethrows it.
+   * Note that passed is incremented here because this is the exception
+   * handler with the smallest scope; we only want to declare success when
+   * it is highly likely that the tested condition
+   * (a 2-d array allocation interrupted by ThreadDeath)
+   * actually occurred.
+   */
   static int[][] test(int a, int b) throws Exception {
-    int[][] ar = null;
+    int[][] ar;
     try {
       ar = new int[a][b];
-    } catch (Error e) {
-      System.out.println("test got Error");
-      passed = true;
-      throw(e);
-    } catch (Exception e) {
-      System.out.println("test got Exception");
+    } catch (ThreadDeath e) {
+      System.out.println("test got ThreadDeath");
+      passed++;
       throw(e);
     }
     return ar;
   }
 
-  static boolean passed = false;
+  /* Cookbook wait-notify to track the progress of the test thread. */
+  Object progressLock = new Object();
+  private static final int NOT_STARTED = 0;
+  private static final int RUNNING = 1;
+  private static final int STOPPING = 2;
+
+  int progressState = NOT_STARTED;
 
-  public void run() {
-      System.out.println("test started");
-      try {
-        while(true) {
-          test(2,20000);
+  void toState(int state) {
+    synchronized (progressLock) {
+      progressState = state;
+      progressLock.notify();
+    }
+  }
+
+  void waitFor(int state) {
+    synchronized (progressLock) {
+      while (progressState < state) {
+        try {
+          progressLock.wait();
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+          System.out.println("unexpected InterruptedException");
+          fail();
         }
-      } catch (ThreadDeath e) {
-        System.out.println("test got ThreadDeath");
-        passed = true;
-      } catch (Error e) {
-        e.printStackTrace();
-        System.out.println("test got Error");
-      } catch (Exception e) {
-        e.printStackTrace();
-        System.out.println("test got Exception");
+      }
+      if (progressState > state) {
+        System.out.println("unexpected test state change, expected " +
+                            state + " but saw " + progressState);
+        fail();
+      }
+    }
+  }
+
+  /**
+   * Loops calling test() until an exception or error occurs;
+   * expects to see ThreadDeath.
+   */
+  public void run() {
+    try {
+      // Print before the state change, so that the other thread is most
+      // likely to see this thread executing calls to test() in a loop.
+      System.out.println("thread running");
+      toState(RUNNING);
+      while (true) {
+        // Sizes (2,2), (2,10), and (2,100) were observed to tickle the bug.
+        test(2, 100);
       }
+    } catch (ThreadDeath e) {
+      // Nothing to do here; passed was already incremented in test().
+    } catch (Throwable e) {
+      e.printStackTrace();
+      System.out.println("unexpected Throwable " + e);
+      fail();
+    }
+    toState(STOPPING);
+  }
+
+  /**
+   * Runs a single trial of the test in a thread.
+   * No single trial is definitive, since the ThreadDeath
+   * exception might not land in the tested region of code.
+   */
+  public static void threadTest() throws InterruptedException {
+    Test8004741 t = new Test8004741();
+    t.start();
+    t.waitFor(RUNNING);
+    Thread.sleep(100);
+    System.out.println("stopping thread");
+    t.stop();
+    t.waitFor(STOPPING);
+    t.join();
   }
 
   public static void main(String[] args) throws Exception {
+    // Warm up "test"
+    // t will never be started.
     for (int n = 0; n < 11000; n++) {
-      test(2, 20);
+      test(2, 100);
+    }
+
+    // Pause briefly to give background compilation a chance to finish.
+    Thread.sleep(500);
+    passed = 0;
+
+    try {
+      test(-1, 100);
+      System.out.println("Missing NegativeArraySizeException #1");
+      fail();
+    } catch (java.lang.NegativeArraySizeException e) {
+      System.out.println("Saw expected NegativeArraySizeException #1");
     }
 
-    // First test exception catch
-    Test8004741 t = new Test8004741();
+    try {
+      test(100, -1);
+      System.out.println("Missing NegativeArraySizeException #2");
+      fail();
+    } catch (java.lang.NegativeArraySizeException e) {
+      System.out.println("Saw expected NegativeArraySizeException #2");
+    }
 
-    passed = false;
-    t.start();
-    Thread.sleep(1000);
-    t.stop();
+    /* Test repetitions.  If most trials see ThreadDeath, the test passes,
+     * as long as the VM does not crash (the outcome when the exception
+     * range table entry for the array allocation is missing).
+     */
+    int N = 12;
+    for (int n = 0; n < N; n++) {
+      threadTest();
+    }
 
-    Thread.sleep(5000);
-    t.join();
-    if (passed) {
+    if (passed > N/2) {
+      System.out.println("Saw " + passed + " out of " + N + " possible ThreadDeath hits");
       System.out.println("PASSED");
     } else {
-      System.out.println("FAILED");
-      System.exit(97);
+      System.out.println("Too few ThreadDeath hits; expected at least " + N/2 +
+                         " but saw only " + passed);
+      fail();
     }
   }
 
+  static void fail() {
+    System.out.println("FAILED");
+    System.exit(97);
+  }
 };