changeset 1704:63f4675ac87d

Merge
author kevinw
date Sat, 31 Jul 2010 15:10:59 +0100
parents f6f3eef8a521 (current diff) 1890dc9151da (diff)
children 2d160770d2e5
files src/os/linux/vm/vtune_linux.cpp src/os/solaris/vm/vtune_solaris.cpp src/os/windows/vm/vtune_windows.cpp src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp src/share/vm/runtime/vtune.hpp
diffstat 188 files changed, 4058 insertions(+), 2688 deletions(-)
--- a/.hgtags	Fri Jul 30 22:43:50 2010 +0100
+++ b/.hgtags	Sat Jul 31 15:10:59 2010 +0100
@@ -98,3 +98,11 @@
 d38f45079fe98792a7381dbb4b64f5b589ec8c58 jdk7-b94
 8bfe9058ca4661779ac1d0572329f3943e68362e hs19-b01
 91d861ba858daca645993a1ab6ba2fa06a8f4a5b jdk7-b95
+573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 jdk7-b96
+573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 hs19-b02
+5f42499e57adc16380780f40541e1a66cd601891 jdk7-b97
+8a045b3f5c13eaad92ff4baf15ca671845fcad1a jdk7-b98
+6a236384a379642b5a2398e2819db9ab4e711e9b jdk7-b99
+ad1977f08c4d69162a0775fe3f9576b9fd521d10 jdk7-b100
+6c3a919105b68c15b7db923ec9a00006e9560910 jdk7-b101
+ad1977f08c4d69162a0775fe3f9576b9fd521d10 hs19-b03
--- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Sat Jul 31 15:10:59 2010 +0100
@@ -35,7 +35,6 @@
 
 public class NMethod extends CodeBlob {
   private static long          pcDescSize;
-  private static CIntegerField zombieInstructionSizeField;
   private static sun.jvm.hotspot.types.OopField methodField;
   /** != InvocationEntryBci if this nmethod is an on-stack replacement method */
   private static CIntegerField entryBCIField;
@@ -88,7 +87,6 @@
   private static void initialize(TypeDataBase db) {
     Type type = db.lookupType("nmethod");
 
-    zombieInstructionSizeField  = type.getCIntegerField("_zombie_instruction_size");
     methodField                 = type.getOopField("_method");
     entryBCIField               = type.getCIntegerField("_entry_bci");
     osrLinkField                = type.getAddressField("_osr_link");
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Sat Jul 31 15:10:59 2010 +0100
@@ -72,6 +72,7 @@
       addBytecodeClass(Bytecodes._invokestatic, BytecodeInvoke.class);
       addBytecodeClass(Bytecodes._invokespecial, BytecodeInvoke.class);
       addBytecodeClass(Bytecodes._invokeinterface, BytecodeInvoke.class);
+      addBytecodeClass(Bytecodes._invokedynamic, BytecodeInvoke.class);
       addBytecodeClass(Bytecodes._jsr, BytecodeJsr.class);
       addBytecodeClass(Bytecodes._jsr_w, BytecodeJsrW.class);
       addBytecodeClass(Bytecodes._iload, BytecodeLoad.class);
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java	Sat Jul 31 15:10:59 2010 +0100
@@ -54,15 +54,31 @@
   // returns the name of the invoked method
   public Symbol name() {
     ConstantPool cp = method().getConstants();
+    if (isInvokedynamic()) {
+       int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod());
+       return cp.getSymbolAt(nt[0]);
+    }
     return cp.getNameRefAt(index());
   }
 
   // returns the signature of the invoked method
   public Symbol signature() {
     ConstantPool cp = method().getConstants();
+    if (isInvokedynamic()) {
+       int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod());
+       return cp.getSymbolAt(nt[1]);
+    }
     return cp.getSignatureRefAt(index());
   }
 
+  public int getSecondaryIndex() {
+    if (isInvokedynamic()) {
+      // change byte-ordering of 4-byte integer
+      return VM.getVM().getBytes().swapInt(javaSignedWordAt(1));
+    }
+    return super.getSecondaryIndex();  // throws IllegalArgumentException
+  }
+
   public Method getInvokedMethod() {
     return method().getConstants().getMethodRefAt(index());
   }
@@ -87,6 +103,7 @@
   public boolean isInvokevirtual()   { return adjustedInvokeCode() == Bytecodes._invokevirtual;   }
   public boolean isInvokestatic()    { return adjustedInvokeCode() == Bytecodes._invokestatic;    }
   public boolean isInvokespecial()   { return adjustedInvokeCode() == Bytecodes._invokespecial;   }
+  public boolean isInvokedynamic()   { return adjustedInvokeCode() == Bytecodes._invokedynamic; }
 
   public boolean isValid()           { return isInvokeinterface() ||
                                               isInvokevirtual()   ||
@@ -104,6 +121,11 @@
     buf.append(spaces);
     buf.append('#');
     buf.append(Integer.toString(indexForFieldOrMethod()));
+    if (isInvokedynamic()) {
+       buf.append('(');
+       buf.append(Integer.toString(getSecondaryIndex()));
+       buf.append(')');
+    }
     buf.append(" [Method ");
     StringBuffer sigBuf = new StringBuffer();
     new SignatureConverter(signature(), sigBuf).iterateReturntype();
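Note: getSecondaryIndex() above reads the four operand bytes that follow the
0xba opcode and swaps them from the target VM's native byte order; the
resulting negative value is an encoded secondary constant-pool-cache index
(see the ConstantPoolCache change below). A minimal stand-alone sketch of that
decoding, assuming a little-endian target and using java.nio in place of the
SA's VM.getBytes() helper; class and method names here are illustrative:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class IndyOperandSketch {
        // The four bytes following the invokedynamic opcode, as laid down by
        // the interpreter's rewriter in native byte order.
        static int rawSecondaryIndex(byte[] operand) {
            return ByteBuffer.wrap(operand).order(ByteOrder.LITTLE_ENDIAN).getInt();
        }

        public static void main(String[] args) {
            // ~7 encodes secondary cp-cache slot 7
            byte[] operand = ByteBuffer.allocate(4)
                                       .order(ByteOrder.LITTLE_ENDIAN)
                                       .putInt(~7).array();
            int i = rawSecondaryIndex(operand);
            System.out.println(i + " decodes to slot " + ~i);  // -8 decodes to slot 7
        }
    }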
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java	Sat Jul 31 15:10:59 2010 +0100
@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.interpreter;
 
 import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.utilities.*;
 
 public class BytecodeLoadConstant extends BytecodeWithCPIndex {
@@ -32,10 +33,47 @@
     super(method, bci);
   }
 
+  public boolean hasCacheIndex() {
+    // normal ldc uses CP index, but fast_aldc uses swapped CP cache index
+    return javaCode() != code();
+  }
+
   public int index() {
-    return javaCode() == Bytecodes._ldc ?
+    int i = javaCode() == Bytecodes._ldc ?
                  (int) (0xFF & javaByteAt(1))
                : (int) (0xFFFF & javaShortAt(1));
+    if (hasCacheIndex()) {
+      return (0xFFFF & VM.getVM().getBytes().swapShort((short) i));
+    } else {
+      return i;
+    }
+  }
+
+  public int poolIndex() {
+    int i = index();
+    if (hasCacheIndex()) {
+      ConstantPoolCache cpCache = method().getConstants().getCache();
+      return cpCache.getEntryAt(i).getConstantPoolIndex();
+    } else {
+      return i;
+    }
+  }
+
+  public int cacheIndex() {
+    if (hasCacheIndex()) {
+      return index();
+    } else {
+      return -1;  // no cache index
+    }
+  }
+
+  private Oop getCachedConstant() {
+    int i = cacheIndex();
+    if (i >= 0) {
+      ConstantPoolCache cpCache = method().getConstants().getCache();
+      return cpCache.getEntryAt(i).getF1();
+    }
+    return null;
   }
 
   public void verify() {
@@ -58,6 +96,7 @@
        // has to be int or float or String or Klass
        return (ctag.isUnresolvedString() || ctag.isString()
                || ctag.isUnresolvedKlass() || ctag.isKlass()
+               || ctag.isMethodHandle() || ctag.isMethodType()
                || ctag.isInt() || ctag.isFloat())? true: false;
     }
   }
@@ -112,7 +151,7 @@
 
   public String getConstantValue() {
     ConstantPool cpool = method().getConstants();
-    int cpIndex = index();
+    int cpIndex = poolIndex();
     ConstantTag ctag = cpool.getTagAt(cpIndex);
     if (ctag.isInt()) {
        return "<int " + Integer.toString(cpool.getIntAt(cpIndex)) +">";
@@ -149,6 +188,18 @@
        } else {
           throw new RuntimeException("should not reach here");
        }
+    } else if (ctag.isMethodHandle()) {
+       Oop x = getCachedConstant();
+       int refidx = cpool.getMethodHandleIndexAt(cpIndex);
+       int refkind = cpool.getMethodHandleRefKindAt(cpIndex);
+       return "<MethodHandle kind=" + Integer.toString(refkind) +
+           " ref=" + Integer.toString(refidx)
+           + (x == null ? "" : " @" + x.getHandle()) + ">";
+    } else if (ctag.isMethodType()) {
+       Oop x = getCachedConstant();
+       int refidx = cpool.getMethodTypeIndexAt(cpIndex);
+       return "<MethodType " + cpool.getSymbolAt(refidx).asString()
+           + (x == null ? "" : " @" + x.getHandle()) + ">";
     } else {
        if (Assert.ASSERTS_ENABLED) {
          Assert.that(false, "invalid load constant type");
@@ -162,7 +213,12 @@
     buf.append(getJavaBytecodeName());
     buf.append(spaces);
     buf.append('#');
-    buf.append(Integer.toString(index()));
+    buf.append(Integer.toString(poolIndex()));
+    if (hasCacheIndex()) {
+       buf.append('(');
+       buf.append(Integer.toString(cacheIndex()));
+       buf.append(')');
+    }
     buf.append(spaces);
     buf.append(getConstantValue());
     if (code() != javaCode()) {
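Note: the index plumbing above splits cleanly in two: a plain ldc/ldc_w
operand is a constant-pool index, while the rewritten fast_aldc/fast_aldc_w
operand is a constant-pool-cache index (stored in native byte order for the
wide form), and poolIndex() maps it back through the cache entry. A hedged
sketch of the wide-form mapping, with CpCache as an illustrative stand-in for
ConstantPoolCacheEntry.getConstantPoolIndex():

    // Illustrative only; models fast_aldc_w vs. ldc_w from the code above.
    interface CpCache {
        int constantPoolIndexOf(int cacheIndex);
    }

    final class LdcIndexSketch {
        // rewritten == true corresponds to hasCacheIndex() above
        static int poolIndex(int rawOperand, boolean rewritten, CpCache cache) {
            if (!rewritten) {
                return rawOperand;                        // ldc_w: CP index as-is
            }
            int cacheIndex = 0xFFFF & Short.reverseBytes((short) rawOperand);
            return cache.constantPoolIndexOf(cacheIndex); // fast_aldc_w: via cache
        }
    }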
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java	Sat Jul 31 15:10:59 2010 +0100
@@ -37,12 +37,19 @@
   // the constant pool index for this bytecode
   public int index() { return 0xFFFF & javaShortAt(1); }
 
+  public int getSecondaryIndex() {
+     throw new IllegalArgumentException("must be invokedynamic");
+  }
+
   protected int indexForFieldOrMethod() {
      ConstantPoolCache cpCache = method().getConstants().getCache();
      // get ConstantPool index from ConstantPoolCacheIndex at given bci
      int cpCacheIndex = index();
      if (cpCache == null) {
         return cpCacheIndex;
+     } else if (code() == Bytecodes._invokedynamic) {
+        int secondaryIndex = getSecondaryIndex();
+        return cpCache.getMainEntryAt(secondaryIndex).getConstantPoolIndex();
      } else {
         // change byte-ordering and go via cache
         return cpCache.getEntryAt((int) (0xFFFF & VM.getVM().getBytes().swapShort((short) cpCacheIndex))).getConstantPoolIndex();
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java	Sat Jul 31 15:10:59 2010 +0100
@@ -222,7 +222,7 @@
   public static final int _invokespecial        = 183; // 0xb7
   public static final int _invokestatic         = 184; // 0xb8
   public static final int _invokeinterface      = 185; // 0xb9
-  public static final int _xxxunusedxxx         = 186; // 0xba
+  public static final int _invokedynamic        = 186; // 0xba
   public static final int _new                  = 187; // 0xbb
   public static final int _newarray             = 188; // 0xbc
   public static final int _anewarray            = 189; // 0xbd
@@ -269,9 +269,12 @@
   public static final int _fast_invokevfinal    = 226;
   public static final int _fast_linearswitch    = 227;
   public static final int _fast_binaryswitch    = 228;
-  public static final int _shouldnotreachhere   = 229; // For debugging
+  public static final int _fast_aldc            = 229;
+  public static final int _fast_aldc_w          = 230;
+  public static final int _return_register_finalizer = 231;
+  public static final int _shouldnotreachhere   = 232; // For debugging
 
-  public static final int number_of_codes       = 230;
+  public static final int number_of_codes       = 233;
 
   public static int specialLengthAt(Method method, int bci) {
     int code = codeAt(method, bci);
@@ -458,9 +461,9 @@
     def(_dconst_1            , "dconst_1"            , "b"    , null    , BasicType.getTDouble() ,  2, false);
     def(_bipush              , "bipush"              , "bc"   , null    , BasicType.getTInt()    ,  1, false);
     def(_sipush              , "sipush"              , "bcc"  , null    , BasicType.getTInt()    ,  1, false);
-    def(_ldc                 , "ldc"                 , "bi"   , null    , BasicType.getTIllegal(),  1, true );
-    def(_ldc_w               , "ldc_w"               , "bii"  , null    , BasicType.getTIllegal(),  1, true );
-    def(_ldc2_w              , "ldc2_w"              , "bii"  , null    , BasicType.getTIllegal(),  2, true );
+    def(_ldc                 , "ldc"                 , "bk"   , null    , BasicType.getTIllegal(),  1, true );
+    def(_ldc_w               , "ldc_w"               , "bkk"  , null    , BasicType.getTIllegal(),  1, true );
+    def(_ldc2_w              , "ldc2_w"              , "bkk"  , null    , BasicType.getTIllegal(),  2, true );
     def(_iload               , "iload"               , "bi"   , "wbii"  , BasicType.getTInt()    ,  1, false);
     def(_lload               , "lload"               , "bi"   , "wbii"  , BasicType.getTLong()   ,  2, false);
     def(_fload               , "fload"               , "bi"   , "wbii"  , BasicType.getTFloat()  ,  1, false);
@@ -618,26 +621,26 @@
     def(_dreturn             , "dreturn"             , "b"    , null    , BasicType.getTDouble() , -2, true );
     def(_areturn             , "areturn"             , "b"    , null    , BasicType.getTObject() , -1, true );
     def(_return              , "return"              , "b"    , null    , BasicType.getTVoid()   ,  0, true );
-    def(_getstatic           , "getstatic"           , "bjj"  , null    , BasicType.getTIllegal(),  1, true );
-    def(_putstatic           , "putstatic"           , "bjj"  , null    , BasicType.getTIllegal(), -1, true );
-    def(_getfield            , "getfield"            , "bjj"  , null    , BasicType.getTIllegal(),  0, true );
-    def(_putfield            , "putfield"            , "bjj"  , null    , BasicType.getTIllegal(), -2, true );
-    def(_invokevirtual       , "invokevirtual"       , "bjj"  , null    , BasicType.getTIllegal(), -1, true );
-    def(_invokespecial       , "invokespecial"       , "bjj"  , null    , BasicType.getTIllegal(), -1, true );
-    def(_invokestatic        , "invokestatic"        , "bjj"  , null    , BasicType.getTIllegal(),  0, true );
-    def(_invokeinterface     , "invokeinterface"     , "bjj__", null    , BasicType.getTIllegal(), -1, true );
-    def(_xxxunusedxxx        , "xxxunusedxxx"        , null   , null    , BasicType.getTVoid()   ,  0, false);
-    def(_new                 , "new"                 , "bii"  , null    , BasicType.getTObject() ,  1, true );
+    def(_getstatic           , "getstatic"           , "bJJ"  , null    , BasicType.getTIllegal(),  1, true );
+    def(_putstatic           , "putstatic"           , "bJJ"  , null    , BasicType.getTIllegal(), -1, true );
+    def(_getfield            , "getfield"            , "bJJ"  , null    , BasicType.getTIllegal(),  0, true );
+    def(_putfield            , "putfield"            , "bJJ"  , null    , BasicType.getTIllegal(), -2, true );
+    def(_invokevirtual       , "invokevirtual"       , "bJJ"  , null    , BasicType.getTIllegal(), -1, true );
+    def(_invokespecial       , "invokespecial"       , "bJJ"  , null    , BasicType.getTIllegal(), -1, true );
+    def(_invokestatic        , "invokestatic"        , "bJJ"  , null    , BasicType.getTIllegal(),  0, true );
+    def(_invokeinterface     , "invokeinterface"     , "bJJ__", null    , BasicType.getTIllegal(), -1, true );
+    def(_invokedynamic       , "invokedynamic"       , "bJJJJ", null    , BasicType.getTIllegal(), -1, true );
+    def(_new                 , "new"                 , "bkk"  , null    , BasicType.getTObject() ,  1, true );
     def(_newarray            , "newarray"            , "bc"   , null    , BasicType.getTObject() ,  0, true );
-    def(_anewarray           , "anewarray"           , "bii"  , null    , BasicType.getTObject() ,  0, true );
+    def(_anewarray           , "anewarray"           , "bkk"  , null    , BasicType.getTObject() ,  0, true );
     def(_arraylength         , "arraylength"         , "b"    , null    , BasicType.getTVoid()   ,  0, true );
     def(_athrow              , "athrow"              , "b"    , null    , BasicType.getTVoid()   , -1, true );
-    def(_checkcast           , "checkcast"           , "bii"  , null    , BasicType.getTObject() ,  0, true );
-    def(_instanceof          , "instanceof"          , "bii"  , null    , BasicType.getTInt()    ,  0, true );
+    def(_checkcast           , "checkcast"           , "bkk"  , null    , BasicType.getTObject() ,  0, true );
+    def(_instanceof          , "instanceof"          , "bkk"  , null    , BasicType.getTInt()    ,  0, true );
     def(_monitorenter        , "monitorenter"        , "b"    , null    , BasicType.getTVoid()   , -1, true );
     def(_monitorexit         , "monitorexit"         , "b"    , null    , BasicType.getTVoid()   , -1, true );
     def(_wide                , "wide"                , ""     , null    , BasicType.getTVoid()   ,  0, false);
-    def(_multianewarray      , "multianewarray"      , "biic" , null    , BasicType.getTObject() ,  1, true );
+    def(_multianewarray      , "multianewarray"      , "bkkc" , null    , BasicType.getTObject() ,  1, true );
     def(_ifnull              , "ifnull"              , "boo"  , null    , BasicType.getTVoid()   , -1, false);
     def(_ifnonnull           , "ifnonnull"           , "boo"  , null    , BasicType.getTVoid()   , -1, false);
     def(_goto_w              , "goto_w"              , "boooo", null    , BasicType.getTVoid()   ,  0, false);
@@ -646,38 +649,44 @@
 
     //  JVM bytecodes
     //  bytecode               bytecode name           format   wide f.   result tp               stk traps  std code
-    def(_fast_agetfield      , "fast_agetfield"      , "bjj"  , null    , BasicType.getTObject() ,  0, true , _getfield       );
-    def(_fast_bgetfield      , "fast_bgetfield"      , "bjj"  , null    , BasicType.getTInt()    ,  0, true , _getfield       );
-    def(_fast_cgetfield      , "fast_cgetfield"      , "bjj"  , null    , BasicType.getTChar()   ,  0, true , _getfield       );
-    def(_fast_dgetfield      , "fast_dgetfield"      , "bjj"  , null    , BasicType.getTDouble() ,  0, true , _getfield       );
-    def(_fast_fgetfield      , "fast_fgetfield"      , "bjj"  , null    , BasicType.getTFloat()  ,  0, true , _getfield       );
-    def(_fast_igetfield      , "fast_igetfield"      , "bjj"  , null    , BasicType.getTInt()    ,  0, true , _getfield       );
-    def(_fast_lgetfield      , "fast_lgetfield"      , "bjj"  , null    , BasicType.getTLong()   ,  0, true , _getfield       );
-    def(_fast_sgetfield      , "fast_sgetfield"      , "bjj"  , null    , BasicType.getTShort()  ,  0, true , _getfield       );
+    def(_fast_agetfield      , "fast_agetfield"      , "bJJ"  , null    , BasicType.getTObject() ,  0, true , _getfield       );
+    def(_fast_bgetfield      , "fast_bgetfield"      , "bJJ"  , null    , BasicType.getTInt()    ,  0, true , _getfield       );
+    def(_fast_cgetfield      , "fast_cgetfield"      , "bJJ"  , null    , BasicType.getTChar()   ,  0, true , _getfield       );
+    def(_fast_dgetfield      , "fast_dgetfield"      , "bJJ"  , null    , BasicType.getTDouble() ,  0, true , _getfield       );
+    def(_fast_fgetfield      , "fast_fgetfield"      , "bJJ"  , null    , BasicType.getTFloat()  ,  0, true , _getfield       );
+    def(_fast_igetfield      , "fast_igetfield"      , "bJJ"  , null    , BasicType.getTInt()    ,  0, true , _getfield       );
+    def(_fast_lgetfield      , "fast_lgetfield"      , "bJJ"  , null    , BasicType.getTLong()   ,  0, true , _getfield       );
+    def(_fast_sgetfield      , "fast_sgetfield"      , "bJJ"  , null    , BasicType.getTShort()  ,  0, true , _getfield       );
 
-    def(_fast_aputfield      , "fast_aputfield"      , "bjj"  , null    , BasicType.getTObject() ,  0, true , _putfield       );
-    def(_fast_bputfield      , "fast_bputfield"      , "bjj"  , null    , BasicType.getTInt()    ,  0, true , _putfield       );
-    def(_fast_cputfield      , "fast_cputfield"      , "bjj"  , null    , BasicType.getTChar()   ,  0, true , _putfield       );
-    def(_fast_dputfield      , "fast_dputfield"      , "bjj"  , null    , BasicType.getTDouble() ,  0, true , _putfield       );
-    def(_fast_fputfield      , "fast_fputfield"      , "bjj"  , null    , BasicType.getTFloat()  ,  0, true , _putfield       );
-    def(_fast_iputfield      , "fast_iputfield"      , "bjj"  , null    , BasicType.getTInt()    ,  0, true , _putfield       );
-    def(_fast_lputfield      , "fast_lputfield"      , "bjj"  , null    , BasicType.getTLong()   ,  0, true , _putfield       );
-    def(_fast_sputfield      , "fast_sputfield"      , "bjj"  , null    , BasicType.getTShort()  ,  0, true , _putfield       );
+    def(_fast_aputfield      , "fast_aputfield"      , "bJJ"  , null    , BasicType.getTObject() ,  0, true , _putfield       );
+    def(_fast_bputfield      , "fast_bputfield"      , "bJJ"  , null    , BasicType.getTInt()    ,  0, true , _putfield       );
+    def(_fast_cputfield      , "fast_cputfield"      , "bJJ"  , null    , BasicType.getTChar()   ,  0, true , _putfield       );
+    def(_fast_dputfield      , "fast_dputfield"      , "bJJ"  , null    , BasicType.getTDouble() ,  0, true , _putfield       );
+    def(_fast_fputfield      , "fast_fputfield"      , "bJJ"  , null    , BasicType.getTFloat()  ,  0, true , _putfield       );
+    def(_fast_iputfield      , "fast_iputfield"      , "bJJ"  , null    , BasicType.getTInt()    ,  0, true , _putfield       );
+    def(_fast_lputfield      , "fast_lputfield"      , "bJJ"  , null    , BasicType.getTLong()   ,  0, true , _putfield       );
+    def(_fast_sputfield      , "fast_sputfield"      , "bJJ"  , null    , BasicType.getTShort()  ,  0, true , _putfield       );
 
     def(_fast_aload_0        , "fast_aload_0"        , "b"    , null    , BasicType.getTObject() ,  1, true , _aload_0        );
-    def(_fast_iaccess_0      , "fast_iaccess_0"      , "b_jj" , null    , BasicType.getTInt()    ,  1, true , _aload_0        );
-    def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_jj" , null    , BasicType.getTObject() ,  1, true , _aload_0        );
-    def(_fast_faccess_0      , "fast_faccess_0"      , "b_jj" , null    , BasicType.getTObject() ,  1, true , _aload_0        );
+    def(_fast_iaccess_0      , "fast_iaccess_0"      , "b_JJ" , null    , BasicType.getTInt()    ,  1, true , _aload_0        );
+    def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_JJ" , null    , BasicType.getTObject() ,  1, true , _aload_0        );
+    def(_fast_faccess_0      , "fast_faccess_0"      , "b_JJ" , null    , BasicType.getTObject() ,  1, true , _aload_0        );
 
     def(_fast_iload          , "fast_iload"          , "bi"   , null    , BasicType.getTInt()    ,  1, false, _iload);
     def(_fast_iload2         , "fast_iload2"         , "bi_i" , null    , BasicType.getTInt()    ,  2, false, _iload);
     def(_fast_icaload        , "fast_icaload"        , "bi_"  , null    , BasicType.getTInt()    ,  0, false, _iload);
 
     // Faster method invocation.
-    def(_fast_invokevfinal   , "fast_invokevfinal"   , "bjj"  , null    , BasicType.getTIllegal(), -1, true, _invokevirtual);
+    def(_fast_invokevfinal   , "fast_invokevfinal"   , "bJJ"  , null    , BasicType.getTIllegal(), -1, true, _invokevirtual);
 
     def(_fast_linearswitch   , "fast_linearswitch"   , ""     , null    , BasicType.getTVoid()   , -1, false, _lookupswitch   );
     def(_fast_binaryswitch   , "fast_binaryswitch"   , ""     , null    , BasicType.getTVoid()   , -1, false, _lookupswitch   );
+
+    def(_return_register_finalizer, "return_register_finalizer", "b"    , null    , BasicType.getTVoid()   , 0, true, _return );
+
+    def(_fast_aldc           , "fast_aldc"           , "bj"   , null    , BasicType.getTObject(),   1, true,  _ldc   );
+    def(_fast_aldc_w         , "fast_aldc_w"         , "bJJ"  , null    , BasicType.getTObject(),   1, true,  _ldc_w );
+
     def(_shouldnotreachhere  , "_shouldnotreachhere" , "b"    , null    , BasicType.getTVoid()   ,  0, false);
 
     if (Assert.ASSERTS_ENABLED) {
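Note: the format strings above use one character per bytecode byte. As used in
this change (the authoritative legend lives in the VM's bytecodes.hpp): 'b' is
the opcode byte, 'c' a signed constant byte, 'i' a local-variable index, 'o' a
branch-offset byte, '_' a padding byte, 'k' a constant-pool index byte (Java
byte order), 'j' a constant-pool-cache index byte (Java byte order), and the
new 'J' a constant-pool-cache index byte in native byte order. The "bjj" to
"bJJ" and "bii" to "bkk" rewrites are therefore notation fixes: those operands
were always cache or pool indices, and the new letters make that explicit. A
small illustrative helper (not SA API):

    final class FormatLegend {
        static String describe(char fmt) {
            switch (fmt) {
                case 'b': return "opcode byte";
                case 'c': return "signed constant byte";
                case 'i': return "local-variable index byte (Java order)";
                case 'o': return "branch-offset byte (Java order)";
                case '_': return "padding/ignored byte";
                case 'k': return "constant-pool index byte (Java order)";
                case 'j': return "cp-cache index byte (Java order)";
                case 'J': return "cp-cache index byte (native order)";
                default:  throw new IllegalArgumentException("unknown format char: " + fmt);
            }
        }
    }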
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Sat Jul 31 15:10:59 2010 +0100
@@ -152,7 +152,7 @@
     return res;
   }
 
-  public int getNameAndTypeAt(int which) {
+  public int[] getNameAndTypeAt(int which) {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(getTagAt(which).isNameAndType(), "Corrupted constant pool");
     }
@@ -160,18 +160,16 @@
     if (DEBUG) {
       System.err.println("ConstantPool.getNameAndTypeAt(" + which + "): result = " + i);
     }
-    return i;
+    return new int[] { extractLowShortFromInt(i), extractHighShortFromInt(i) };
   }
 
   public Symbol getNameRefAt(int which) {
-    int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which));
-    int nameIndex = extractLowShortFromInt(refIndex);
+    int nameIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[0];
     return getSymbolAt(nameIndex);
   }
 
   public Symbol getSignatureRefAt(int which) {
-    int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which));
-    int sigIndex = extractHighShortFromInt(refIndex);
+    int sigIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[1];
     return getSymbolAt(sigIndex);
   }
 
@@ -220,11 +218,11 @@
 
   /** Lookup for entries consisting of (name_index, signature_index) */
   public int getNameRefIndexAt(int index) {
-    int refIndex = getNameAndTypeAt(index);
+    int[] refIndex = getNameAndTypeAt(index);
     if (DEBUG) {
-      System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex);
+      System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]);
     }
-    int i = extractLowShortFromInt(refIndex);
+    int i = refIndex[0];
     if (DEBUG) {
       System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): result = " + i);
     }
@@ -233,17 +231,53 @@
 
   /** Lookup for entries consisting of (name_index, signature_index) */
   public int getSignatureRefIndexAt(int index) {
-    int refIndex = getNameAndTypeAt(index);
+    int[] refIndex = getNameAndTypeAt(index);
     if (DEBUG) {
-      System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex);
+      System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]);
     }
-    int i = extractHighShortFromInt(refIndex);
+    int i = refIndex[1];
     if (DEBUG) {
       System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): result = " + i);
     }
     return i;
   }
 
+  /** Lookup for MethodHandle entries. */
+  public int getMethodHandleIndexAt(int i) {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool");
+    }
+    int res = extractHighShortFromInt(getIntAt(i));
+    if (DEBUG) {
+      System.err.println("ConstantPool.getMethodHandleIndexAt(" + i + "): result = " + res);
+    }
+    return res;
+  }
+
+  /** Lookup for MethodHandle entries. */
+  public int getMethodHandleRefKindAt(int i) {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool");
+    }
+    int res = extractLowShortFromInt(getIntAt(i));
+    if (DEBUG) {
+      System.err.println("ConstantPool.getMethodHandleRefKindAt(" + i + "): result = " + res);
+    }
+    return res;
+  }
+
+  /** Lookup for MethodType entries. */
+  public int getMethodTypeIndexAt(int i) {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(getTagAt(i).isMethodType(), "Corrupted constant pool");
+    }
+    int res = getIntAt(i);
+    if (DEBUG) {
+      System.err.println("ConstantPool.getMethodHandleTypeAt(" + i + "): result = " + res);
+    }
+    return res;
+  }
+
   final private static String[] nameForTag = new String[] {
   };
 
@@ -261,6 +295,9 @@
     case JVM_CONSTANT_Methodref:          return "JVM_CONSTANT_Methodref";
     case JVM_CONSTANT_InterfaceMethodref: return "JVM_CONSTANT_InterfaceMethodref";
     case JVM_CONSTANT_NameAndType:        return "JVM_CONSTANT_NameAndType";
+    case JVM_CONSTANT_MethodHandle:       return "JVM_CONSTANT_MethodHandle";
+    case JVM_CONSTANT_MethodType:         return "JVM_CONSTANT_MethodType";
+    case JVM_CONSTANT_InvokeDynamic:      return "JVM_CONSTANT_InvokeDynamic";
     case JVM_CONSTANT_Invalid:            return "JVM_CONSTANT_Invalid";
     case JVM_CONSTANT_UnresolvedClass:    return "JVM_CONSTANT_UnresolvedClass";
     case JVM_CONSTANT_UnresolvedClassInError:    return "JVM_CONSTANT_UnresolvedClassInError";
@@ -317,6 +354,9 @@
         case JVM_CONSTANT_Methodref:
         case JVM_CONSTANT_InterfaceMethodref:
         case JVM_CONSTANT_NameAndType:
+        case JVM_CONSTANT_MethodHandle:
+        case JVM_CONSTANT_MethodType:
+        case JVM_CONSTANT_InvokeDynamic:
           visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true);
           break;
         }
@@ -467,6 +507,30 @@
                                           + ", type = " + signatureIndex);
                   break;
               }
+
+              case JVM_CONSTANT_MethodHandle: {
+                  dos.writeByte(cpConstType);
+                  int value = getIntAt(ci);
+                  byte refKind = (byte) extractLowShortFromInt(value);
+                  short refIndex = (short) extractHighShortFromInt(value);
+                  dos.writeByte(refKind);
+                  dos.writeShort(refIndex);
+                  if (DEBUG) debugMessage("CP[" + ci + "] = MH kind = " + refKind
+                                          + ", ref = " + refIndex);
+                  break;
+              }
+
+              case JVM_CONSTANT_InvokeDynamic: {
+                  dos.writeByte(cpConstType);
+                  int value = getIntAt(ci);
+                  short bootstrapMethodIndex = (short) extractLowShortFromInt(value);
+                  short nameAndTypeIndex = (short) extractHighShortFromInt(value);
+                  dos.writeShort(bootstrapMethodIndex);
+                  dos.writeShort(nameAndTypeIndex);
+                  if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " + bootstrapMethodIndex
+                                          + ", N&T = " + nameAndTypeIndex);
+                  break;
+              }
               default:
                   throw new InternalError("unknown tag: " + cpConstType);
           } // switch
@@ -488,10 +552,12 @@
   //
 
   private static int extractHighShortFromInt(int val) {
+    // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
     return (val >> 16) & 0xFFFF;
   }
 
   private static int extractLowShortFromInt(int val) {
+    // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
     return val & 0xFFFF;
   }
 }
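Note: the two helpers above decode the packed layout used throughout this
change: several constant-pool entry kinds store two 16-bit values in a single
32-bit slot, low half first. From the accessors added here, a MethodHandle
entry packs ref_kind low and ref_index high; an InvokeDynamic entry packs the
bootstrap-method index low and the NameAndType index high. A compact
restatement (pack() is illustrative; the real writers are the
constantPoolOopDesc::*_at_put methods on the VM side):

    final class PackedEntry {
        static int pack(int low, int high) {
            return (low & 0xFFFF) | ((high & 0xFFFF) << 16);
        }
        static int low(int v)  { return v & 0xFFFF; }           // extractLowShortFromInt
        static int high(int v) { return (v >>> 16) & 0xFFFF; }  // extractHighShortFromInt
    }
    // MethodHandle:  low = ref_kind,               high = ref_index
    // InvokeDynamic: low = bootstrap method index, high = NameAndType index
    // NameAndType:   low = name index,             high = signature index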
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Sat Jul 31 15:10:59 2010 +0100
@@ -78,6 +78,31 @@
     return new ConstantPoolCacheEntry(this, i);
   }
 
+  public static boolean isSecondaryIndex(int i)     { return (i < 0); }
+  public static int     decodeSecondaryIndex(int i) { return  isSecondaryIndex(i) ? ~i : i; }
+  public static int     encodeSecondaryIndex(int i) { return !isSecondaryIndex(i) ? ~i : i; }
+
+  // secondary entries hold invokedynamic call site bindings
+  public ConstantPoolCacheEntry getSecondaryEntryAt(int i) {
+    ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, decodeSecondaryIndex(i));
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(e.isSecondaryEntry(), "must be a secondary entry");
+    }
+    return e;
+  }
+
+  public ConstantPoolCacheEntry getMainEntryAt(int i) {
+    if (isSecondaryIndex(i)) {
+      // run through an extra level of indirection:
+      i = getSecondaryEntryAt(i).getMainEntryIndex();
+    }
+    ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, i);
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(!e.isSecondaryEntry(), "must not be a secondary entry");
+    }
+    return e;
+  }
+
   public int getIntAt(int entry, int fld) {
     //alignObjectSize ?
     long offset = baseOffset + /*alignObjectSize*/entry * elementSize + fld* getHeap().getIntSize();
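Note: the complement-based encoding above keeps secondary (invokedynamic)
indices disjoint from ordinary ones: encodeSecondaryIndex maps a non-negative
slot i to ~i, which is always negative, and decoding applies the same
involution. A round-trip check using only the arithmetic defined above:

    public class SecondaryIndexRoundTrip {
        static boolean isSecondaryIndex(int i)     { return (i < 0); }
        static int     decodeSecondaryIndex(int i) { return  isSecondaryIndex(i) ? ~i : i; }
        static int     encodeSecondaryIndex(int i) { return !isSecondaryIndex(i) ? ~i : i; }

        public static void main(String[] args) {
            for (int slot = 0; slot < 3; slot++) {
                int enc = encodeSecondaryIndex(slot);
                // prints: 0 -> -1 -> 0, 1 -> -2 -> 1, 2 -> -3 -> 2
                System.out.println(slot + " -> " + enc + " -> " + decodeSecondaryIndex(enc));
            }
        }
    }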
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java	Sat Jul 31 15:10:59 2010 +0100
@@ -28,6 +28,7 @@
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
 
 public class ConstantPoolCacheEntry {
   private static long          size;
@@ -67,9 +68,23 @@
   }
 
   public int getConstantPoolIndex() {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(!isSecondaryEntry(), "must not be a secondary CP entry");
+    }
     return (int) (getIndices() & 0xFFFF);
   }
 
+  public boolean isSecondaryEntry() {
+    return (getIndices() & 0xFFFF) == 0;
+  }
+
+  public int getMainEntryIndex() {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(isSecondaryEntry(), "must be a secondary CP entry");
+    }
+    return (int) (getIndices() >>> 16);
+  }
+
   private long getIndices() {
       return cp.getHandle().getCIntegerAt(indices.getOffset() + offset, indices.getSize(), indices.isUnsigned());
   }
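Note: taken together, the accessors above imply the layout of the entry's
_indices word: the low 16 bits hold the original constant-pool index, a zero
low half marks a secondary (invokedynamic) entry, and for secondary entries
the main-entry index sits in the bits above. A sketch of that bit layout (the
word itself is read from the target VM via getCIntegerAt):

    final class IndicesWordSketch {
        static boolean isSecondaryEntry(long indices)  { return (indices & 0xFFFF) == 0; }
        static int     constantPoolIndex(long indices) { return (int) (indices & 0xFFFF); }
        static int     mainEntryIndex(long indices)    { return (int) (indices >>> 16); }
    }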
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java	Sat Jul 31 15:10:59 2010 +0100
@@ -566,6 +566,7 @@
       case Bytecodes._invokespecial:
       case Bytecodes._invokestatic:
       case Bytecodes._invokeinterface:
+      case Bytecodes._invokedynamic:
         // FIXME: print signature of referenced method (need more
         // accessors in ConstantPool and ConstantPoolCache)
         int idx = currentBC.getIndexBig();
@@ -605,6 +606,7 @@
       case Bytecodes._invokespecial:
       case Bytecodes._invokestatic:
       case Bytecodes._invokeinterface:
+      case Bytecodes._invokedynamic:
         // FIXME: print signature of referenced method (need more
         // accessors in ConstantPool and ConstantPoolCache)
         int idx = currentBC.getIndexBig();
@@ -1134,6 +1136,7 @@
       case Bytecodes._invokespecial:
       case Bytecodes._invokestatic:
       case Bytecodes._invokeinterface:
+      case Bytecodes._invokedynamic:
         _itr_send = itr;
         _report_result_for_send = true;
         break;
@@ -1379,6 +1382,7 @@
     case Bytecodes._invokevirtual:
     case Bytecodes._invokespecial:     doMethod(false, false, itr.getIndexBig(), itr.bci()); break;
     case Bytecodes._invokestatic:      doMethod(true,  false, itr.getIndexBig(), itr.bci()); break;
+    case Bytecodes._invokedynamic:     doMethod(false, true,  itr.getIndexBig(), itr.bci()); break;
     case Bytecodes._invokeinterface:   doMethod(false, true,  itr.getIndexBig(), itr.bci()); break;
     case Bytecodes._newarray:
     case Bytecodes._anewarray:         ppNewRef(vCTS, itr.bci()); break;
@@ -1725,7 +1729,7 @@
   void  doMethod                            (boolean is_static, boolean is_interface, int idx, int bci) {
     // Dig up signature for field in constant pool
     ConstantPool cp       = _method.getConstants();
-    int nameAndTypeIdx    = cp.getNameAndTypeRefIndexAt(idx);
+    int nameAndTypeIdx    = cp.getTagAt(idx).isNameAndType() ? idx : cp.getNameAndTypeRefIndexAt(idx);
     int signatureIdx      = cp.getSignatureRefIndexAt(nameAndTypeIdx);
     Symbol signature      = cp.getSymbolAt(signatureIdx);
 
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Sat Jul 31 15:10:59 2010 +0100
@@ -40,6 +40,20 @@
     public static final int JVM_CONSTANT_Methodref          = 10;
     public static final int JVM_CONSTANT_InterfaceMethodref = 11;
     public static final int JVM_CONSTANT_NameAndType        = 12;
+    public static final int JVM_CONSTANT_MethodHandle       = 15;
+    public static final int JVM_CONSTANT_MethodType         = 16;
+    public static final int JVM_CONSTANT_InvokeDynamic      = 17;
+
+    // JVM_CONSTANT_MethodHandle subtypes
+    public static final int JVM_REF_getField                = 1;
+    public static final int JVM_REF_getStatic               = 2;
+    public static final int JVM_REF_putField                = 3;
+    public static final int JVM_REF_putStatic               = 4;
+    public static final int JVM_REF_invokeVirtual           = 5;
+    public static final int JVM_REF_invokeStatic            = 6;
+    public static final int JVM_REF_invokeSpecial           = 7;
+    public static final int JVM_REF_newInvokeSpecial        = 8;
+    public static final int JVM_REF_invokeInterface         = 9;
 
     // HotSpot specific constant pool constant types.
 
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Sat Jul 31 15:10:59 2010 +0100
@@ -54,14 +54,34 @@
 
     }
 
-    protected short getConstantPoolIndex(int bci) {
+    protected short getConstantPoolIndex(int rawcode, int bci) {
        // get ConstantPool index from ConstantPoolCacheIndex at given bci
-       short cpCacheIndex = method.getBytecodeShortArg(bci);
+       String fmt = Bytecodes.format(rawcode);
+       int cpCacheIndex;
+       switch (fmt.length()) {
+       case 2: cpCacheIndex = method.getBytecodeByteArg(bci); break;
+       case 3: cpCacheIndex = method.getBytecodeShortArg(bci); break;
+       case 5:
+           if (fmt.indexOf("__") >= 0)
+               cpCacheIndex = method.getBytecodeShortArg(bci);
+           else
+               cpCacheIndex = method.getBytecodeIntArg(bci);
+           break;
+       default: throw new IllegalArgumentException();
+       }
        if (cpCache == null) {
-          return cpCacheIndex;
+          return (short) cpCacheIndex;
+       } else if (fmt.indexOf("JJJJ") >= 0) {
+          // change byte-ordering and go via secondary cache entry
+          return (short) cpCache.getMainEntryAt(bytes.swapInt(cpCacheIndex)).getConstantPoolIndex();
+       } else if (fmt.indexOf("JJ") >= 0) {
+          // change byte-ordering and go via cache
+          return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex();
+       } else if (fmt.indexOf("j") >= 0) {
+          // go via cache
+          return (short) cpCache.getEntryAt((int) (0xFF & cpCacheIndex)).getConstantPoolIndex();
        } else {
-          // change byte-ordering and go via cache
-          return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort(cpCacheIndex))).getConstantPoolIndex();
+          return (short) cpCacheIndex;
        }
     }
 
@@ -100,10 +120,31 @@
                 case Bytecodes._invokespecial:
                 case Bytecodes._invokestatic:
                 case Bytecodes._invokeinterface: {
-                    cpoolIndex = getConstantPoolIndex(bci + 1);
+                    cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
                     writeShort(code, bci + 1, cpoolIndex);
                     break;
                 }
+
+                case Bytecodes._invokedynamic:
+                    cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                    writeShort(code, bci + 1, cpoolIndex);
+                    writeShort(code, bci + 3, (short)0);  // clear out trailing bytes
+                    break;
+
+                case Bytecodes._ldc_w:
+                    if (hotspotcode != bytecode) {
+                        // fast_aldc_w puts constant in CP cache
+                        cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                        writeShort(code, bci + 1, cpoolIndex);
+                    }
+                    break;
+                case Bytecodes._ldc:
+                    if (hotspotcode != bytecode) {
+                        // fast_aldc puts constant in CP cache
+                        cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                        code[bci + 1] = (byte)(cpoolIndex);
+                    }
+                    break;
             }
 
             len = Bytecodes.lengthFor(bytecode);
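Note: the width selection in getConstantPoolIndex() above follows directly
from the format strings: a 2-character format carries a one-byte operand
("bj", fast_aldc), a 3-character format a two-byte operand ("bJJ"), and a
5-character format is either invokeinterface's "bJJ__" (two operand bytes
plus padding) or invokedynamic's "bJJJJ" (four operand bytes). A condensed
restatement of that dispatch (illustrative; Bytecodes.format() is the real
SA call):

    static int operandBytes(String fmt) {
        switch (fmt.length()) {
            case 2:  return 1;                           // "bj"    (fast_aldc)
            case 3:  return 2;                           // "bJJ"   (field/method refs)
            case 5:  return fmt.contains("__") ? 2 : 4;  // "bJJ__" vs "bJJJJ"
            default: throw new IllegalArgumentException("unexpected format: " + fmt);
        }
    }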
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Sat Jul 31 15:10:59 2010 +0100
@@ -61,10 +61,12 @@
     protected short  _signatureIndex;
 
     protected static int extractHighShortFromInt(int val) {
+        // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
         return (val >> 16) & 0xFFFF;
     }
 
     protected static int extractLowShortFromInt(int val) {
+        // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
         return val & 0xFFFF;
     }
 
@@ -297,6 +299,37 @@
                                         + ", type = " + signatureIndex);
                      break;
                 }
+
+                case JVM_CONSTANT_MethodHandle: {
+                     dos.writeByte(cpConstType);
+                     int value = cpool.getIntAt(ci);
+                     byte refKind = (byte) extractLowShortFromInt(value);
+                     short refIndex = (short) extractHighShortFromInt(value);
+                     dos.writeByte(refKind);
+                     dos.writeShort(refIndex);
+                     if (DEBUG) debugMessage("CP[" + ci + "] = MH kind = " +
+                           refKind + ", ref = " + refIndex);
+                     break;
+                }
+
+                case JVM_CONSTANT_MethodType: {
+                     dos.writeByte(cpConstType);
+                     int value = cpool.getIntAt(ci);
+                     short refIndex = (short) value;
+                     dos.writeShort(refIndex);
+                     if (DEBUG) debugMessage("CP[" + ci + "] = MT index = " + refIndex);
+                     break;
+                }
+
+                case JVM_CONSTANT_InvokeDynamic: {
+                     dos.writeByte(cpConstType);
+                     int value = cpool.getIntAt(ci);
+                     short bsmIndex = (short) extractLowShortFromInt(value);
+                     short nameAndTypeIndex = (short) extractHighShortFromInt(value);
+                     dos.writeShort(bsmIndex);
+                     dos.writeShort(nameAndTypeIndex);
+                     if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " + bsmIndex
+                                             + ", N&T = " + nameAndTypeIndex);
+                     break;
+                }
+
                 default:
                   throw new InternalError("Unknown tag: " + cpConstType);
             } // switch
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Sat Jul 31 15:10:59 2010 +0100
@@ -572,6 +572,21 @@
                buf.cell(Integer.toString(cpool.getIntAt(index)));
                break;
 
+            case JVM_CONSTANT_MethodHandle:
+               buf.cell("JVM_CONSTANT_MethodHandle");
+               buf.cell(genLowHighShort(cpool.getIntAt(index)));
+               break;
+
+            case JVM_CONSTANT_MethodType:
+               buf.cell("JVM_CONSTANT_MethodType");
+               buf.cell(Integer.toString(cpool.getIntAt(index)));
+               break;
+
+            case JVM_CONSTANT_InvokeDynamic:
+               buf.cell("JVM_CONSTANT_InvokeDynamic");
+               buf.cell(genLowHighShort(cpool.getIntAt(index)));
+               break;
+
             default:
                throw new InternalError("unknown tag: " + ctag);
          }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Sat Jul 31 15:10:59 2010 +0100
@@ -38,12 +38,27 @@
   private static int JVM_CONSTANT_Methodref               = 10;
   private static int JVM_CONSTANT_InterfaceMethodref      = 11;
   private static int JVM_CONSTANT_NameAndType             = 12;
+  private static int JVM_CONSTANT_MethodHandle            = 15;  // JSR 292
+  private static int JVM_CONSTANT_MethodType              = 16;  // JSR 292
+  private static int JVM_CONSTANT_InvokeDynamic           = 17;  // JSR 292
   private static int JVM_CONSTANT_Invalid                 = 0;   // For bad value initialization
   private static int JVM_CONSTANT_UnresolvedClass         = 100; // Temporary tag until actual use
   private static int JVM_CONSTANT_ClassIndex              = 101; // Temporary tag while constructing constant pool
   private static int JVM_CONSTANT_UnresolvedString        = 102; // Temporary tag until actual use
   private static int JVM_CONSTANT_StringIndex             = 103; // Temporary tag while constructing constant pool
   private static int JVM_CONSTANT_UnresolvedClassInError  = 104; // Resolution failed
+  private static int JVM_CONSTANT_Object                  = 105; // Required for BoundMethodHandle arguments.
+
+  // JVM_CONSTANT_MethodHandle subtypes //FIXME: connect these to data structure
+  private static int JVM_REF_getField                = 1;
+  private static int JVM_REF_getStatic               = 2;
+  private static int JVM_REF_putField                = 3;
+  private static int JVM_REF_putStatic               = 4;
+  private static int JVM_REF_invokeVirtual           = 5;
+  private static int JVM_REF_invokeStatic            = 6;
+  private static int JVM_REF_invokeSpecial           = 7;
+  private static int JVM_REF_newInvokeSpecial        = 8;
+  private static int JVM_REF_invokeInterface         = 9;
 
   private byte tag;
 
@@ -62,6 +77,9 @@
   public boolean isDouble()           { return tag == JVM_CONSTANT_Double; }
   public boolean isNameAndType()      { return tag == JVM_CONSTANT_NameAndType; }
   public boolean isUtf8()             { return tag == JVM_CONSTANT_Utf8; }
+  public boolean isMethodHandle()     { return tag == JVM_CONSTANT_MethodHandle; }
+  public boolean isMethodType()       { return tag == JVM_CONSTANT_MethodType; }
+  public boolean isInvokeDynamic()    { return tag == JVM_CONSTANT_InvokeDynamic; }
 
   public boolean isInvalid()          { return tag == JVM_CONSTANT_Invalid; }
 
@@ -73,6 +91,8 @@
   public boolean isUnresolvedString()       { return tag == JVM_CONSTANT_UnresolvedString; }
   public boolean isStringIndex()            { return tag == JVM_CONSTANT_StringIndex; }
 
+  public boolean isObject()                 { return tag == JVM_CONSTANT_Object; }
+
   public boolean isKlassReference()   { return isKlassIndex() || isUnresolvedKlass(); }
   public boolean isFieldOrMethod()    { return isField() || isMethod() || isInterfaceMethod(); }
   public boolean isSymbol()           { return isUtf8(); }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Fri Jul 30 22:43:50 2010 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Sat Jul 31 15:10:59 2010 +0100
@@ -825,6 +825,8 @@
    }
    writeln("");
    disAsm.decode(new sapkg.interpreter.BytecodeVisitor() {
+                    prologue: function(method) { },
+                    epilogue: function() { },
                     visit: function(bytecode) {
                        if (hasLines) {
                           var line = method.getLineNumberFromBCI(bci);
--- a/make/hotspot_version	Fri Jul 30 22:43:50 2010 +0100
+++ b/make/hotspot_version	Sat Jul 31 15:10:59 2010 +0100
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=19
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=02
+HS_BUILD_NUMBER=04
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/linux/makefiles/adlc.make	Fri Jul 30 22:43:50 2010 +0100
+++ b/make/linux/makefiles/adlc.make	Sat Jul 31 15:10:59 2010 +0100
@@ -138,7 +138,11 @@
 
 # Normally, debugging is done directly on the ad_<arch>*.cpp files.
 # But -g will put #line directives in those files pointing back to <arch>.ad.
+# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives
+# so skip it for 3.2 and earlier.
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
 ADLCFLAGS += -g
+endif
 
 ifdef LP64
 ADLCFLAGS += -D_LP64
--- a/make/linux/makefiles/sa.make	Fri Jul 30 22:43:50 2010 +0100
+++ b/make/linux/makefiles/sa.make	Sat Jul 31 15:10:59 2010 +0100
@@ -40,6 +40,9 @@
 # tools.jar is needed by the JDI - SA binding
 SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
 
+# TODO: if it's a modules image, check if SA module is installed.
+MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
+
 # gnumake 3.78.1 does not accept the *s that
 # are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
 AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
@@ -65,7 +68,7 @@
 	  echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
 	  exit 1; \
 	fi
-	$(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \
+	$(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
 	  echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
 	  echo ""; \
 	  exit 1; \
--- a/make/solaris/makefiles/sa.make	Fri Jul 30 22:43:50 2010 +0100
+++ b/make/solaris/makefiles/sa.make	Sat Jul 31 15:10:59 2010 +0100
@@ -36,6 +36,9 @@
 # tools.jar is needed by the JDI - SA binding
 SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
 
+# TODO: if it's a modules image, check if SA module is installed.
+MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
+
 # gnumake 3.78.1 does not accept the *s that
 # are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
 AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
@@ -59,7 +62,7 @@
 	   echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
 	   exit 1; \
 	fi
-	$(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \
+	$(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
 	  echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
 	  echo ""; \
 	  exit 1; \
--- a/make/windows/makefiles/defs.make	Fri Jul 30 22:43:50 2010 +0100
+++ b/make/windows/makefiles/defs.make	Sat Jul 31 15:10:59 2010 +0100
@@ -32,6 +32,17 @@
 PATH_SEP = ;
 
 # Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name
+ifeq ($(ARCH_DATA_MODEL),32)
+  ARCH_DATA_MODEL=32
+  PLATFORM=windows-i586
+  VM_PLATFORM=windows_i486
+  HS_ARCH=x86
+  MAKE_ARGS += ARCH=x86
+  MAKE_ARGS += BUILDARCH=i486
+  MAKE_ARGS += Platform_arch=x86
+  MAKE_ARGS += Platform_arch_model=x86_32
+endif
+
 ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) x86),)
   ARCH_DATA_MODEL=32
   PLATFORM=windows-i586
@@ -43,43 +54,57 @@
   MAKE_ARGS += Platform_arch_model=x86_32
 endif
 
-ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) ia64),)
-  ARCH_DATA_MODEL=64
-  PLATFORM=windows-ia64
-  VM_PLATFORM=windows_ia64
-  HS_ARCH=ia64
-  MAKE_ARGS += LP64=1
-  MAKE_ARGS += ARCH=ia64
-  MAKE_ARGS += BUILDARCH=ia64
-  MAKE_ARGS += Platform_arch=ia64
-  MAKE_ARGS += Platform_arch_model=ia64
-endif
+ifneq ($(ARCH_DATA_MODEL),32)
+  ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) ia64),)
+    ARCH_DATA_MODEL=64
+    PLATFORM=windows-ia64
+    VM_PLATFORM=windows_ia64
+    HS_ARCH=ia64
+    MAKE_ARGS += LP64=1
+    MAKE_ARGS += ARCH=ia64
+    MAKE_ARGS += BUILDARCH=ia64
+    MAKE_ARGS += Platform_arch=ia64
+    MAKE_ARGS += Platform_arch_model=ia64
+  endif
 
 # http://support.microsoft.com/kb/888731 : this can be either
 # AMD64 for AMD, or EM64T for Intel chips.
-ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) AMD64),)
-  ARCH_DATA_MODEL=64
-  PLATFORM=windows-amd64
-  VM_PLATFORM=windows_amd64
-  HS_ARCH=x86
-  MAKE_ARGS += LP64=1
-  MAKE_ARGS += ARCH=x86
-  MAKE_ARGS += BUILDARCH=amd64
-  MAKE_ARGS += Platform_arch=x86
-  MAKE_ARGS += Platform_arch_model=x86_64
-endif
+  ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) AMD64),)
+    ARCH_DATA_MODEL=64
+    PLATFORM=windows-amd64
+    VM_PLATFORM=windows_amd64
+    HS_ARCH=x86
+    MAKE_ARGS += LP64=1
+    MAKE_ARGS += ARCH=x86
+    MAKE_ARGS += BUILDARCH=amd64
+    MAKE_ARGS += Platform_arch=x86
+    MAKE_ARGS += Platform_arch_model=x86_64
+  endif
+
+  ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) EM64T),)
+    ARCH_DATA_MODEL=64
+    PLATFORM=windows-amd64
+    VM_PLATFORM=windows_amd64
+    HS_ARCH=x86
+    MAKE_ARGS += LP64=1
+    MAKE_ARGS += ARCH=x86
+    MAKE_ARGS += BUILDARCH=amd64
+    MAKE_ARGS += Platform_arch=x86
+    MAKE_ARGS += Platform_arch_model=x86_64
+  endif
 
 # NB later OS versions than 2003 may report "Intel64"
-ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) "EM64T\|Intel64"),)
-  ARCH_DATA_MODEL=64
-  PLATFORM=windows-amd64
-  VM_PLATFORM=windows_amd64
-  HS_ARCH=x86
-  MAKE_ARGS += LP64=1
-  MAKE_ARGS += ARCH=x86
-  MAKE_ARGS += BUILDARCH=amd64
-  MAKE_ARGS += Platform_arch=x86
-  MAKE_ARGS += Platform_arch_model=x86_64
+  ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) Intel64),)
+    ARCH_DATA_MODEL=64
+    PLATFORM=windows-amd64
+    VM_PLATFORM=windows_amd64
+    HS_ARCH=x86
+    MAKE_ARGS += LP64=1
+    MAKE_ARGS += ARCH=x86
+    MAKE_ARGS += BUILDARCH=amd64
+    MAKE_ARGS += Platform_arch=x86
+    MAKE_ARGS += Platform_arch_model=x86_64
+  endif
 endif
 
 JDK_INCLUDE_SUBDIR=win32
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1007,9 +1007,9 @@
         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
       __ delayed()->cmp(to_from, byte_count);
       if (NOLp == NULL)
-        __ brx(Assembler::greaterEqual, false, Assembler::pt, no_overlap_target);
+        __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
       else
-        __ brx(Assembler::greaterEqual, false, Assembler::pt, (*NOLp));
+        __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
       __ delayed()->nop();
   }
 
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -318,6 +318,31 @@
   __ bind(exit);
 }
 
+// Fast path for caching oop constants.
+// %%% We should use this to handle Class and String constants also.
+// %%% It will simplify the ldc/primitive path considerably.
+void TemplateTable::fast_aldc(bool wide) {
+  transition(vtos, atos);
+
+  if (!EnableMethodHandles) {
+    // We should not encounter this bytecode if !EnableMethodHandles.
+    // The verifier will stop it.  However, if we get past the verifier,
+    // this will stop the thread in a reasonable way, without crashing the JVM.
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                     InterpreterRuntime::throw_IncompatibleClassChangeError));
+    // the call_VM checks for exception, so we should never return here.
+    __ should_not_reach_here();
+    return;
+  }
+
+  Register Rcache = G3_scratch;
+  Register Rscratch = G4_scratch;
+
+  resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));
+
+  __ verify_oop(Otos_i);
+}
+
 void TemplateTable::ldc2_w() {
   transition(vtos, vtos);
   Label retry, resolved, Long, exit;
@@ -1994,6 +2019,8 @@
     case Bytecodes::_invokestatic   : // fall through
     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);  break;
+    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
+    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
     default                         : ShouldNotReachHere();                                 break;
   }
   // first time invocation - must resolve first
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -375,6 +375,32 @@
   __ bind(Done);
 }
 
+// Fast path for caching oop constants.
+// %%% We should use this to handle Class and String constants also.
+// %%% It will simplify the ldc/primitive path considerably.
+void TemplateTable::fast_aldc(bool wide) {
+  transition(vtos, atos);
+
+  if (!EnableMethodHandles) {
+    // We should not encounter this bytecode if !EnableMethodHandles.
+    // The verifier will stop it.  However, if we get past the verifier,
+    // this will stop the thread in a reasonable way, without crashing the JVM.
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                     InterpreterRuntime::throw_IncompatibleClassChangeError));
+    // the call_VM checks for exception, so we should never return here.
+    __ should_not_reach_here();
+    return;
+  }
+
+  const Register cache = rcx;
+  const Register index = rdx;
+
+  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
+  if (VerifyOops) {
+    __ verify_oop(rax);
+  }
+}
+
 void TemplateTable::ldc2_w() {
   transition(vtos, vtos);
   Label Long, Done;
@@ -2055,6 +2081,8 @@
     case Bytecodes::_invokestatic   : // fall through
     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
+    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
+    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
     default                         : ShouldNotReachHere();                                 break;
   }
   __ movl(temp, (int)bytecode());
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -389,6 +389,32 @@
   __ bind(Done);
 }
 
+// Fast path for caching oop constants.
+// %%% We should use this to handle Class and String constants also.
+// %%% It will simplify the ldc/primitive path considerably.
+void TemplateTable::fast_aldc(bool wide) {
+  transition(vtos, atos);
+
+  if (!EnableMethodHandles) {
+    // We should not encounter this bytecode if !EnableMethodHandles.
+    // The verifier will stop it.  However, if we get past the verifier,
+    // this will stop the thread in a reasonable way, without crashing the JVM.
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                     InterpreterRuntime::throw_IncompatibleClassChangeError));
+    // the call_VM checks for exception, so we should never return here.
+    __ should_not_reach_here();
+    return;
+  }
+
+  const Register cache = rcx;
+  const Register index = rdx;
+
+  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
+  if (VerifyOops) {
+    __ verify_oop(rax);
+  }
+}
+
 void TemplateTable::ldc2_w() {
   transition(vtos, vtos);
   Label Long, Done;
@@ -2063,6 +2089,12 @@
   case Bytecodes::_invokedynamic:
     entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
     break;
+  case Bytecodes::_fast_aldc:
+    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
+    break;
+  case Bytecodes::_fast_aldc_w:
+    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
+    break;
   default:
     ShouldNotReachHere();
     break;
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 VM_Version::CpuidInfo VM_Version::_cpuid_info   = { 0, };
 
 static BufferBlob* stub_blob;
-static const int stub_size = 300;
+static const int stub_size = 400;
 
 extern "C" {
   typedef void (*getPsrInfo_stub_t)(void*);
@@ -56,7 +56,7 @@
     const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
     const uint32_t CPU_FAMILY_486   = (4 << CPU_FAMILY_SHIFT);
 
-    Label detect_486, cpu486, detect_586, std_cpuid1;
+    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
     Label ext_cpuid1, ext_cpuid5, done;
 
     StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
@@ -131,13 +131,62 @@
     __ movl(Address(rsi, 8), rcx);
     __ movl(Address(rsi,12), rdx);
 
-    __ cmpl(rax, 3);     // Is cpuid(0x4) supported?
-    __ jccb(Assembler::belowEqual, std_cpuid1);
+    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
+    __ jccb(Assembler::belowEqual, std_cpuid4);
+
+    //
+    // cpuid(0xB) Processor Topology
+    //
+    __ movl(rax, 0xb);
+    __ xorl(rcx, rcx);   // Threads level
+    __ cpuid();
+
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
+
+    __ movl(rax, 0xb);
+    __ movl(rcx, 1);     // Cores level
+    __ cpuid();
+    __ push(rax);
+    __ andl(rax, 0x1f);  // Determine if valid topology level
+    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
+    __ andl(rax, 0xffff);
+    __ pop(rax);
+    __ jccb(Assembler::equal, std_cpuid4);
+
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
+
+    __ movl(rax, 0xb);
+    __ movl(rcx, 2);     // Packages level
+    __ cpuid();
+    __ push(rax);
+    __ andl(rax, 0x1f);  // Determine if valid topology level
+    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
+    __ andl(rax, 0xffff);
+    __ pop(rax);
+    __ jccb(Assembler::equal, std_cpuid4);
+
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
 
     //
     // cpuid(0x4) Deterministic cache params
     //
+    __ bind(std_cpuid4);
     __ movl(rax, 4);
+    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
+    __ jccb(Assembler::greater, std_cpuid1);
+
     __ xorl(rcx, rcx);   // L1 cache
     __ cpuid();
     __ push(rax);
@@ -460,13 +509,18 @@
   AllocatePrefetchDistance = allocate_prefetch_distance();
   AllocatePrefetchStyle    = allocate_prefetch_style();
 
-  if( AllocatePrefetchStyle == 2 && is_intel() &&
-      cpu_family() == 6 && supports_sse3() ) { // watermark prefetching on Core
+  if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
+    if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
 #ifdef _LP64
-    AllocatePrefetchDistance = 384;
+      AllocatePrefetchDistance = 384;
 #else
-    AllocatePrefetchDistance = 320;
+      AllocatePrefetchDistance = 320;
 #endif
+    }
+    if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
+      AllocatePrefetchDistance = 192;
+      AllocatePrefetchLines = 4;
+    }
   }
   assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
 
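The stub added above walks cpuid leaf 0xB one subleaf at a time (threads, cores, packages) and bails out to std_cpuid4 when eax[4:0] | ebx[15:0] is zero. The same walk can be sketched in user-space C++; __get_cpuid_count from GCC/Clang's <cpuid.h> is an assumption here (MSVC would need __cpuidex), and its availability depends on compiler version:

    #include <cpuid.h>
    #include <cstdio>

    int main() {
      unsigned eax, ebx, ecx, edx;
      for (unsigned level = 0; level < 3; level++) {  // threads, cores, packages
        if (!__get_cpuid_count(0xB, level, &eax, &ebx, &ecx, &edx)) return 1;
        // eax[4:0] | ebx[15:0] == 0 marks an invalid level, as in the stub above.
        if (((eax & 0x1f) | (ebx & 0xffff)) == 0) break;
        std::printf("level %u: logical cpus = %u\n", level, ebx & 0xffff);
      }
      return 0;
    }
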
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,14 @@
     } bits;
   };
 
+  union TplCpuidBEbx {
+    uint32_t value;
+    struct {
+      uint32_t logical_cpus : 16,
+                            : 16;
+    } bits;
+  };
+
   union ExtCpuid1Ecx {
     uint32_t value;
     struct {
@@ -211,6 +219,25 @@
     uint32_t     dcp_cpuid4_ecx; // unused currently
     uint32_t     dcp_cpuid4_edx; // unused currently
 
+    // cpuid function 0xB (processor topology)
+    // ecx = 0
+    uint32_t     tpl_cpuidB0_eax;
+    TplCpuidBEbx tpl_cpuidB0_ebx;
+    uint32_t     tpl_cpuidB0_ecx; // unused currently
+    uint32_t     tpl_cpuidB0_edx; // unused currently
+
+    // ecx = 1
+    uint32_t     tpl_cpuidB1_eax;
+    TplCpuidBEbx tpl_cpuidB1_ebx;
+    uint32_t     tpl_cpuidB1_ecx; // unused currently
+    uint32_t     tpl_cpuidB1_edx; // unused currently
+
+    // ecx = 2
+    uint32_t     tpl_cpuidB2_eax;
+    TplCpuidBEbx tpl_cpuidB2_ebx;
+    uint32_t     tpl_cpuidB2_ecx; // unused currently
+    uint32_t     tpl_cpuidB2_edx; // unused currently
+
     // cpuid function 0x80000000 // example, unused
     uint32_t ext_max_function;
     uint32_t ext_vendor_name_0;
@@ -316,6 +343,9 @@
   static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
   static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
   static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
+  static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
+  static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
+  static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
 
   // Initialization
   static void initialize();
@@ -346,10 +376,22 @@
   static bool is_amd()            { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
   static bool is_intel()          { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
 
+  static bool supports_processor_topology() {
+    return (_cpuid_info.std_max_function >= 0xB) &&
+           // eax[4:0] | ebx[0:15] == 0 indicates invalid topology level.
+           // Some cpus have max cpuid >= 0xB but do not support processor topology.
+           (((_cpuid_info.tpl_cpuidB0_eax & 0x1f) | _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus) != 0);
+  }
+
   static uint cores_per_cpu()  {
     uint result = 1;
     if (is_intel()) {
-      result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
+      if (supports_processor_topology()) {
+        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
+                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+      } else {
+        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
+      }
     } else if (is_amd()) {
       result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
     }
@@ -358,7 +400,9 @@
 
   static uint threads_per_core()  {
     uint result = 1;
-    if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
+    if (is_intel() && supports_processor_topology()) {
+      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+    } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
       result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
                cores_per_cpu();
     }
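
A worked instance of the new topology arithmetic above: leaf 0xB subleaf 0 reports logical CPUs per core and subleaf 1 logical CPUs per package, so the core count falls out as a ratio. The values below are illustrative, not from real hardware:

    // Hypothetical 4-core, 2-thread-per-core package:
    unsigned logical_per_core    = 2;  // cpuid(0xB, ecx=0).ebx[15:0]
    unsigned logical_per_package = 8;  // cpuid(0xB, ecx=1).ebx[15:0]

    unsigned threads_per_core = logical_per_core;                       // == 2
    unsigned cores_per_cpu    = logical_per_package / logical_per_core; // == 8 / 2 == 4
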
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -820,7 +820,7 @@
                                            bool      is_top_frame) {
   assert(popframe_extra_args == 0, "what to do?");
   assert(!is_top_frame || (!callee_locals && !callee_param_count),
-         "top frame should have no caller")
+         "top frame should have no caller");
 
   // This code must exactly match what InterpreterFrame::build
   // does (the full InterpreterFrame::build, that is, not the
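
The one-character fix above adds the semicolon the statement always needed; whether the old form even compiled depended on how the assert macro expanded. A sketch assuming the usual statement-like do/while(0) macro shape (the real HotSpot macro differs in detail):

    #include <cstdio>

    #define my_assert(p, msg) \
      do { if (!(p)) std::fprintf(stderr, "%s\n", msg); } while (0)

    void check(bool ok) {
      my_assert(ok, "top frame should have no caller");  // ';' completes the statement
    }
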
--- a/src/os/linux/vm/os_linux.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -2079,9 +2079,9 @@
 static char saved_jvm_path[MAXPATHLEN] = {0};
 
 // Find the full path to the current module, libjvm.so or libjvm_g.so
-void os::jvm_path(char *buf, jint len) {
+void os::jvm_path(char *buf, jint buflen) {
   // Error checking.
-  if (len < MAXPATHLEN) {
+  if (buflen < MAXPATHLEN) {
     assert(false, "must use a large-enough buffer");
     buf[0] = '\0';
     return;
@@ -2117,6 +2117,9 @@
       // Look for JAVA_HOME in the environment.
       char* java_home_var = ::getenv("JAVA_HOME");
       if (java_home_var != NULL && java_home_var[0] != 0) {
+        char* jrelib_p;
+        int len;
+
         // Check the current module name "libjvm.so" or "libjvm_g.so".
         p = strrchr(buf, '/');
         assert(strstr(p, "/libjvm") == p, "invalid library name");
@@ -2124,14 +2127,24 @@
 
         if (realpath(java_home_var, buf) == NULL)
           return;
-        sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
+
+        // determine if this is a legacy image or modules image
+        // modules image doesn't have "jre" subdirectory
+        len = strlen(buf);
+        jrelib_p = buf + len;
+        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
+        if (0 != access(buf, F_OK)) {
+          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
+        }
+
         if (0 == access(buf, F_OK)) {
           // Use current module name "libjvm[_g].so" instead of
           // "libjvm"debug_only("_g")".so" since for fastdebug version
           // we should have "libjvm.so" but debug_only("_g") adds "_g"!
           // It is used when we are choosing the HPI library's name
           // "libhpi[_g].so" in hpi::initialize_get_interface().
-          sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
+          len = strlen(buf);
+          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
         } else {
           // Go back to path of .so
           if (realpath(dli_fname, buf) == NULL)
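
Two things change in jvm_path above: the real buffer capacity (buflen) is threaded through so every sprintf becomes a bounded snprintf, and the code probes the legacy <JAVA_HOME>/jre/lib/<arch> layout before falling back to <JAVA_HOME>/lib/<arch> for modules images. A standalone sketch of that probe order, assuming POSIX access(2); append_lib_dir is an illustrative name:

    #include <cstdio>
    #include <cstring>
    #include <unistd.h>

    // buf already holds the realpath of JAVA_HOME; buflen is its true capacity.
    static bool append_lib_dir(char* buf, size_t buflen, const char* cpu_arch) {
      size_t len = strlen(buf);
      snprintf(buf + len, buflen - len, "/jre/lib/%s", cpu_arch);  // legacy image
      if (access(buf, F_OK) != 0) {
        snprintf(buf + len, buflen - len, "/lib/%s", cpu_arch);    // modules image
      }
      return access(buf, F_OK) == 0;
    }
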
--- a/src/os/linux/vm/vtune_linux.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_vtune_linux.cpp.incl"
-
-// empty implementation
-
-void VTune::start_GC() {}
-void VTune::end_GC() {}
-void VTune::start_class_load() {}
-void VTune::end_class_load() {}
-void VTune::exit() {}
-void VTune::register_stub(const char* name, address start, address end) {}
-
-void VTune::create_nmethod(nmethod* nm) {}
-void VTune::delete_nmethod(nmethod* nm) {}
-
-void vtune_init() {}
-
-
-// Reconciliation History
-// vtune_solaris.cpp    1.8 99/07/12 23:54:21
-// End
--- a/src/os/solaris/vm/osThread_solaris.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/os/solaris/vm/osThread_solaris.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -123,7 +123,7 @@
 
   int set_interrupt_callback    (Sync_Interrupt_Callback * cb);
   void remove_interrupt_callback(Sync_Interrupt_Callback * cb);
-  void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args);
+  void do_interrupt_callbacks_at_interrupt(InterruptArguments *args);
 
  // ***************************************************************
  // java.lang.Thread.interrupt state.
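
The osThread_solaris fix removes an extra qualification: naming the class on a member declaration inside its own class body is ill-formed C++, and newer GCC releases reject it. A minimal illustration with a hypothetical class:

    class Widget {
     public:
      // void Widget::resize(int);  // ill-formed: extra qualification inside the class
      void resize(int);             // correct: member declarations are unqualified
    };

    void Widget::resize(int) {}     // qualification belongs on the out-of-class definition
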
--- a/src/os/solaris/vm/os_solaris.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/os/solaris/vm/os_solaris.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -2435,6 +2435,8 @@
       char* java_home_var = ::getenv("JAVA_HOME");
       if (java_home_var != NULL && java_home_var[0] != 0) {
         char cpu_arch[12];
+        char* jrelib_p;
+        int   len;
         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 #ifdef _LP64
         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
@@ -2450,14 +2452,23 @@
         p = strstr(p, "_g") ? "_g" : "";
 
         realpath(java_home_var, buf);
-        sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
+        // determine if this is a legacy image or modules image
+        // modules image doesn't have "jre" subdirectory
+        len = strlen(buf);
+        jrelib_p = buf + len;
+        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
+        if (0 != access(buf, F_OK)) {
+          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
+        }
+
         if (0 == access(buf, F_OK)) {
           // Use current module name "libjvm[_g].so" instead of
           // "libjvm"debug_only("_g")".so" since for fastdebug version
           // we should have "libjvm.so" but debug_only("_g") adds "_g"!
           // It is used when we are choosing the HPI library's name
           // "libhpi[_g].so" in hpi::initialize_get_interface().
-          sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
+          len = strlen(buf);
+          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
         } else {
           // Go back to path of .so
           realpath((char *)dlinfo.dli_fname, buf);
--- a/src/os/solaris/vm/vtune_solaris.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_vtune_solaris.cpp.incl"
-
-// empty implementation
-
-void VTune::start_GC() {}
-void VTune::end_GC() {}
-void VTune::start_class_load() {}
-void VTune::end_class_load() {}
-void VTune::exit() {}
-void VTune::register_stub(const char* name, address start, address end) {}
-
-void VTune::create_nmethod(nmethod* nm) {}
-void VTune::delete_nmethod(nmethod* nm) {}
-
-void vtune_init() {}
--- a/src/os/windows/vm/vtune_windows.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,290 +0,0 @@
-/*
- * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_vtune_windows.cpp.incl"
-
-static int current_method_ID = 0;
-
-// ------------- iJITProf.h -------------------
-// defined by Intel -- do not change
-
-#include "windows.h"
-
-extern "C" {
-  enum iJITP_Event {
-    ExceptionOccurred_S,                  // Java exception
-    ExceptionOccurred_IDS,
-
-    Shutdown,                             // VM exit
-
-    ThreadCreate,                         // threads
-    ThreadDestroy,
-    ThreadSwitch,
-
-    ClassLoadStart,                       // class loading
-    ClassLoadEnd,
-
-    GCStart,                              // GC
-    GCEnd,
-
-    NMethodCreate = 13,                   // nmethod creation
-    NMethodDelete
-
-    // rest of event types omitted (call profiling not supported yet)
-  };
-
-  // version number -- 0 if VTune not installed
-  int WINAPI iJitP_VersionNumber();
-
-  enum iJITP_ModeFlags {
-    NoNotification = 0x0,                // don't call vtune
-    NotifyNMethodCreate   = 0x1,         // notify NMethod_Create
-    NotifyNMethodDelete   = 0x2,         // notify NMethod_Create
-    NotifyMethodEnter     = 0x4,         // method entry
-    NotifyMethodExit      = 0x8,         // method exit
-    NotifyShutdown        = 0x10,        // VM exit
-    NotifyGC              = 0x20,        // GC
-  };
-
-  // call back function type
-  typedef void (WINAPI *ModeChangedFn)(iJITP_ModeFlags flags);
-
-  // -------------  VTune method interfaces ----------------------
-  typedef void  (WINAPI *RegisterCallbackFn)(ModeChangedFn fn);   // register callback
-  typedef int   (WINAPI *NotifyEventFn)(iJITP_Event, void* event_data);
-
-  // specific event data structures
-
-  // data for NMethodCreate
-
-  struct VTuneObj {                       // base class for allocation
-                                          // (can't use CHeapObj -- has vtable ptr)
-    void* operator new(size_t size) { return os::malloc(size); }
-    void  operator delete(void* p)  { fatal("never delete VTune data"); }
-  };
-
-  struct LineNumberInfo : VTuneObj {      // PC-to-line number mapping
-    unsigned long offset;                 // byte offset from start of method
-    unsigned long line_num;               // corresponding line number
-  };
-
-  struct MethodLoadInfo : VTuneObj {
-    unsigned long methodID;               // unique method ID
-    const char* name;                     // method name
-    unsigned long instr_start;            // start address
-    unsigned long instr_size;             // length in bytes
-    unsigned long line_number_size;       // size of line number table
-    LineNumberInfo* line_number_table;    // line number mapping
-    unsigned long classID;                // unique class ID
-    char* class_file_name;                // fully qualified class file name
-    char* source_file_name;               // fully qualified source file name
-
-    MethodLoadInfo(nmethod* nm);          // for real nmethods
-    MethodLoadInfo(const char* vm_name, address start, address end);
-                                          // for "nmethods" like stubs, interpreter, etc
-
-  };
-
-  // data for NMethodDelete
-  struct MethodInfo : VTuneObj {
-    unsigned long methodID;               // unique method ID
-    unsigned long classID;                // (added for convenience -- not part of Intel interface)
-
-    MethodInfo(methodOop m);
-  };
-};
-
-MethodInfo::MethodInfo(methodOop m) {
-  // just give it a new ID -- we're not compiling methods twice (usually)
-  // (and even if we did, one might want to see the two versions separately)
-  methodID = ++current_method_ID;
-}
-
-MethodLoadInfo::MethodLoadInfo(const char* vm_name, address start, address end) {
-  classID  = 0;
-  methodID = ++current_method_ID;
-  name = vm_name;
-  instr_start = (unsigned long)start;
-  instr_size = end - start;
-  line_number_size = 0;
-  line_number_table = NULL;
-  class_file_name = source_file_name = "HotSpot JVM";
-}
-
-MethodLoadInfo::MethodLoadInfo(nmethod* nm) {
-  methodOop m = nm->method();
-  MethodInfo info(m);
-  classID  = info.classID;
-  methodID = info.methodID;
-  name = strdup(m->name()->as_C_string());
-  instr_start = (unsigned long)nm->instructions_begin();
-  instr_size = nm->code_size();
-  line_number_size = 0;
-  line_number_table = NULL;
-  klassOop kl = m->method_holder();
-  char* class_name = Klass::cast(kl)->name()->as_C_string();
-  char* file_name = NEW_C_HEAP_ARRAY(char, strlen(class_name) + 1);
-  strcpy(file_name, class_name);
-  class_file_name = file_name;
-  char* src_name = NEW_C_HEAP_ARRAY(char, strlen(class_name) + strlen(".java") + 1);
-  strcpy(src_name, class_name);
-  strcat(src_name, ".java");
-  source_file_name = src_name;
-}
-
-// --------------------- DLL loading functions ------------------------
-
-#define DLLNAME "iJitProf.dll"
-
-static HINSTANCE load_lib(char* name) {
-  HINSTANCE lib = NULL;
-  HKEY hk;
-
-  // try to get VTune directory from the registry
-  if (RegOpenKey(HKEY_CURRENT_USER, "Software\\VB and VBA Program Settings\\VTune\\StartUp", &hk) == ERROR_SUCCESS) {
-    for (int i = 0; true; i++) {
-      char szName[MAX_PATH + 1];
-      char szVal [MAX_PATH + 1];
-      DWORD cbName, cbVal;
-
-      cbName = cbVal = MAX_PATH + 1;
-      if (RegEnumValue(hk, i, szName, &cbName, NULL, NULL, (LPBYTE)szVal, &cbVal) == ERROR_SUCCESS) {
-        // get VTune directory
-        if (!strcmp(szName, name)) {
-          char*p = szVal;
-          while (*p == ' ') p++;    // trim
-          char* q = p + strlen(p) - 1;
-          while (*q == ' ') *(q--) = '\0';
-
-          // chdir to the VTune dir
-          GetCurrentDirectory(MAX_PATH + 1, szName);
-          SetCurrentDirectory(p);
-          // load lib
-          lib = LoadLibrary(strcat(strcat(p, "\\"), DLLNAME));
-          if (lib != NULL && WizardMode) tty->print_cr("*loaded VTune DLL %s", p);
-          // restore current dir
-          SetCurrentDirectory(szName);
-          break;
-        }
-      } else {
-        break;
-      }
-    }
-  }
-  return lib;
-}
-
-static RegisterCallbackFn iJIT_RegisterCallback = NULL;
-static NotifyEventFn      iJIT_NotifyEvent      = NULL;
-
-static bool load_iJIT_funcs() {
-  // first try to load from PATH
-  HINSTANCE lib = LoadLibrary(DLLNAME);
-  if (lib != NULL && WizardMode) tty->print_cr("*loaded VTune DLL %s via PATH", DLLNAME);
-
-  // if not successful, try to look in the VTUNE directory
-  if (lib == NULL) lib = load_lib("VTUNEDIR30");
-  if (lib == NULL) lib = load_lib("VTUNEDIR25");
-  if (lib == NULL) lib = load_lib("VTUNEDIR");
-
-  if (lib == NULL) return false;    // unsuccessful
-
-  // try to load the functions
-  iJIT_RegisterCallback = (RegisterCallbackFn)GetProcAddress(lib, "iJIT_RegisterCallback");
-  iJIT_NotifyEvent      = (NotifyEventFn)     GetProcAddress(lib, "iJIT_NotifyEvent");
-
-  if (!iJIT_RegisterCallback) tty->print_cr("*couldn't find VTune entry point iJIT_RegisterCallback");
-  if (!iJIT_NotifyEvent)      tty->print_cr("*couldn't find VTune entry point iJIT_NotifyEvent");
-  return iJIT_RegisterCallback != NULL && iJIT_NotifyEvent != NULL;
-}
-
-// --------------------- VTune class ------------------------
-
-static bool active = false;
-static int  flags  = 0;
-
-void VTune::start_GC() {
-  if (active && (flags & NotifyGC)) iJIT_NotifyEvent(GCStart, NULL);
-}
-
-void VTune::end_GC() {
-  if (active && (flags & NotifyGC)) iJIT_NotifyEvent(GCEnd, NULL);
-}
-
-void VTune::start_class_load() {
-  // not yet implemented in VTune
-}
-
-void VTune::end_class_load() {
-  // not yet implemented in VTune
-}
-
-void VTune::exit() {
-  if (active && (flags & NotifyShutdown)) iJIT_NotifyEvent(Shutdown, NULL);
-}
-
-void VTune::register_stub(const char* name, address start, address end) {
-  if (flags & NotifyNMethodCreate) {
-    MethodLoadInfo* info = new MethodLoadInfo(name, start, end);
-    if (PrintMiscellaneous && WizardMode && Verbose) {
-      tty->print_cr("NMethodCreate %s (%d): %#x..%#x", info->name, info->methodID,
-                    info->instr_start, info->instr_start + info->instr_size);
-    }
-    iJIT_NotifyEvent(NMethodCreate, info);
-  }
-}
-
-void VTune::create_nmethod(nmethod* nm) {
-  if (flags & NotifyNMethodCreate) {
-    MethodLoadInfo* info = new MethodLoadInfo(nm);
-    if (PrintMiscellaneous && WizardMode && Verbose) {
-      tty->print_cr("NMethodCreate %s (%d): %#x..%#x", info->name, info->methodID,
-                    info->instr_start, info->instr_start + info->instr_size);
-    }
-    iJIT_NotifyEvent(NMethodCreate, info);
-  }
-}
-
-void VTune::delete_nmethod(nmethod* nm) {
-  if (flags & NotifyNMethodDelete) {
-    MethodInfo* info = new MethodInfo(nm->method());
-    iJIT_NotifyEvent(NMethodDelete, info);
-  }
-}
-
-static void set_flags(int new_flags) {
-  flags = new_flags;
-  // if (WizardMode) tty->print_cr("*new VTune flags: %#x", flags);
-}
-
-void vtune_init() {
-  if (!UseVTune) return;
-  active = load_iJIT_funcs();
-  if (active) {
-    iJIT_RegisterCallback((ModeChangedFn)set_flags);
-  } else {
-    assert(flags == 0, "flags shouldn't be set");
-  }
-}
--- a/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -26,7 +26,7 @@
 #ifdef AMD64
   (void)memmove(to, from, count * HeapWordSize);
 #else
-  // Same as pd_aligned_conjoint_words, except includes a zero-count check.
+  // Includes a zero-count check.
   intx temp;
   __asm__ volatile("        testl   %6,%6         ;"
                    "        jz      7f            ;"
@@ -84,7 +84,7 @@
     break;
   }
 #else
-  // Same as pd_aligned_disjoint_words, except includes a zero-count check.
+  // Includes a zero-count check.
   intx temp;
   __asm__ volatile("        testl   %6,%6       ;"
                    "        jz      3f          ;"
@@ -130,75 +130,18 @@
 }
 
 static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
-#ifdef AMD64
-  (void)memmove(to, from, count * HeapWordSize);
-#else
-  // Same as pd_conjoint_words, except no zero-count check.
-  intx temp;
-  __asm__ volatile("        cmpl    %4,%5         ;"
-                   "        leal    -4(%4,%6,4),%3;"
-                   "        jbe     1f            ;"
-                   "        cmpl    %7,%5         ;"
-                   "        jbe     4f            ;"
-                   "1:      cmpl    $32,%6        ;"
-                   "        ja      3f            ;"
-                   "        subl    %4,%1         ;"
-                   "2:      movl    (%4),%3       ;"
-                   "        movl    %7,(%5,%4,1)  ;"
-                   "        addl    $4,%0         ;"
-                   "        subl    $1,%2          ;"
-                   "        jnz     2b            ;"
-                   "        jmp     7f            ;"
-                   "3:      rep;    smovl         ;"
-                   "        jmp     7f            ;"
-                   "4:      cmpl    $32,%2        ;"
-                   "        movl    %7,%0         ;"
-                   "        leal    -4(%5,%6,4),%1;"
-                   "        ja      6f            ;"
-                   "        subl    %4,%1         ;"
-                   "5:      movl    (%4),%3       ;"
-                   "        movl    %7,(%5,%4,1)  ;"
-                   "        subl    $4,%0         ;"
-                   "        subl    $1,%2          ;"
-                   "        jnz     5b            ;"
-                   "        jmp     7f            ;"
-                   "6:      std                   ;"
-                   "        rep;    smovl         ;"
-                   "        cld                   ;"
-                   "7:      nop                    "
-                   : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
-                   : "0"  (from), "1"  (to), "2"  (count), "3"  (temp)
-                   : "memory", "flags");
-#endif // AMD64
+  pd_conjoint_words(from, to, count);
 }
 
 static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
-#ifdef AMD64
   pd_disjoint_words(from, to, count);
-#else
-  // Same as pd_disjoint_words, except no zero-count check.
-  intx temp;
-  __asm__ volatile("        cmpl    $32,%6      ;"
-                   "        ja      2f          ;"
-                   "        subl    %4,%1       ;"
-                   "1:      movl    (%4),%3     ;"
-                   "        movl    %7,(%5,%4,1);"
-                   "        addl    $4,%0       ;"
-                   "        subl    $1,%2        ;"
-                   "        jnz     1b          ;"
-                   "        jmp     3f          ;"
-                   "2:      rep;    smovl       ;"
-                   "3:      nop                  "
-                   : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
-                   : "0"  (from), "1"  (to), "2"  (count), "3"  (temp)
-                   : "memory", "cc");
-#endif // AMD64
 }
 
 static void pd_conjoint_bytes(void* from, void* to, size_t count) {
 #ifdef AMD64
   (void)memmove(to, from, count);
 #else
+  // Includes a zero-count check.
   intx temp;
   __asm__ volatile("        testl   %6,%6          ;"
                    "        jz      13f            ;"
--- a/src/os_cpu/linux_x86/vm/linux_x86_32.s	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/os_cpu/linux_x86/vm/linux_x86_32.s	Sat Jul 31 15:10:59 2010 +0100
@@ -121,10 +121,10 @@
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx            # byte count less prefix
-        andl     $3,%ecx              # suffix byte count
+5:      andl     $3,%ecx              # suffix byte count
         jz       7f                   # no suffix
         # copy suffix
-5:      xorl     %eax,%eax
+        xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
         addl     $1,%eax
@@ -159,10 +159,10 @@
         # copy dwords, aligned or not
 3:      rep;     smovl
 4:      movl     %eax,%ecx            # byte count
-        andl     $3,%ecx              # suffix byte count
+5:      andl     $3,%ecx              # suffix byte count
         jz       7f                   # no suffix
         # copy suffix
-5:      subl     %esi,%edi
+        subl     %esi,%edi
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
@@ -214,10 +214,10 @@
         # copy aligned dwords
 3:      rep;     smovl
 4:      movl     %eax,%ecx
-        andl     $3,%ecx
+5:      andl     $3,%ecx
         jz       7f
         # copy suffix
-5:      xorl     %eax,%eax
+        xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
         addl     $1,%eax
@@ -250,9 +250,9 @@
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx
-        andl     $3,%ecx
+5:      andl     $3,%ecx
         jz       7f
-5:      subl     %esi,%edi
+        subl     %esi,%edi
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
@@ -287,11 +287,12 @@
         andl     $3,%eax              # either 0 or 2
         jz       1f                   # no prefix
         # copy prefix
+        subl     $1,%ecx
+        jl       5f                   # zero count
         movw     (%esi),%dx
         movw     %dx,(%edi)
         addl     %eax,%esi            # %eax == 2
         addl     %eax,%edi
-        subl     $1,%ecx
 1:      movl     %ecx,%eax            # word count less prefix
         sarl     %ecx                 # dword count
         jz       4f                   # no dwords to move
@@ -454,12 +455,13 @@
         ret
         .=.+10
 2:      subl     %esi,%edi
+        jmp      4f
         .p2align 4,,15
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        subl     $1,%ecx
-        jnz      3b
+4:      subl     $1,%ecx
+        jge      3b
         popl     %edi
         popl     %esi
         ret
@@ -467,19 +469,20 @@
         std
         leal     -4(%edi,%ecx,4),%edi # to + count*4 - 4
         cmpl     $32,%ecx
-        ja       3f                   # > 32 dwords
+        ja       4f                   # > 32 dwords
         subl     %eax,%edi            # eax == from + count*4 - 4
+        jmp      3f
         .p2align 4,,15
 2:      movl     (%eax),%edx
         movl     %edx,(%edi,%eax,1)
         subl     $4,%eax
-        subl     $1,%ecx
-        jnz      2b
+3:      subl     $1,%ecx
+        jge      2b
         cld
         popl     %edi
         popl     %esi
         ret
-3:      movl     %eax,%esi            # from + count*4 - 4
+4:      movl     %eax,%esi            # from + count*4 - 4
         rep;     smovl
         cld
         popl     %edi
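
The .s hunks above rotate each copy loop so the count is decremented and tested before the first iteration (jmp to the "subl $1,%ecx; jge" at the entry label) instead of after it (jnz at the bottom), and the prefix-word copy likewise tests first, so a zero count now falls straight through. The C++ equivalent of that rotation:

    // Before: a bottom-tested loop ("subl $1,%ecx; jnz") copies once even if count == 0.
    // After:  enter at the test ("jmp 4f" ... "4: subl $1,%ecx; jge 3b"):
    void copy_dwords(const int* from, int* to, long count) {
      for (long n = count - 1; n >= 0; n--)
        *to++ = *from++;                // body runs zero times when count == 0
    }
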
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -861,7 +861,7 @@
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
 
-extern "C" _solaris_raw_setup_fpu(address ptr);
+extern "C" void _solaris_raw_setup_fpu(address ptr);
 void os::setup_fpu() {
   address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
   _solaris_raw_setup_fpu(fpu_cntrl);
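
The one-line fix above supplies the missing return type: the old declaration relied on implicit int, which C++ does not allow. A minimal illustration with a hypothetical function:

    // extern "C" setup_fpu(void* p);      // ill-formed in C++: no implicit int
    extern "C" void setup_fpu(void* p);    // correct: return type stated explicitly
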
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Sat Jul 31 15:10:59 2010 +0100
@@ -154,10 +154,10 @@
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx            / byte count less prefix
-        andl     $3,%ecx              / suffix byte count
+5:      andl     $3,%ecx              / suffix byte count
         jz       7f                   / no suffix
         / copy suffix
-5:      xorl     %eax,%eax
+        xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
         addl     $1,%eax
@@ -192,10 +192,10 @@
         / copy dwords, aligned or not
 3:      rep;     smovl
 4:      movl     %eax,%ecx            / byte count
-        andl     $3,%ecx              / suffix byte count
+5:      andl     $3,%ecx              / suffix byte count
         jz       7f                   / no suffix
         / copy suffix
-5:      subl     %esi,%edi
+        subl     %esi,%edi
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
@@ -246,10 +246,10 @@
         / copy aligned dwords
 3:      rep;     smovl
 4:      movl     %eax,%ecx
-        andl     $3,%ecx
+5:      andl     $3,%ecx
         jz       7f
         / copy suffix
-5:      xorl     %eax,%eax
+        xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
         addl     $1,%eax
@@ -282,9 +282,9 @@
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx
-        andl     $3,%ecx
+5:      andl     $3,%ecx
         jz       7f
-5:      subl     %esi,%edi
+        subl     %esi,%edi
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
@@ -318,11 +318,12 @@
         andl     $3,%eax              / either 0 or 2
         jz       1f                   / no prefix
         / copy prefix
+        subl     $1,%ecx
+        jl       5f                   / zero count
         movw     (%esi),%dx
         movw     %dx,(%edi)
         addl     %eax,%esi            / %eax == 2
         addl     %eax,%edi
-        subl     $1,%ecx
 1:      movl     %ecx,%eax            / word count less prefix
         sarl     %ecx                 / dword count
         jz       4f                   / no dwords to move
@@ -482,12 +483,13 @@
         ret
         .=.+10
 2:      subl     %esi,%edi
+        jmp      4f
         .align   16
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        subl     $1,%ecx
-        jnz      3b
+4:      subl     $1,%ecx
+        jge      3b
         popl     %edi
         popl     %esi
         ret
@@ -495,19 +497,20 @@
         std
         leal     -4(%edi,%ecx,4),%edi / to + count*4 - 4
         cmpl     $32,%ecx
-        ja       3f                   / > 32 dwords
+        ja       4f                   / > 32 dwords
         subl     %eax,%edi            / eax == from + count*4 - 4
+        jmp      3f
         .align   16
 2:      movl     (%eax),%edx
         movl     %edx,(%edi,%eax,1)
         subl     $4,%eax
-        subl     $1,%ecx
-        jnz      2b
+3:      subl     $1,%ecx
+        jge      2b
         cld
         popl     %edi
         popl     %esi
         ret
-3:      movl     %eax,%esi            / from + count*4 - 4
+4:      movl     %eax,%esi            / from + count*4 - 4
         rep;     smovl
         cld
         popl     %edi
--- a/src/share/vm/asm/codeBuffer.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/asm/codeBuffer.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -404,7 +404,7 @@
       locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
     } else {
       locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
-      Copy::conjoint_bytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
+      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
       _locs_own = true;
     }
     _locs_start    = locs_start;
@@ -581,7 +581,7 @@
                              (HeapWord*)(buf+buf_offset),
                              (lsize + HeapWordSize-1) / HeapWordSize);
       } else {
-        Copy::conjoint_bytes(lstart, buf+buf_offset, lsize);
+        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
       }
     }
     buf_offset += lsize;
--- a/src/share/vm/c1/c1_Compilation.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/c1/c1_Compilation.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -242,10 +242,10 @@
   code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
                                         locs_buffer_size / sizeof(relocInfo));
   code->initialize_consts_size(Compilation::desired_max_constant_size());
-  // Call stubs + deopt/exception handler
+  // Call stubs + two deopt handlers (regular and MH) + exception handler
   code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
                               LIR_Assembler::exception_handler_size +
-                              LIR_Assembler::deopt_handler_size);
+                              2 * LIR_Assembler::deopt_handler_size);
 }
 
 
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -878,15 +878,12 @@
       case T_OBJECT :
        {
         ciObject* obj = con.as_object();
-        if (obj->is_klass()) {
-          ciKlass* klass = obj->as_klass();
-          if (!klass->is_loaded() || PatchALot) {
-            patch_state = state()->copy();
-            t = new ObjectConstant(obj);
-          } else {
-            t = new InstanceConstant(klass->java_mirror());
-          }
+        if (!obj->is_loaded()
+            || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
+          patch_state = state()->copy();
+          t = new ObjectConstant(obj);
         } else {
+          assert(!obj->is_klass(), "must be java_mirror of klass");
           t = new InstanceConstant(obj->as_instance());
         }
         break;
--- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -601,7 +601,7 @@
 
 
 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
-  Bytecode_field* field_access = Bytecode_field_at(caller(), caller->bcp_from(bci));
+  Bytecode_field* field_access = Bytecode_field_at(caller, bci);
   // This can be static or non-static field access
   Bytecodes::Code code       = field_access->code();
 
@@ -721,7 +721,7 @@
   Handle load_klass(THREAD, NULL);                // oop needed by load_klass_patching code
   if (stub_id == Runtime1::access_field_patching_id) {
 
-    Bytecode_field* field_access = Bytecode_field_at(caller_method(), caller_method->bcp_from(bci));
+    Bytecode_field* field_access = Bytecode_field_at(caller_method, bci);
     FieldAccessInfo result; // initialize class if needed
     Bytecodes::Code code = field_access->code();
     constantPoolHandle constants(THREAD, caller_method->constants());
@@ -781,11 +781,9 @@
       case Bytecodes::_ldc:
       case Bytecodes::_ldc_w:
         {
-          Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method(),
-                                                               caller_method->bcp_from(bci));
-          klassOop resolved = caller_method->constants()->klass_at(cc->index(), CHECK);
-          // ldc wants the java mirror.
-          k = resolved->klass_part()->java_mirror();
+          Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method, bci);
+          k = cc->resolve_constant(CHECK);
+          assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant");
         }
         break;
       default: Unimplemented();
@@ -816,6 +814,15 @@
     // Return to the now deoptimized frame.
   }
 
+  // If we are patching in a non-perm oop, make sure the nmethod
+  // is on the right list.
+  if (ScavengeRootsInCode && load_klass.not_null() && load_klass->is_scavengable()) {
+    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
+    if (!nm->on_scavenge_root_list())
+      CodeCache::add_scavenge_root_nmethod(nm);
+  }
 
   // Now copy code back
 
@@ -1115,7 +1122,7 @@
   if (length == 0) return;
   // Not guaranteed to be word atomic, but that doesn't matter
   // for anything but an oop array, which is covered by oop_arraycopy.
-  Copy::conjoint_bytes(src, dst, length);
+  Copy::conjoint_jbytes(src, dst, length);
 JRT_END
 
 JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -106,7 +106,7 @@
 void BCEscapeAnalyzer::set_returned(ArgumentMap vars) {
   for (int i = 0; i < _arg_size; i++) {
     if (vars.contains(i))
-      _arg_returned.set_bit(i);
+      _arg_returned.set(i);
   }
   _return_local = _return_local && !(vars.contains_unknown() || vars.contains_allocated());
   _return_allocated = _return_allocated && vars.contains_allocated() && !(vars.contains_unknown() || vars.contains_vars());
@@ -126,16 +126,16 @@
   if (_conservative)
     return true;
   for (int i = 0; i < _arg_size; i++) {
-    if (vars.contains(i) && _arg_stack.at(i))
+    if (vars.contains(i) && _arg_stack.test(i))
       return true;
   }
   return false;
 }
 
-void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, BitMap &bm) {
+void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, VectorSet &bm) {
   for (int i = 0; i < _arg_size; i++) {
     if (vars.contains(i)) {
-      bm.clear_bit(i);
+      bm >>= i;
     }
   }
 }
@@ -1157,15 +1157,15 @@
   ciSignature* sig = method()->signature();
   int j = 0;
   if (!method()->is_static()) {
-    _arg_local.set_bit(0);
-    _arg_stack.set_bit(0);
+    _arg_local.set(0);
+    _arg_stack.set(0);
     j++;
   }
   for (i = 0; i < sig->count(); i++) {
     ciType* t = sig->type_at(i);
     if (!t->is_primitive_type()) {
-      _arg_local.set_bit(j);
-      _arg_stack.set_bit(j);
+      _arg_local.set(j);
+      _arg_stack.set(j);
     }
     j += t->size();
   }
@@ -1198,9 +1198,9 @@
     set_modified(var, OFFSET_ANY, 4);
     set_global_escape(var);
   }
-  _arg_local.clear();
-  _arg_stack.clear();
-  _arg_returned.clear();
+  _arg_local.Clear();
+  _arg_stack.Clear();
+  _arg_returned.Clear();
   _return_local = false;
   _return_allocated = false;
   _allocated_escapes = true;
@@ -1254,7 +1254,7 @@
 
   // Do not scan method if it has no object parameters and
   // does not return an object (_return_allocated is set in initialize()).
-  if (_arg_local.is_empty() && !_return_allocated) {
+  if (_arg_local.Size() == 0 && !_return_allocated) {
     // Clear all info since method's bytecode was not analysed and
     // set pessimistic escape information.
     clear_escape_info();
@@ -1275,14 +1275,14 @@
   //
   if (!has_dependencies() && !methodData()->is_empty()) {
     for (i = 0; i < _arg_size; i++) {
-      if (_arg_local.at(i)) {
-        assert(_arg_stack.at(i), "inconsistent escape info");
+      if (_arg_local.test(i)) {
+        assert(_arg_stack.test(i), "inconsistent escape info");
         methodData()->set_arg_local(i);
         methodData()->set_arg_stack(i);
-      } else if (_arg_stack.at(i)) {
+      } else if (_arg_stack.test(i)) {
         methodData()->set_arg_stack(i);
       }
-      if (_arg_returned.at(i)) {
+      if (_arg_returned.test(i)) {
         methodData()->set_arg_returned(i);
       }
       methodData()->set_arg_modified(i, _arg_modified[i]);
@@ -1308,9 +1308,12 @@
 
   // read escape information from method descriptor
   for (int i = 0; i < _arg_size; i++) {
-    _arg_local.at_put(i, methodData()->is_arg_local(i));
-    _arg_stack.at_put(i, methodData()->is_arg_stack(i));
-    _arg_returned.at_put(i, methodData()->is_arg_returned(i));
+    if (methodData()->is_arg_local(i))
+      _arg_local.set(i);
+    if (methodData()->is_arg_stack(i))
+      _arg_stack.set(i);
+    if (methodData()->is_arg_returned(i))
+      _arg_returned.set(i);
     _arg_modified[i] = methodData()->arg_modified(i);
   }
   _return_local = methodData()->eflag_set(methodDataOopDesc::return_local);
@@ -1358,26 +1361,26 @@
 
 BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
     : _conservative(method == NULL || !EstimateArgEscape)
+    , _arena(CURRENT_ENV->arena())
     , _method(method)
     , _methodData(method ? method->method_data() : NULL)
     , _arg_size(method ? method->arg_size() : 0)
-    , _stack()
-    , _arg_local(_arg_size)
-    , _arg_stack(_arg_size)
-    , _arg_returned(_arg_size)
-    , _dirty(_arg_size)
+    , _arg_local(_arena)
+    , _arg_stack(_arena)
+    , _arg_returned(_arena)
+    , _dirty(_arena)
     , _return_local(false)
     , _return_allocated(false)
     , _allocated_escapes(false)
     , _unknown_modified(false)
-    , _dependencies()
+    , _dependencies(_arena, 4, 0, NULL)
     , _parent(parent)
     , _level(parent == NULL ? 0 : parent->level() + 1) {
   if (!_conservative) {
-    _arg_local.clear();
-    _arg_stack.clear();
-    _arg_returned.clear();
-    _dirty.clear();
+    _arg_local.Clear();
+    _arg_stack.Clear();
+    _arg_returned.Clear();
+    _dirty.Clear();
     Arena* arena = CURRENT_ENV->arena();
     _arg_modified = (uint *) arena->Amalloc(_arg_size * sizeof(uint));
     Copy::zero_to_bytes(_arg_modified, _arg_size * sizeof(uint));
@@ -1414,8 +1417,8 @@
     deps->assert_evol_method(method());
   }
   for (int i = 0; i < _dependencies.length(); i+=2) {
-    ciKlass *k = _dependencies[i]->as_klass();
-    ciMethod *m = _dependencies[i+1]->as_method();
+    ciKlass *k = _dependencies.at(i)->as_klass();
+    ciMethod *m = _dependencies.at(i+1)->as_method();
     deps->assert_unique_concrete_method(k, m);
   }
 }
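
The bcEscapeAnalyzer hunks migrate the argument-tracking state from fixed-size BitMaps to arena-allocated VectorSets; the call sites map one-for-one. A crib of the mapping, summarizing the substitutions actually made above (not a new API):

    //   bm.set_bit(i)    ->  vs.set(i)
    //   bm.at(i)         ->  vs.test(i)
    //   bm.clear_bit(i)  ->  vs >>= i            // operator>>= removes element i
    //   bm.clear()       ->  vs.Clear()
    //   bm.is_empty()    ->  vs.Size() == 0
    //   bm.at_put(i, b)  ->  if (b) vs.set(i)    // sets start out empty
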
--- a/src/share/vm/ci/bcEscapeAnalyzer.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/bcEscapeAnalyzer.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -22,9 +22,6 @@
  *
  */
 
-define_array(ciObjectArray, ciObject*);
-define_stack(ciObjectList, ciObjectArray);
-
 // This class implements a fast, conservative analysis of effect of methods
 // on the escape state of their arguments.  The analysis is at the bytecode
 // level.
@@ -34,18 +31,17 @@
 
 class BCEscapeAnalyzer : public ResourceObj {
  private:
+  Arena*            _arena;        // ciEnv arena
+
   bool              _conservative; // If true, return maximally
                                    // conservative results.
   ciMethod*         _method;
   ciMethodData*     _methodData;
   int               _arg_size;
-
-  intStack          _stack;
-
-  BitMap            _arg_local;
-  BitMap            _arg_stack;
-  BitMap            _arg_returned;
-  BitMap            _dirty;
+  VectorSet         _arg_local;
+  VectorSet         _arg_stack;
+  VectorSet         _arg_returned;
+  VectorSet         _dirty;
   enum{ ARG_OFFSET_MAX = 31};
   uint              *_arg_modified;
 
@@ -54,7 +50,7 @@
   bool              _allocated_escapes;
   bool              _unknown_modified;
 
-  ciObjectList     _dependencies;
+  GrowableArray<ciObject *> _dependencies;
 
   ciMethodBlocks   *_methodBlocks;
 
@@ -68,20 +64,10 @@
  private:
   // helper functions
   bool is_argument(int i)    { return i >= 0 && i < _arg_size; }
-
-  void raw_push(int i)       { _stack.push(i); }
-  int  raw_pop()             { return _stack.is_empty() ? -1 : _stack.pop(); }
-  void apush(int i)          { raw_push(i); }
-  void spush()               { raw_push(-1); }
-  void lpush()               { spush(); spush(); }
-  int  apop()                { return raw_pop(); }
-  void spop()                { assert(_stack.is_empty() || _stack.top() == -1, ""); raw_pop(); }
-  void lpop()                { spop(); spop(); }
-
   void set_returned(ArgumentMap vars);
   bool is_argument(ArgumentMap vars);
   bool is_arg_stack(ArgumentMap vars);
-  void clear_bits(ArgumentMap vars, BitMap &bs);
+  void clear_bits(ArgumentMap vars, VectorSet &bs);
   void set_method_escape(ArgumentMap vars);
   void set_global_escape(ArgumentMap vars);
   void set_dirty(ArgumentMap vars);
@@ -116,25 +102,25 @@
   ciMethodData*     methodData() const           { return _methodData; }
   BCEscapeAnalyzer* parent() const               { return _parent; }
   int               level() const                { return _level; }
-  ciObjectList*     dependencies()               { return &_dependencies; }
+  GrowableArray<ciObject *>* dependencies()               { return &_dependencies; }
   bool              has_dependencies() const     { return !_dependencies.is_empty(); }
 
   // retrieval of interprocedural escape information
 
   // The given argument does not escape the callee.
   bool is_arg_local(int i) const {
-    return !_conservative && _arg_local.at(i);
+    return !_conservative && _arg_local.test(i);
   }
 
   // The given argument escapes the callee, but does not become globally
   // reachable.
   bool is_arg_stack(int i) const {
-    return !_conservative && _arg_stack.at(i);
+    return !_conservative && _arg_stack.test(i);
   }
 
   // The given argument does not escape globally, and may be returned.
   bool is_arg_returned(int i) const {
-    return !_conservative && _arg_returned.at(i); }
+    return !_conservative && _arg_returned.test(i); }
 
   // True iff only input arguments are returned.
   bool is_return_local() const {
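
_dependencies is now a plain GrowableArray<ciObject*>, constructed as (_arena, 4, 0, NULL) and read back in (klass, method) pairs. A usage sketch against those types; the constructor arguments are assumed to mean (arena, initial capacity, initial length, filler), matching the call site, and the append sites are assumed since they fall outside these hunks:

    void record_and_replay(Arena* arena, ciKlass* k, ciMethod* m) {
      GrowableArray<ciObject*> deps(arena, 4, 0, NULL);
      deps.append(k);                       // pairs are pushed in order:
      deps.append(m);                       //   even slots ciKlass*, odd slots ciMethod*
      for (int i = 0; i < deps.length(); i += 2) {
        ciKlass*  kk = deps.at(i)->as_klass();
        ciMethod* mm = deps.at(i + 1)->as_method();
        (void)kk; (void)mm;                 // e.g. deps->assert_unique_concrete_method(kk, mm)
      }
    }
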
--- a/src/share/vm/ci/ciCPCache.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciCPCache.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -44,13 +44,23 @@
 // ciCPCache::is_f1_null_at
 bool ciCPCache::is_f1_null_at(int index) {
   VM_ENTRY_MARK;
-  constantPoolCacheOop cpcache = (constantPoolCacheOop) get_oop();
-  oop f1 = cpcache->secondary_entry_at(index)->f1();
+  oop f1 = entry_at(index)->f1();
   return (f1 == NULL);
 }
 
 
 // ------------------------------------------------------------------
+// ciCPCache::get_pool_index
+int ciCPCache::get_pool_index(int index) {
+  VM_ENTRY_MARK;
+  ConstantPoolCacheEntry* e = entry_at(index);
+  if (e->is_secondary_entry())
+    e = entry_at(e->main_entry_index());
+  return e->constant_pool_index();
+}
+
+
+// ------------------------------------------------------------------
 // ciCPCache::print
 //
 // Print debugging information about the cache.
--- a/src/share/vm/ci/ciCPCache.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciCPCache.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -29,6 +29,18 @@
 // Note: This class is called ciCPCache as ciConstantPoolCache is used
 // for something different.
 class ciCPCache : public ciObject {
+private:
+  constantPoolCacheOop get_cpCacheOop() {   // must be called inside a VM_ENTRY_MARK
+    return (constantPoolCacheOop) get_oop();
+  }
+
+  ConstantPoolCacheEntry* entry_at(int i) {
+    int raw_index = i;
+    if (constantPoolCacheOopDesc::is_secondary_index(i))
+      raw_index = constantPoolCacheOopDesc::decode_secondary_index(i);
+    return get_cpCacheOop()->entry_at(raw_index);
+  }
+
 public:
   ciCPCache(constantPoolCacheHandle cpcache) : ciObject(cpcache) {}
 
@@ -41,5 +53,7 @@
 
   bool is_f1_null_at(int index);
 
+  int get_pool_index(int index);
+
   void print();
 };
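
The entry_at helper above accepts either a plain cache index or an encoded secondary index, as used by invokedynamic call sites. A sketch of the encoding it assumes, where secondary indices are marked by sign and decoded by one's complement (the authoritative definitions live in constantPoolCacheOopDesc):

    static bool is_secondary_index(int i)     { return i < 0; }  // negative marks secondary
    static int  decode_secondary_index(int i) { return ~i; }     // recover the raw entry index
    static int  encode_secondary_index(int i) { return ~i; }     // ~i < 0 for any i >= 0
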
--- a/src/share/vm/ci/ciClassList.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciClassList.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -85,6 +85,7 @@
 friend class ciConstantPoolCache;      \
 friend class ciField;                  \
 friend class ciConstant;               \
+friend class ciCPCache;                \
 friend class ciFlags;                  \
 friend class ciExceptionHandler;       \
 friend class ciCallProfile;            \
--- a/src/share/vm/ci/ciEnv.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciEnv.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -511,9 +511,22 @@
 //
 // Implementation of get_constant_by_index().
 ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
-                                             int index,
+                                             int pool_index, int cache_index,
                                              ciInstanceKlass* accessor) {
+  bool ignore_will_link;
   EXCEPTION_CONTEXT;
+  int index = pool_index;
+  if (cache_index >= 0) {
+    assert(index < 0, "only one kind of index at a time");
+    ConstantPoolCacheEntry* cpc_entry = cpool->cache()->entry_at(cache_index);
+    index = cpc_entry->constant_pool_index();
+    oop obj = cpc_entry->f1();
+    if (obj != NULL) {
+      assert(obj->is_instance(), "must be an instance");
+      ciObject* ciobj = get_object(obj);
+      return ciConstant(T_OBJECT, ciobj);
+    }
+  }
   constantTag tag = cpool->tag_at(index);
   if (tag.is_int()) {
     return ciConstant(T_INT, (jint)cpool->int_at(index));
@@ -540,8 +553,7 @@
     return ciConstant(T_OBJECT, constant);
   } else if (tag.is_klass() || tag.is_unresolved_klass()) {
     // 4881222: allow ldc to take a class type
-    bool ignore;
-    ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore, accessor);
+    ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore_will_link, accessor);
     if (HAS_PENDING_EXCEPTION) {
       CLEAR_PENDING_EXCEPTION;
       record_out_of_memory_failure();
@@ -549,12 +561,26 @@
     }
     assert (klass->is_instance_klass() || klass->is_array_klass(),
             "must be an instance or array klass ");
-    return ciConstant(T_OBJECT, klass);
+    return ciConstant(T_OBJECT, klass->java_mirror());
   } else if (tag.is_object()) {
     oop obj = cpool->object_at(index);
     assert(obj->is_instance(), "must be an instance");
     ciObject* ciobj = get_object(obj);
     return ciConstant(T_OBJECT, ciobj);
+  } else if (tag.is_method_type()) {
+    // must execute Java code to link this CP entry into cache[i].f1
+    ciSymbol* signature = get_object(cpool->method_type_signature_at(index))->as_symbol();
+    ciObject* ciobj = get_unloaded_method_type_constant(signature);
+    return ciConstant(T_OBJECT, ciobj);
+  } else if (tag.is_method_handle()) {
+    // must execute Java code to link this CP entry into cache[i].f1
+    int ref_kind        = cpool->method_handle_ref_kind_at(index);
+    int callee_index    = cpool->method_handle_klass_index_at(index);
+    ciKlass* callee     = get_klass_by_index_impl(cpool, callee_index, ignore_will_link, accessor);
+    ciSymbol* name      = get_object(cpool->method_handle_name_ref_at(index))->as_symbol();
+    ciSymbol* signature = get_object(cpool->method_handle_signature_ref_at(index))->as_symbol();
+    ciObject* ciobj     = get_unloaded_method_handle_constant(callee, name, signature, ref_kind);
+    return ciConstant(T_OBJECT, ciobj);
   } else {
     ShouldNotReachHere();
     return ciConstant();
@@ -562,61 +588,15 @@
 }
 
 // ------------------------------------------------------------------
-// ciEnv::is_unresolved_string_impl
-//
-// Implementation of is_unresolved_string().
-bool ciEnv::is_unresolved_string_impl(instanceKlass* accessor, int index) const {
-  EXCEPTION_CONTEXT;
-  assert(accessor->is_linked(), "must be linked before accessing constant pool");
-  constantPoolOop cpool = accessor->constants();
-  constantTag tag = cpool->tag_at(index);
-  return tag.is_unresolved_string();
-}
-
-// ------------------------------------------------------------------
-// ciEnv::is_unresolved_klass_impl
-//
-// Implementation of is_unresolved_klass().
-bool ciEnv::is_unresolved_klass_impl(instanceKlass* accessor, int index) const {
-  EXCEPTION_CONTEXT;
-  assert(accessor->is_linked(), "must be linked before accessing constant pool");
-  constantPoolOop cpool = accessor->constants();
-  constantTag tag = cpool->tag_at(index);
-  return tag.is_unresolved_klass();
-}
-
-// ------------------------------------------------------------------
 // ciEnv::get_constant_by_index
 //
 // Pull a constant out of the constant pool.  How appropriate.
 //
 // Implementation note: this query is currently in no way cached.
 ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool,
-                                        int index,
+                                        int pool_index, int cache_index,
                                         ciInstanceKlass* accessor) {
-  GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, index, accessor);)
-}
-
-// ------------------------------------------------------------------
-// ciEnv::is_unresolved_string
-//
-// Check constant pool
-//
-// Implementation note: this query is currently in no way cached.
-bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor,
-                                 int index) const {
-  GUARDED_VM_ENTRY(return is_unresolved_string_impl(accessor->get_instanceKlass(), index); )
-}
-
-// ------------------------------------------------------------------
-// ciEnv::is_unresolved_klass
-//
-// Check constant pool
-//
-// Implementation note: this query is currently in no way cached.
-bool ciEnv::is_unresolved_klass(ciInstanceKlass* accessor,
-                                int index) const {
-  GUARDED_VM_ENTRY(return is_unresolved_klass_impl(accessor->get_instanceKlass(), index); )
+  GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, pool_index, cache_index, accessor);)
 }
 
 // ------------------------------------------------------------------
@@ -748,8 +728,8 @@
   }
 
   // Get the invoker methodOop from the constant pool.
-  intptr_t f2_value = cpool->cache()->main_entry_at(index)->f2();
-  methodOop signature_invoker = methodOop(f2_value);
+  oop f1_value = cpool->cache()->main_entry_at(index)->f1();
+  methodOop signature_invoker = methodOop(f1_value);
   assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
          "correct result from LinkResolver::resolve_invokedynamic");
 
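
The reworked get_constant_by_index takes both a pool index and a cache index, and the assert above requires that exactly one of them is live per call. Illustrative calls, with made-up index values:

    // Exactly one index is >= 0; the other is passed as -1.
    env->get_constant_by_index(cpool, /*pool_index*/ 17, /*cache_index*/ -1, accessor);  // plain CP entry
    env->get_constant_by_index(cpool, /*pool_index*/ -1, /*cache_index*/  3, accessor);  // rewritten ldc
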
--- a/src/share/vm/ci/ciEnv.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciEnv.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -116,12 +116,8 @@
                                 bool& is_accessible,
                                 ciInstanceKlass* loading_klass);
   ciConstant get_constant_by_index(constantPoolHandle cpool,
-                                   int constant_index,
+                                   int pool_index, int cache_index,
                                    ciInstanceKlass* accessor);
-  bool       is_unresolved_string(ciInstanceKlass* loading_klass,
-                                   int constant_index) const;
-  bool       is_unresolved_klass(ciInstanceKlass* loading_klass,
-                                   int constant_index) const;
   ciField*   get_field_by_index(ciInstanceKlass* loading_klass,
                                 int field_index);
   ciMethod*  get_method_by_index(constantPoolHandle cpool,
@@ -137,12 +133,8 @@
                                      bool& is_accessible,
                                      ciInstanceKlass* loading_klass);
   ciConstant get_constant_by_index_impl(constantPoolHandle cpool,
-                                        int constant_index,
+                                        int pool_index, int cache_index,
                                         ciInstanceKlass* loading_klass);
-  bool       is_unresolved_string_impl (instanceKlass* loading_klass,
-                                        int constant_index) const;
-  bool       is_unresolved_klass_impl (instanceKlass* loading_klass,
-                                        int constant_index) const;
   ciField*   get_field_by_index_impl(ciInstanceKlass* loading_klass,
                                      int field_index);
   ciMethod*  get_method_by_index_impl(constantPoolHandle cpool,
@@ -190,6 +182,25 @@
     return _factory->get_unloaded_klass(accessing_klass, name, true);
   }
 
+  // Get a ciInstance representing an unloaded klass mirror.
+  // Result is not necessarily unique, but will be unloaded.
+  ciInstance* get_unloaded_klass_mirror(ciKlass* type) {
+    return _factory->get_unloaded_klass_mirror(type);
+  }
+
+  // Get a ciInstance representing an unresolved method handle constant.
+  ciInstance* get_unloaded_method_handle_constant(ciKlass*  holder,
+                                                  ciSymbol* name,
+                                                  ciSymbol* signature,
+                                                  int       ref_kind) {
+    return _factory->get_unloaded_method_handle_constant(holder, name, signature, ref_kind);
+  }
+
+  // Get a ciInstance representing an unresolved method type constant.
+  ciInstance* get_unloaded_method_type_constant(ciSymbol* signature) {
+    return _factory->get_unloaded_method_type_constant(signature);
+  }
+
   // See if we already have an unloaded klass for the given name
   // or return NULL if not.
   ciKlass *check_get_unloaded_klass(ciKlass* accessing_klass, ciSymbol* name) {
--- a/src/share/vm/ci/ciInstanceKlass.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciInstanceKlass.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,9 +44,7 @@
   _flags = ciFlags(access_flags);
   _has_finalizer = access_flags.has_finalizer();
   _has_subklass = ik->subklass() != NULL;
-  _is_initialized = ik->is_initialized();
-  // Next line must follow and use the result of the previous line:
-  _is_linked = _is_initialized || ik->is_linked();
+  _init_state = (instanceKlass::ClassState)ik->get_init_state();
   _nonstatic_field_size = ik->nonstatic_field_size();
   _has_nonstatic_fields = ik->has_nonstatic_fields();
   _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
@@ -91,8 +89,7 @@
   : ciKlass(name, ciInstanceKlassKlass::make())
 {
   assert(name->byte_at(0) != '[', "not an instance klass");
-  _is_initialized = false;
-  _is_linked = false;
+  _init_state = (instanceKlass::ClassState)0;
   _nonstatic_field_size = -1;
   _has_nonstatic_fields = false;
   _nonstatic_fields = NULL;
@@ -109,21 +106,10 @@
 
 // ------------------------------------------------------------------
-// ciInstanceKlass::compute_shared_is_initialized
-bool ciInstanceKlass::compute_shared_is_initialized() {
+// ciInstanceKlass::compute_shared_init_state
+void ciInstanceKlass::compute_shared_init_state() {
   GUARDED_VM_ENTRY(
     instanceKlass* ik = get_instanceKlass();
-    _is_initialized = ik->is_initialized();
-    return _is_initialized;
-  )
-}
-
-// ------------------------------------------------------------------
-// ciInstanceKlass::compute_shared_is_linked
-bool ciInstanceKlass::compute_shared_is_linked() {
-  GUARDED_VM_ENTRY(
-    instanceKlass* ik = get_instanceKlass();
-    _is_linked = ik->is_linked();
-    return _is_linked;
+    _init_state = (instanceKlass::ClassState)ik->get_init_state();
   )
 }
 
@@ -323,8 +309,8 @@
 // ciInstanceKlass::java_mirror
 //
 // Get the instance of java.lang.Class corresponding to this klass.
+// Cache it in this->_java_mirror.
 ciInstance* ciInstanceKlass::java_mirror() {
-  assert(is_loaded(), "must be loaded");
   if (_java_mirror == NULL) {
     _java_mirror = ciKlass::java_mirror();
   }
--- a/src/share/vm/ci/ciInstanceKlass.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciInstanceKlass.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,9 +39,8 @@
   jobject                _loader;
   jobject                _protection_domain;
 
+  instanceKlass::ClassState _init_state;           // state of class
   bool                   _is_shared;
-  bool                   _is_initialized;
-  bool                   _is_linked;
   bool                   _has_finalizer;
   bool                   _has_subklass;
   bool                   _has_nonstatic_fields;
@@ -87,27 +86,34 @@
 
   bool is_shared() { return _is_shared; }
 
-  bool compute_shared_is_initialized();
-  bool compute_shared_is_linked();
+  void compute_shared_init_state();
   bool compute_shared_has_subklass();
   int  compute_shared_nof_implementors();
   int  compute_nonstatic_fields();
   GrowableArray<ciField*>* compute_nonstatic_fields_impl(GrowableArray<ciField*>* super_fields);
 
+  // Update the init_state for shared klasses
+  void update_if_shared(instanceKlass::ClassState expected) {
+    if (_is_shared && _init_state != expected) {
+      if (is_loaded()) compute_shared_init_state();
+    }
+  }
+
 public:
   // Has this klass been initialized?
   bool                   is_initialized() {
-    if (_is_shared && !_is_initialized) {
-      return is_loaded() && compute_shared_is_initialized();
-    }
-    return _is_initialized;
+    update_if_shared(instanceKlass::fully_initialized);
+    return _init_state == instanceKlass::fully_initialized;
+  }
+  // Is this klass being initialized?
+  bool                   is_being_initialized() {
+    update_if_shared(instanceKlass::being_initialized);
+    return _init_state == instanceKlass::being_initialized;
   }
   // Has this klass been linked?
   bool                   is_linked() {
-    if (_is_shared && !_is_linked) {
-      return is_loaded() && compute_shared_is_linked();
-    }
-    return _is_linked;
+    update_if_shared(instanceKlass::linked);
+    return _init_state >= instanceKlass::linked;
   }
 
   // General klass information.
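
Collapsing the two booleans into a single _init_state works because instanceKlass::ClassState is ordered by progress, so is_linked() reduces to one comparison. A sketch of the assumed ordering (the authoritative enum lives in instanceKlass.hpp):

    enum ClassState {               // a class only moves forward through these states
      unparsable_by_gc = 0, allocated, loaded, linked,
      being_initialized, fully_initialized, initialization_error
    };
    // Hence _init_state >= linked covers linked and every later state, while
    // _init_state == fully_initialized remains an exact test.
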
--- a/src/share/vm/ci/ciKlass.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciKlass.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -192,8 +192,14 @@
 
 // ------------------------------------------------------------------
 // ciKlass::java_mirror
+//
+// Get the instance of java.lang.Class corresponding to this klass.
+// If it is an unloaded instance or array klass, return an unloaded
+// mirror object of type Class.
 ciInstance* ciKlass::java_mirror() {
   GUARDED_VM_ENTRY(
+    if (!is_loaded())
+      return ciEnv::current()->get_unloaded_klass_mirror(this);
     oop java_mirror = get_Klass()->java_mirror();
     return CURRENT_ENV->get_object(java_mirror)->as_instance();
   )
--- a/src/share/vm/ci/ciMethod.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciMethod.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -54,10 +54,10 @@
   _code               = NULL;
   _exception_handlers = NULL;
   _liveness           = NULL;
-  _bcea = NULL;
   _method_blocks = NULL;
 #ifdef COMPILER2
   _flow               = NULL;
+  _bcea               = NULL;
 #endif // COMPILER2
 
   ciEnv *env = CURRENT_ENV;
@@ -121,11 +121,11 @@
   _intrinsic_id = vmIntrinsics::_none;
   _liveness = NULL;
   _can_be_statically_bound = false;
-  _bcea = NULL;
   _method_blocks = NULL;
   _method_data = NULL;
 #ifdef COMPILER2
   _flow = NULL;
+  _bcea = NULL;
 #endif // COMPILER2
 }
 
@@ -694,30 +694,21 @@
 // ------------------------------------------------------------------
 // ciMethod::is_method_handle_invoke
 //
-// Return true if the method is a MethodHandle target.
+// Return true if the method is one of the two signature-polymorphic
+// MethodHandle methods, invokeExact or invokeGeneric.
 bool ciMethod::is_method_handle_invoke() const {
-  bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
-               methodOopDesc::is_method_handle_invoke_name(name()->sid()));
-#ifdef ASSERT
-  if (is_loaded()) {
-    bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
-    {
-      VM_ENTRY_MARK;
-      bool flag3 = get_methodOop()->is_method_handle_invoke();
-      assert(flag2 == flag3, "consistent");
-      assert(flag  == flag3, "consistent");
-    }
-  }
-#endif //ASSERT
-  return flag;
+  if (!is_loaded())  return false;
+  VM_ENTRY_MARK;
+  return get_methodOop()->is_method_handle_invoke();
 }
 
 // ------------------------------------------------------------------
 // ciMethod::is_method_handle_adapter
 //
 // Return true if the method is a generated MethodHandle adapter.
+// These are built by MethodHandleCompiler.
 bool ciMethod::is_method_handle_adapter() const {
-  check_is_loaded();
+  if (!is_loaded())  return false;
   VM_ENTRY_MARK;
   return get_methodOop()->is_method_handle_adapter();
 }
@@ -1033,10 +1024,15 @@
 bool ciMethod::is_initializer () const {         FETCH_FLAG_FROM_VM(is_initializer); }
 
 BCEscapeAnalyzer  *ciMethod::get_bcea() {
+#ifdef COMPILER2
   if (_bcea == NULL) {
     _bcea = new (CURRENT_ENV->arena()) BCEscapeAnalyzer(this, NULL);
   }
   return _bcea;
+#else // COMPILER2
+  ShouldNotReachHere();
+  return NULL;
+#endif // COMPILER2
 }
 
 ciMethodBlocks  *ciMethod::get_method_blocks() {
--- a/src/share/vm/ci/ciMethod.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciMethod.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -48,7 +48,6 @@
   ciInstanceKlass* _holder;
   ciSignature*     _signature;
   ciMethodData*    _method_data;
-  BCEscapeAnalyzer* _bcea;
   ciMethodBlocks*   _method_blocks;
 
   // Code attributes.
@@ -72,7 +71,8 @@
   // Optional liveness analyzer.
   MethodLiveness* _liveness;
 #ifdef COMPILER2
-  ciTypeFlow*     _flow;
+  ciTypeFlow*         _flow;
+  BCEscapeAnalyzer*   _bcea;
 #endif
 
   ciMethod(methodHandle h_m);
--- a/src/share/vm/ci/ciObjectFactory.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -70,6 +70,7 @@
 
   _unloaded_methods = new (arena) GrowableArray<ciMethod*>(arena, 4, 0, NULL);
   _unloaded_klasses = new (arena) GrowableArray<ciKlass*>(arena, 8, 0, NULL);
+  _unloaded_instances = new (arena) GrowableArray<ciInstance*>(arena, 4, 0, NULL);
   _return_addresses =
     new (arena) GrowableArray<ciReturnAddress*>(arena, 8, 0, NULL);
 }
@@ -443,6 +444,74 @@
   return new_klass;
 }
 
+
+//------------------------------------------------------------------
+// ciObjectFactory::get_unloaded_instance
+//
+// Get a ciInstance representing an as-yet undetermined instance of a given class.
+//
+ciInstance* ciObjectFactory::get_unloaded_instance(ciInstanceKlass* instance_klass) {
+  for (int i = 0; i < _unloaded_instances->length(); i++) {
+    ciInstance* entry = _unloaded_instances->at(i);
+    if (entry->klass()->equals(instance_klass)) {
+      // We've found a match.
+      return entry;
+    }
+  }
+
+  // This is a new unloaded instance.  Create it and stick it in
+  // the cache.
+  ciInstance* new_instance = new (arena()) ciInstance(instance_klass);
+
+  init_ident_of(new_instance);
+  _unloaded_instances->append(new_instance);
+
+  // make sure it looks the way we want:
+  assert(!new_instance->is_loaded(), "");
+  assert(new_instance->klass() == instance_klass, "");
+
+  return new_instance;
+}
+
+
+//------------------------------------------------------------------
+// ciObjectFactory::get_unloaded_klass_mirror
+//
+// Get a ciInstance representing an unresolved klass mirror.
+//
+// Currently, this ignores the parameter and returns a unique unloaded instance.
+ciInstance* ciObjectFactory::get_unloaded_klass_mirror(ciKlass*  type) {
+  assert(ciEnv::_Class_klass != NULL, "");
+  return get_unloaded_instance(ciEnv::_Class_klass->as_instance_klass());
+}
+
+//------------------------------------------------------------------
+// ciObjectFactory::get_unloaded_method_handle_constant
+//
+// Get a ciInstance representing an unresolved method handle constant.
+//
+// Currently, this ignores the parameters and returns a unique unloaded instance.
+ciInstance* ciObjectFactory::get_unloaded_method_handle_constant(ciKlass*  holder,
+                                                                 ciSymbol* name,
+                                                                 ciSymbol* signature,
+                                                                 int       ref_kind) {
+  if (ciEnv::_MethodHandle_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_MethodHandle_klass->as_instance_klass());
+}
+
+//------------------------------------------------------------------
+// ciObjectFactory::get_unloaded_method_type_constant
+//
+// Get a ciInstance representing an unresolved method type constant.
+//
+// Currently, this ignores the parameter and returns a unique unloaded instance.
+ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signature) {
+  if (ciEnv::_MethodType_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
+}
+
+
 //------------------------------------------------------------------
 // ciObjectFactory::get_empty_methodData
 //
@@ -637,7 +706,8 @@
 //
 // Print debugging information about the object factory
 void ciObjectFactory::print() {
-  tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_klasses=%d>",
+  tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_instances=%d unloaded_klasses=%d>",
              _ci_objects->length(), _unloaded_methods->length(),
+             _unloaded_instances->length(),
              _unloaded_klasses->length());
 }
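
Because get_unloaded_instance keys its cache only on the klass, all unresolved constants of a given kind share one canonical ciInstance; the wrappers accept their parameters but do not yet use them. An illustrative consequence (sig1 and sig2 are hypothetical ciSymbol* signatures):

    ciInstance* a = factory->get_unloaded_method_type_constant(sig1);
    ciInstance* b = factory->get_unloaded_method_type_constant(sig2);
    assert(a == b, "one unloaded instance per klass under the current scheme");
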
--- a/src/share/vm/ci/ciObjectFactory.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciObjectFactory.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -39,6 +39,7 @@
   GrowableArray<ciObject*>* _ci_objects;
   GrowableArray<ciMethod*>* _unloaded_methods;
   GrowableArray<ciKlass*>* _unloaded_klasses;
+  GrowableArray<ciInstance*>* _unloaded_instances;
   GrowableArray<ciReturnAddress*>* _return_addresses;
   int                       _next_ident;
 
@@ -73,6 +74,8 @@
 
   void print_contents_impl();
 
+  ciInstance* get_unloaded_instance(ciInstanceKlass* klass);
+
 public:
   static bool is_initialized() { return _initialized; }
 
@@ -98,6 +101,18 @@
                               ciSymbol* name,
                               bool create_if_not_found);
 
+  // Get a ciInstance representing an unresolved klass mirror.
+  ciInstance* get_unloaded_klass_mirror(ciKlass* type);
+
+  // Get a ciInstance representing an unresolved method handle constant.
+  ciInstance* get_unloaded_method_handle_constant(ciKlass*  holder,
+                                                  ciSymbol* name,
+                                                  ciSymbol* signature,
+                                                  int       ref_kind);
+
+  // Get a ciInstance representing an unresolved method type constant.
+  ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
+
 
   // Get the ciMethodData representing the methodData for a method
   // with none.
--- a/src/share/vm/ci/ciStreams.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciStreams.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -186,12 +186,13 @@
 }
 
 // ------------------------------------------------------------------
-// ciBytecodeStream::get_constant_index
+// ciBytecodeStream::get_constant_raw_index
 //
 // If this bytecode is one of the ldc variants, get the index of the
 // referenced constant.
-int ciBytecodeStream::get_constant_index() const {
-  switch(cur_bc()) {
+int ciBytecodeStream::get_constant_raw_index() const {
+  // work-alike for Bytecode_loadconstant::raw_index()
+  switch (cur_bc()) {
   case Bytecodes::_ldc:
     return get_index_u1();
   case Bytecodes::_ldc_w:
@@ -202,25 +203,52 @@
     return 0;
   }
 }
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_constant_pool_index
+// Decode any CP cache index into a regular pool index.
+int ciBytecodeStream::get_constant_pool_index() const {
+  // work-alike for Bytecode_loadconstant::pool_index()
+  int index = get_constant_raw_index();
+  if (has_cache_index()) {
+    return get_cpcache()->get_pool_index(index);
+  }
+  return index;
+}
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_constant_cache_index
+// Return the CP cache index, or -1 if there isn't any.
+int ciBytecodeStream::get_constant_cache_index() const {
+  // work-alike for Bytecode_loadconstant::cache_index()
+  return has_cache_index() ? get_constant_raw_index() : -1;
+}
+
 // ------------------------------------------------------------------
 // ciBytecodeStream::get_constant
 //
 // If this bytecode is one of the ldc variants, get the referenced
 // constant.
 ciConstant ciBytecodeStream::get_constant() {
+  int pool_index = get_constant_raw_index();
+  int cache_index = -1;
+  if (has_cache_index()) {
+    cache_index = pool_index;
+    pool_index = -1;
+  }
   VM_ENTRY_MARK;
   constantPoolHandle cpool(_method->get_methodOop()->constants());
-  return CURRENT_ENV->get_constant_by_index(cpool, get_constant_index(), _holder);
+  return CURRENT_ENV->get_constant_by_index(cpool, pool_index, cache_index, _holder);
 }
 
 // ------------------------------------------------------------------
-bool ciBytecodeStream::is_unresolved_string() const {
-  return CURRENT_ENV->is_unresolved_string(_holder, get_constant_index());
-}
-
-// ------------------------------------------------------------------
-bool ciBytecodeStream::is_unresolved_klass() const {
-  return CURRENT_ENV->is_unresolved_klass(_holder, get_klass_index());
+// ciBytecodeStream::get_constant_pool_tag
+//
+// Return the tag of the constant pool entry at the given index.
+constantTag ciBytecodeStream::get_constant_pool_tag(int index) const {
+  VM_ENTRY_MARK;
+  return _method->get_methodOop()->constants()->tag_at(index);
 }
 
 // ------------------------------------------------------------------
@@ -378,13 +406,16 @@
 
 // ------------------------------------------------------------------
 // ciBytecodeStream::get_cpcache
-ciCPCache* ciBytecodeStream::get_cpcache() {
-  VM_ENTRY_MARK;
-  // Get the constant pool.
-  constantPoolOop      cpool   = _holder->get_instanceKlass()->constants();
-  constantPoolCacheOop cpcache = cpool->cache();
+ciCPCache* ciBytecodeStream::get_cpcache() const {
+  if (_cpcache == NULL) {
+    VM_ENTRY_MARK;
+    // Get the constant pool.
+    constantPoolOop      cpool   = _holder->get_instanceKlass()->constants();
+    constantPoolCacheOop cpcache = cpool->cache();
 
-  return CURRENT_ENV->get_object(cpcache)->as_cpcache();
+    *(ciCPCache**)&_cpcache = CURRENT_ENV->get_object(cpcache)->as_cpcache();
+  }
+  return _cpcache;
 }
 
 // ------------------------------------------------------------------
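
get_cpcache stays const yet memoizes its result by casting away the constness of _cpcache. The same intent could be spelled with a mutable member; a sketch of that alternative (compute_cpcache is a hypothetical helper, not part of this change):

    class ciBytecodeStream {
      mutable ciCPCache* _cpcache;   // lazily computed, logically const
     public:
      ciCPCache* get_cpcache() const {
        if (_cpcache == NULL)  _cpcache = compute_cpcache();
        return _cpcache;
      }
    };
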
--- a/src/share/vm/ci/ciStreams.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciStreams.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -46,6 +46,7 @@
 
   ciMethod* _method;           // the method
   ciInstanceKlass* _holder;
+  ciCPCache* _cpcache;
   address _bc_start;            // Start of current bytecode for table
   address _was_wide;            // Address past last wide bytecode
   jint* _table_base;            // Aligned start of last table or switch
@@ -58,7 +59,9 @@
 
   void reset( address base, unsigned int size ) {
     _bc_start =_was_wide = 0;
-    _start = _pc = base; _end = base + size; }
+    _start = _pc = base; _end = base + size;
+    _cpcache = NULL;
+  }
 
   void assert_wide(bool require_wide) const {
     if (require_wide)
@@ -136,15 +139,20 @@
   bool is_wide() const { return ( _pc == _was_wide ); }
 
   // Does this instruction contain an index which refers into the CP cache?
-  bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
+  bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
 
   int get_index_u1() const {
     return bytecode()->get_index_u1(cur_bc_raw());
   }
 
+  int get_index_u1_cpcache() const {
+    return bytecode()->get_index_u1_cpcache(cur_bc_raw());
+  }
+
   // Get a byte index following this bytecode.
   // If prefixed with a wide bytecode, get a wide index.
   int get_index() const {
+    assert(!has_cache_index(), "else use cpcache variant");
     return (_pc == _was_wide)   // was widened?
       ? get_index_u2(true)      // yes, return wide index
       : get_index_u1();         // no, return narrow index
@@ -207,7 +215,9 @@
     return cur_bci() + get_int_table(index); }
 
   // --- Constant pool access ---
-  int get_constant_index() const;
+  int get_constant_raw_index() const;
+  int get_constant_pool_index() const;
+  int get_constant_cache_index() const;
   int get_field_index();
   int get_method_index();
 
@@ -217,12 +227,17 @@
   int get_klass_index() const;
 
   // If this bytecode is one of the ldc variants, get the referenced
-  // constant
+  // constant.  Do not attempt to resolve it, since that would require
+  // execution of Java code.  If it is not resolved, return an unloaded
+  // object (ciConstant.as_object()->is_loaded() == false).
   ciConstant get_constant();
-  // True if the ldc variant points to an unresolved string
-  bool is_unresolved_string() const;
-  // True if the ldc variant points to an unresolved klass
-  bool is_unresolved_klass() const;
+  constantTag get_constant_pool_tag(int index) const;
+
+  // True if the klass-using bytecode points to an unresolved klass
+  bool is_unresolved_klass() const {
+    constantTag tag = get_constant_pool_tag(get_klass_index());
+    return tag.is_unresolved_klass();
+  }
 
   // If this bytecode is one of get_field, get_static, put_field,
   // or put_static, get the referenced field.
@@ -238,7 +253,7 @@
   int       get_method_holder_index();
   int       get_method_signature_index();
 
-  ciCPCache*  get_cpcache();
+  ciCPCache*  get_cpcache() const;
   ciCallSite* get_call_site();
 };
 
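
For an ldc that has been rewritten to go through the constant pool cache, the three new accessors relate as follows (index values illustrative):

    // Suppose the operand byte is 3 and cache entry 3 refers to CP slot 17:
    iter.get_constant_raw_index();    // -> 3   (the operand as encoded in the bytecode)
    iter.get_constant_cache_index();  // -> 3   (the raw index is a cache index here)
    iter.get_constant_pool_index();   // -> 17  (decoded via ciCPCache::get_pool_index)
    // For an unrewritten ldc the raw index is already a pool index and
    // get_constant_cache_index() returns -1.
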
--- a/src/share/vm/ci/ciTypeFlow.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/ci/ciTypeFlow.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -712,10 +712,8 @@
     ciObject* obj = con.as_object();
     if (obj->is_null_object()) {
       push_null();
-    } else if (obj->is_klass()) {
-      // The type of ldc <class> is java.lang.Class
-      push_object(outer()->env()->Class_klass());
     } else {
+      assert(!obj->is_klass(), "must be java_mirror of klass");
       push_object(obj->klass());
     }
   } else {
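
With klass constants now surfaced as java.lang.Class mirrors (see the ciEnv change above), the special case disappears: the mirror is an ordinary ciInstance whose klass() is java.lang.Class, so the generic branch computes the right type on its own:

    // ldc String.class now flows through the generic path:
    ciObject* obj = con.as_object();   // a ciInstance: the String.class mirror
    push_object(obj->klass());         // obj->klass() is java.lang.Class
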
--- a/src/share/vm/classfile/classFileParser.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -117,6 +117,45 @@
           cp->string_index_at_put(index, string_index);
         }
         break;
+      case JVM_CONSTANT_MethodHandle :
+      case JVM_CONSTANT_MethodType :
+        if (!EnableMethodHandles ||
+            _major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
+          classfile_parse_error(
+            (!EnableMethodHandles ?
+             "This JVM does not support constant tag %u in class file %s" :
+             "Class file version does not support constant tag %u in class file %s"),
+            tag, CHECK);
+        }
+        if (tag == JVM_CONSTANT_MethodHandle) {
+          cfs->guarantee_more(4, CHECK);  // ref_kind, method_index, tag/access_flags
+          u1 ref_kind = cfs->get_u1_fast();
+          u2 method_index = cfs->get_u2_fast();
+          cp->method_handle_index_at_put(index, ref_kind, method_index);
+        } else if (tag == JVM_CONSTANT_MethodType) {
+          cfs->guarantee_more(3, CHECK);  // signature_index, tag/access_flags
+          u2 signature_index = cfs->get_u2_fast();
+          cp->method_type_index_at_put(index, signature_index);
+        } else {
+          ShouldNotReachHere();
+        }
+        break;
+      case JVM_CONSTANT_InvokeDynamic :
+        {
+          if (!EnableInvokeDynamic ||
+              _major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
+            classfile_parse_error(
+              (!EnableInvokeDynamic ?
+               "This JVM does not support constant tag %u in class file %s" :
+               "Class file version does not support constant tag %u in class file %s"),
+              tag, CHECK);
+          }
+          cfs->guarantee_more(5, CHECK);  // bsm_index, name_and_type_index, tag/access_flags
+          u2 bootstrap_method_index = cfs->get_u2_fast();
+          u2 name_and_type_index = cfs->get_u2_fast();
+          cp->invoke_dynamic_at_put(index, bootstrap_method_index, name_and_type_index);
+        }
+        break;
       case JVM_CONSTANT_Integer :
         {
           cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
@@ -337,6 +376,78 @@
           cp->unresolved_string_at_put(index, sym);
         }
         break;
+      case JVM_CONSTANT_MethodHandle :
+        {
+          int ref_index = cp->method_handle_index_at(index);
+          check_property(
+            valid_cp_range(ref_index, length) &&
+                EnableMethodHandles,
+              "Invalid constant pool index %u in class file %s",
+              ref_index, CHECK_(nullHandle));
+          constantTag tag = cp->tag_at(ref_index);
+          int ref_kind  = cp->method_handle_ref_kind_at(index);
+          switch (ref_kind) {
+          case JVM_REF_getField:
+          case JVM_REF_getStatic:
+          case JVM_REF_putField:
+          case JVM_REF_putStatic:
+            check_property(
+              tag.is_field(),
+              "Invalid constant pool index %u in class file %s (not a field)",
+              ref_index, CHECK_(nullHandle));
+            break;
+          case JVM_REF_invokeVirtual:
+          case JVM_REF_invokeStatic:
+          case JVM_REF_invokeSpecial:
+          case JVM_REF_newInvokeSpecial:
+            check_property(
+              tag.is_method(),
+              "Invalid constant pool index %u in class file %s (not a method)",
+              ref_index, CHECK_(nullHandle));
+            break;
+          case JVM_REF_invokeInterface:
+            check_property(
+              tag.is_interface_method(),
+              "Invalid constant pool index %u in class file %s (not an interface method)",
+              ref_index, CHECK_(nullHandle));
+            break;
+          default:
+            classfile_parse_error(
+              "Bad method handle kind at constant pool index %u in class file %s",
+              index, CHECK_(nullHandle));
+          }
+          // Keep the ref_index unchanged.  It will be indirected at link-time.
+        }
+        break;
+      case JVM_CONSTANT_MethodType :
+        {
+          int ref_index = cp->method_type_index_at(index);
+          check_property(
+            valid_cp_range(ref_index, length) &&
+                cp->tag_at(ref_index).is_utf8() &&
+                EnableMethodHandles,
+              "Invalid constant pool index %u in class file %s",
+              ref_index, CHECK_(nullHandle));
+        }
+        break;
+      case JVM_CONSTANT_InvokeDynamic :
+        {
+          int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
+          int name_and_type_ref_index = cp->invoke_dynamic_name_and_type_ref_index_at(index);
+          check_property((bootstrap_method_ref_index == 0 && AllowTransitionalJSR292)
+                         ||
+                         (valid_cp_range(bootstrap_method_ref_index, length) &&
+                          cp->tag_at(bootstrap_method_ref_index).is_method_handle()),
+                         "Invalid constant pool index %u in class file %s",
+                         bootstrap_method_ref_index,
+                         CHECK_(nullHandle));
+          check_property(valid_cp_range(name_and_type_ref_index, length) &&
+                         cp->tag_at(name_and_type_ref_index).is_name_and_type(),
+                         "Invalid constant pool index %u in class file %s",
+                         name_and_type_ref_index,
+                         CHECK_(nullHandle));
+          break;
+        }
       default:
         fatal(err_msg("bad constant pool tag value %u",
                       cp->tag_at(index).value()));
@@ -452,6 +563,43 @@
         }
         break;
       }
+      case JVM_CONSTANT_MethodHandle: {
+        int ref_index = cp->method_handle_index_at(index);
+        int ref_kind  = cp->method_handle_ref_kind_at(index);
+        switch (ref_kind) {
+        case JVM_REF_invokeVirtual:
+        case JVM_REF_invokeStatic:
+        case JVM_REF_invokeSpecial:
+        case JVM_REF_newInvokeSpecial:
+          {
+            int name_and_type_ref_index = cp->name_and_type_ref_index_at(ref_index);
+            int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);
+            symbolHandle name(THREAD, cp->symbol_at(name_ref_index));
+            if (ref_kind == JVM_REF_newInvokeSpecial) {
+              if (name() != vmSymbols::object_initializer_name()) {
+                classfile_parse_error(
+                  "Bad constructor name at constant pool index %u in class file %s",
+                  name_ref_index, CHECK_(nullHandle));
+              }
+            } else {
+              if (name() == vmSymbols::object_initializer_name()) {
+                classfile_parse_error(
+                  "Bad method name at constant pool index %u in class file %s",
+                  name_ref_index, CHECK_(nullHandle));
+              }
+            }
+          }
+          break;
+          // Other ref_kinds are already fully checked in previous pass.
+        }
+        break;
+      }
+      case JVM_CONSTANT_MethodType: {
+        symbolHandle no_name = vmSymbolHandles::type_name(); // placeholder
+        symbolHandle signature(THREAD, cp->method_type_signature_at(index));
+        verify_legal_method_signature(no_name, signature, CHECK_(nullHandle));
+        break;
+      }
     }  // end of switch
   }  // end of for
 
@@ -467,7 +615,7 @@
   case JVM_CONSTANT_UnresolvedClass :
     // Patching a class means pre-resolving it.
     // The name in the constant pool is ignored.
-    if (patch->klass() == SystemDictionary::Class_klass()) { // %%% java_lang_Class::is_instance
+    if (java_lang_Class::is_instance(patch())) {
       guarantee_property(!java_lang_Class::is_primitive(patch()),
                          "Illegal class patch at %d in class file %s",
                          index, CHECK);
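
The byte layouts the parser reads above, reconstructed from its get_u1_fast/get_u2_fast calls (note that in this transitional JSR 292 form, InvokeDynamic's first operand is a constant pool reference to a MethodHandle entry, not yet an index into a BootstrapMethods attribute):

    struct CONSTANT_MethodHandle_info  { u1 tag; u1 reference_kind; u2 reference_index; };
    struct CONSTANT_MethodType_info    { u1 tag; u2 descriptor_index; };
    struct CONSTANT_InvokeDynamic_info { u1 tag; u2 bootstrap_method_index; u2 name_and_type_index; };
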
--- a/src/share/vm/classfile/classLoader.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/classfile/classLoader.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -832,7 +832,6 @@
 
 
 instanceKlassHandle ClassLoader::load_classfile(symbolHandle h_name, TRAPS) {
-  VTuneClassLoadMarker clm;
   ResourceMark rm(THREAD);
   EventMark m("loading class " INTPTR_FORMAT, (address)h_name());
   ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
--- a/src/share/vm/classfile/systemDictionary.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/classfile/systemDictionary.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -2454,6 +2454,48 @@
   return Handle(THREAD, (oop) result.get_jobject());
 }
 
+// Ask Java code to find or construct a method handle constant.
+Handle SystemDictionary::link_method_handle_constant(KlassHandle caller,
+                                                     int ref_kind, //e.g., JVM_REF_invokeVirtual
+                                                     KlassHandle callee,
+                                                     symbolHandle name_sym,
+                                                     symbolHandle signature,
+                                                     TRAPS) {
+  Handle empty;
+  Handle name = java_lang_String::create_from_symbol(name_sym(), CHECK_(empty));
+  Handle type;
+  if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
+    bool ignore_is_on_bcp = false;
+    type = find_method_handle_type(signature, caller, ignore_is_on_bcp, CHECK_(empty));
+  } else {
+    SignatureStream ss(signature(), false);
+    if (!ss.is_done()) {
+      oop mirror = ss.as_java_mirror(caller->class_loader(), caller->protection_domain(),
+                                     SignatureStream::NCDFError, CHECK_(empty));
+      type = Handle(THREAD, mirror);
+      ss.next();
+      if (!ss.is_done())  type = Handle();  // error!
+    }
+  }
+  if (type.is_null()) {
+    THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad signature", empty);
+  }
+
+  // call sun.dyn.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle
+  JavaCallArguments args;
+  args.push_oop(caller->java_mirror());  // the referring class
+  args.push_int(ref_kind);
+  args.push_oop(callee->java_mirror());  // the target class
+  args.push_oop(name());
+  args.push_oop(type());
+  JavaValue result(T_OBJECT);
+  JavaCalls::call_static(&result,
+                         SystemDictionary::MethodHandleNatives_klass(),
+                         vmSymbols::linkMethodHandleConstant_name(),
+                         vmSymbols::linkMethodHandleConstant_signature(),
+                         &args, CHECK_(empty));
+  return Handle(THREAD, (oop) result.get_jobject());
+}
 
 // Ask Java code to find or construct a java.dyn.CallSite for the given
 // name and signature, as interpreted relative to the given class loader.
@@ -2465,6 +2507,10 @@
                                                 int caller_bci,
                                                 TRAPS) {
   Handle empty;
+  guarantee(bootstrap_method.not_null() &&
+            java_dyn_MethodHandle::is_instance(bootstrap_method()),
+            "caller must supply a valid BSM");
+
   Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty));
   MethodHandles::init_MemberName(caller_mname(), caller_method());
 
@@ -2495,20 +2541,61 @@
   return call_site_oop;
 }
 
-Handle SystemDictionary::find_bootstrap_method(KlassHandle caller, TRAPS) {
+Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int caller_bci,
+                                               int cache_index, TRAPS) {
   Handle empty;
-  if (!caller->oop_is_instance())  return empty;
-
-  instanceKlassHandle ik(THREAD, caller());
-
-  oop boot_method_oop = ik->bootstrap_method();
-  if (boot_method_oop != NULL) {
+
+  constantPoolHandle pool;
+  {
+    klassOop caller = caller_method->method_holder();
+    if (!Klass::cast(caller)->oop_is_instance())  return empty;
+    pool = constantPoolHandle(THREAD, instanceKlass::cast(caller)->constants());
+  }
+
+  int constant_pool_index = pool->cache()->entry_at(cache_index)->constant_pool_index();
+  constantTag tag = pool->tag_at(constant_pool_index);
+
+  if (tag.is_invoke_dynamic()) {
+    // JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootstrap method, name&type].
+    // The bootstrap method, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
+    int bsm_index = pool->invoke_dynamic_bootstrap_method_ref_index_at(constant_pool_index);
+    if (bsm_index != 0) {
+      int bsm_index_in_cache = pool->cache()->entry_at(cache_index)->bootstrap_method_index_in_cache();
+      DEBUG_ONLY(int bsm_index_2 = pool->cache()->entry_at(bsm_index_in_cache)->constant_pool_index());
+      assert(bsm_index == bsm_index_2, "BSM constant lifted to cache");
+      if (TraceMethodHandles) {
+        tty->print_cr("resolving bootstrap method for "PTR_FORMAT" at %d at cache[%d]CP[%d]...",
+                      (intptr_t) caller_method(), caller_bci, cache_index, constant_pool_index);
+      }
+      oop bsm_oop = pool->resolve_cached_constant_at(bsm_index_in_cache, CHECK_(empty));
+      if (TraceMethodHandles) {
+        tty->print_cr("bootstrap method for "PTR_FORMAT" at %d retrieved as "PTR_FORMAT":",
+                      (intptr_t) caller_method(), caller_bci, (intptr_t) bsm_oop);
+      }
+      assert(bsm_oop->is_oop()
+             && java_dyn_MethodHandle::is_instance(bsm_oop), "must be sane");
+      return Handle(THREAD, bsm_oop);
+    }
+    // else null BSM; fall through
+  } else if (tag.is_name_and_type()) {
+    // JSR 292 EDR does not have JVM_CONSTANT_InvokeDynamic
+    // a bare name&type defaults its BSM to null, so fall through...
+  } else {
+    ShouldNotReachHere();  // verifier does not allow this
+  }
+
+  // Fall through to pick up the per-class bootstrap method.
+  // This mechanism may go away in the PFD.
+  assert(AllowTransitionalJSR292, "else the verifier should have stopped us already");
+  oop bsm_oop = instanceKlass::cast(caller_method->method_holder())->bootstrap_method();
+  if (bsm_oop != NULL) {
     if (TraceMethodHandles) {
-      tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop);
+      tty->print_cr("bootstrap method for "PTR_FORMAT" registered as "PTR_FORMAT":",
+                    (intptr_t) caller_method(), (intptr_t) bsm_oop);
     }
-    assert(boot_method_oop->is_oop()
-           && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
-    return Handle(THREAD, boot_method_oop);
+    assert(bsm_oop->is_oop()
+           && java_dyn_MethodHandle::is_instance(bsm_oop), "must be sane");
+    return Handle(THREAD, bsm_oop);
   }
 
   return empty;
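
A condensed restatement of the lookup order find_bootstrap_method now follows:

    // 1. CP entry is JVM_CONSTANT_InvokeDynamic with a non-zero BSM index:
    //      resolve the referenced JVM_CONSTANT_MethodHandle through its own cache entry.
    // 2. Entry is a bare NameAndType (EDR-era class file) or the BSM index is zero:
    //      fall back to the per-class bootstrap method (AllowTransitionalJSR292 only).
    // 3. Any other tag was already rejected by the verifier.
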
--- a/src/share/vm/classfile/systemDictionary.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/classfile/systemDictionary.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -473,6 +473,13 @@
                                            KlassHandle accessing_klass,
                                            bool& return_bcp_flag,
                                            TRAPS);
+  // ask Java to compute a java.dyn.MethodHandle object for a given CP entry
+  static Handle    link_method_handle_constant(KlassHandle caller,
+                                               int ref_kind, //e.g., JVM_REF_invokeVirtual
+                                               KlassHandle callee,
+                                               symbolHandle name,
+                                               symbolHandle signature,
+                                               TRAPS);
   // ask Java to create a dynamic call site, while linking an invokedynamic op
   static Handle    make_dynamic_call_site(Handle bootstrap_method,
                                           // Callee information:
@@ -485,7 +492,10 @@
                                           TRAPS);
 
   // coordinate with Java about bootstrap methods
-  static Handle    find_bootstrap_method(KlassHandle caller, TRAPS);
+  static Handle    find_bootstrap_method(methodHandle caller_method,
+                                         int caller_bci,  // N.B. must be an invokedynamic
+                                         int cache_index, // must be corresponding main_entry
+                                         int cache_index, // must be the corresponding main_entry
 
   // Utility for printing loader "name" as part of tracing constraints
   static const char* loader_name(oop loader) {
--- a/src/share/vm/classfile/verifier.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/classfile/verifier.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1598,7 +1598,10 @@
   if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) {
     if (!tag.is_unresolved_string() && !tag.is_unresolved_klass()) {
       types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float)
-            | (1 << JVM_CONSTANT_String)  | (1 << JVM_CONSTANT_Class);
+            | (1 << JVM_CONSTANT_String)  | (1 << JVM_CONSTANT_Class)
+            | (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType);
+      // Note:  The class file parser already verified the legality of
+      // MethodHandle and MethodType constants.
       verify_cp_type(index, cp, types, CHECK_VERIFY(this));
     }
   } else {
@@ -1632,6 +1635,14 @@
     current_frame->push_stack_2(
       VerificationType::long_type(),
       VerificationType::long2_type(), CHECK_VERIFY(this));
+  } else if (tag.is_method_handle()) {
+    current_frame->push_stack(
+      VerificationType::reference_type(
+        vmSymbols::java_dyn_MethodHandle()), CHECK_VERIFY(this));
+  } else if (tag.is_method_type()) {
+    current_frame->push_stack(
+      VerificationType::reference_type(
+        vmSymbols::java_dyn_MethodType()), CHECK_VERIFY(this));
   } else {
     verify_error(bci, "Invalid index in ldc");
     return;
@@ -1902,7 +1913,8 @@
   unsigned int types = (opcode == Bytecodes::_invokeinterface
                                 ? 1 << JVM_CONSTANT_InterfaceMethodref
                       : opcode == Bytecodes::_invokedynamic
-                                ? 1 << JVM_CONSTANT_NameAndType
+                                ? (1 << JVM_CONSTANT_NameAndType
+                                  | 1 << JVM_CONSTANT_InvokeDynamic)
                                 : 1 << JVM_CONSTANT_Methodref);
   verify_cp_type(index, cp, types, CHECK_VERIFY(this));
 
@@ -1920,9 +1932,12 @@
   // Get referenced class type
   VerificationType ref_class_type;
   if (opcode == Bytecodes::_invokedynamic) {
-    if (!EnableInvokeDynamic) {
+    if (!EnableInvokeDynamic ||
+        _klass->major_version() < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
       class_format_error(
-        "invokedynamic instructions not enabled on this JVM",
+        (!EnableInvokeDynamic ?
+         "invokedynamic instructions not enabled in this JVM" :
+         "invokedynamic instructions not supported by this class file version"),
         _klass->external_name());
       return;
     }
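
The types value is a bitmask over constant tag values, and verify_cp_type passes when the tag's bit is set; a worked example under the standard tag numbering (JVM_CONSTANT_MethodHandle = 15, JVM_CONSTANT_MethodType = 16), with the final membership test an assumption about verify_cp_type's internal shape:

    unsigned int types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float)
                       | (1 << JVM_CONSTANT_String)  | (1 << JVM_CONSTANT_Class)
                       | (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType);
    bool ok = (types & (1u << tag.value())) != 0;  // tag 15 or 16 now passes for ldc
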
--- a/src/share/vm/classfile/verifier.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/classfile/verifier.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -25,7 +25,10 @@
 // The verifier class
 class Verifier : AllStatic {
  public:
-  enum { STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50 };
+  enum {
+    STACKMAP_ATTRIBUTE_MAJOR_VERSION    = 50,
+    INVOKEDYNAMIC_MAJOR_VERSION         = 51
+  };
   typedef enum { ThrowException, NoException } Mode;
 
   /**
--- a/src/share/vm/classfile/vmSymbols.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/classfile/vmSymbols.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -246,6 +246,8 @@
   /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */            \
   template(findMethodHandleType_name,                 "findMethodHandleType")                     \
   template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
+  template(linkMethodHandleConstant_name,             "linkMethodHandleConstant")                 \
+  template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \
   template(makeDynamicCallSite_name,                  "makeDynamicCallSite")                      \
   template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \
   NOT_LP64(  do_alias(machine_word_signature,         int_signature)  )                           \
--- a/src/share/vm/code/codeBlob.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/code/codeBlob.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -202,6 +202,11 @@
 //----------------------------------------------------------------------------------------------------
 // Implementation of AdapterBlob
 
+AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
+  BufferBlob("I2C/C2I adapters", size, cb) {
+  CodeCache::commit(this);
+}
+
 AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
 
@@ -281,7 +286,6 @@
       tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub);
       Disassembler::decode(stub->instructions_begin(), stub->instructions_end());
     }
-    VTune::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
     Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
@@ -356,7 +360,6 @@
       tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
       Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
     }
-    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
     Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
@@ -414,7 +417,6 @@
       tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
       Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
     }
-    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
     Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
@@ -474,7 +476,6 @@
       tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
       Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
     }
-    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
     Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
@@ -533,7 +534,6 @@
       tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
       Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
     }
-    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
     Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
--- a/src/share/vm/code/codeBlob.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/code/codeBlob.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -219,8 +219,7 @@
 
 class AdapterBlob: public BufferBlob {
 private:
-  AdapterBlob(int size)                 : BufferBlob("I2C/C2I adapters", size) {}
-  AdapterBlob(int size, CodeBuffer* cb) : BufferBlob("I2C/C2I adapters", size, cb) {}
+  AdapterBlob(int size, CodeBuffer* cb);
 
 public:
   // Creation
--- a/src/share/vm/code/codeCache.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/code/codeCache.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -93,6 +93,8 @@
 
 CodeHeap * CodeCache::_heap = new CodeHeap();
 int CodeCache::_number_of_blobs = 0;
+int CodeCache::_number_of_adapters = 0;
+int CodeCache::_number_of_nmethods = 0;
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 bool CodeCache::_needs_cache_clean = false;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
@@ -176,8 +178,14 @@
   verify_if_often();
 
   print_trace("free", cb);
-  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
-    _number_of_nmethods_with_dependencies--;
+  if (cb->is_nmethod()) {
+    _number_of_nmethods--;
+    if (((nmethod *)cb)->has_dependencies()) {
+      _number_of_nmethods_with_dependencies--;
+    }
+  }
+  if (cb->is_adapter_blob()) {
+    _number_of_adapters--;
   }
   _number_of_blobs--;
 
@@ -191,9 +199,16 @@
 void CodeCache::commit(CodeBlob* cb) {
   // this is called by nmethod::nmethod, which must already own CodeCache_lock
   assert_locked_or_safepoint(CodeCache_lock);
-  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
-    _number_of_nmethods_with_dependencies++;
+  if (cb->is_nmethod()) {
+    _number_of_nmethods++;
+    if (((nmethod *)cb)->has_dependencies()) {
+      _number_of_nmethods_with_dependencies++;
+    }
   }
+  if (cb->is_adapter_blob()) {
+    _number_of_adapters++;
+  }
+
   // flush the hardware I-cache
   ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
 }
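
A minimal standalone sketch of the bookkeeping pattern the commit()/free() hunks above introduce: classify each blob once on both paths so the typed counters can never drift from _number_of_blobs. The Blob and Cache names below are illustrative stand-ins, not the HotSpot types.

    #include <cassert>

    struct Blob { bool is_nmethod; bool is_adapter; };  // hypothetical stand-in

    struct Cache {
      int blobs, nmethods, adapters;
      Cache() : blobs(0), nmethods(0), adapters(0) {}

      void commit(const Blob& b) {       // mirrors CodeCache::commit
        if (b.is_nmethod) nmethods++;
        if (b.is_adapter) adapters++;
        blobs++;
      }
      void free(const Blob& b) {         // mirrors CodeCache::free
        if (b.is_nmethod) nmethods--;
        if (b.is_adapter) adapters--;
        blobs--;
        assert(blobs >= 0 && nmethods >= 0 && adapters >= 0);
      }
    };
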
--- a/src/share/vm/code/codeCache.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/code/codeCache.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,6 +43,8 @@
   // 4422213 or 4436291 for details.
   static CodeHeap * _heap;
   static int _number_of_blobs;
+  static int _number_of_adapters;
+  static int _number_of_nmethods;
   static int _number_of_nmethods_with_dependencies;
   static bool _needs_cache_clean;
   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
@@ -105,6 +107,8 @@
   static nmethod* first_nmethod();
   static nmethod* next_nmethod (CodeBlob* cb);
   static int       nof_blobs()                 { return _number_of_blobs; }
+  static int       nof_adapters()              { return _number_of_adapters; }
+  static int       nof_nmethods()              { return _number_of_nmethods; }
 
   // GC support
   static void gc_epilogue();
--- a/src/share/vm/code/nmethod.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/code/nmethod.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -397,11 +397,6 @@
 //-------------end of code for ExceptionCache--------------
 
 
-void nmFlags::clear() {
-  assert(sizeof(nmFlags) == sizeof(int), "using more than one word for nmFlags");
-  *(jint*)this = 0;
-}
-
 int nmethod::total_size() const {
   return
     code_size()          +
@@ -419,8 +414,32 @@
   return NULL;
 }
 
-// %%% This variable is no longer used?
-int nmethod::_zombie_instruction_size = NativeJump::instruction_size;
+// Fill in default values for various flag fields
+void nmethod::init_defaults() {
+  _state                      = alive;
+  _marked_for_reclamation     = 0;
+  _has_flushed_dependencies   = 0;
+  _speculatively_disconnected = 0;
+  _has_unsafe_access          = 0;
+  _has_method_handle_invokes  = 0;
+  _marked_for_deoptimization  = 0;
+  _lock_count                 = 0;
+  _stack_traversal_mark       = 0;
+  _unload_reported            = false;           // jvmti state
+
+  NOT_PRODUCT(_has_debug_info = false);
+  _oops_do_mark_link       = NULL;
+  _jmethod_id              = NULL;
+  _osr_link                = NULL;
+  _scavenge_root_link      = NULL;
+  _scavenge_root_state     = 0;
+  _saved_nmethod_link      = NULL;
+  _compiler                = NULL;
+
+#ifdef HAVE_DTRACE_H
+  _trap_offset             = 0;
+#endif // def HAVE_DTRACE_H
+}
 
 
 nmethod* nmethod::new_native_nmethod(methodHandle method,
@@ -580,24 +599,16 @@
     debug_only(No_Safepoint_Verifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
-    NOT_PRODUCT(_has_debug_info = false);
-    _oops_do_mark_link       = NULL;
+    init_defaults();
     _method                  = method;
     _entry_bci               = InvocationEntryBci;
-    _osr_link                = NULL;
-    _scavenge_root_link      = NULL;
-    _scavenge_root_state     = 0;
-    _saved_nmethod_link      = NULL;
-    _compiler                = NULL;
     // We have no exception handler or deopt handler, so make the
     // values something that will never match a pc, like the nmethod vtable entry
     _exception_offset        = 0;
     _deoptimize_offset       = 0;
     _deoptimize_mh_offset    = 0;
     _orig_pc_offset          = 0;
-#ifdef HAVE_DTRACE_H
-    _trap_offset             = 0;
-#endif // def HAVE_DTRACE_H
+
     _stub_offset             = data_offset();
     _consts_offset           = data_offset();
     _oops_offset             = data_offset();
@@ -615,17 +626,9 @@
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
 
-    flags.clear();
-    flags.state              = alive;
-    _markedForDeoptimization = 0;
-
-    _lock_count = 0;
-    _stack_traversal_mark    = 0;
-
     code_buffer->copy_oops_to(this);
     debug_only(verify_scavenge_root_oops());
     CodeCache::commit(this);
-    VTune::create_nmethod(this);
   }
 
   if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
@@ -673,14 +676,9 @@
     debug_only(No_Safepoint_Verifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
-    NOT_PRODUCT(_has_debug_info = false);
-    _oops_do_mark_link       = NULL;
+    init_defaults();
     _method                  = method;
     _entry_bci               = InvocationEntryBci;
-    _osr_link                = NULL;
-    _scavenge_root_link      = NULL;
-    _scavenge_root_state     = 0;
-    _compiler                = NULL;
     // We have no exception handler or deopt handler, so make the
     // values something that will never match a pc, like the nmethod vtable entry
     _exception_offset        = 0;
@@ -706,17 +704,9 @@
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
 
-    flags.clear();
-    flags.state              = alive;
-    _markedForDeoptimization = 0;
-
-    _lock_count = 0;
-    _stack_traversal_mark    = 0;
-
     code_buffer->copy_oops_to(this);
     debug_only(verify_scavenge_root_oops());
     CodeCache::commit(this);
-    VTune::create_nmethod(this);
   }
 
   if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
@@ -781,20 +771,13 @@
     debug_only(No_Safepoint_Verifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
-    NOT_PRODUCT(_has_debug_info = false);
-    _oops_do_mark_link       = NULL;
+    init_defaults();
     _method                  = method;
+    _entry_bci               = entry_bci;
     _compile_id              = compile_id;
     _comp_level              = comp_level;
-    _entry_bci               = entry_bci;
-    _osr_link                = NULL;
-    _scavenge_root_link      = NULL;
-    _scavenge_root_state     = 0;
     _compiler                = compiler;
     _orig_pc_offset          = orig_pc_offset;
-#ifdef HAVE_DTRACE_H
-    _trap_offset             = 0;
-#endif // def HAVE_DTRACE_H
     _stub_offset             = instructions_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start());
 
     // Exception handler and deopt handler are in the stub section
@@ -821,15 +804,6 @@
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(scopes_pcs_begin());
 
-    flags.clear();
-    flags.state              = alive;
-    _markedForDeoptimization = 0;
-
-    _unload_reported         = false;           // jvmti state
-
-    _lock_count = 0;
-    _stack_traversal_mark    = 0;
-
     // Copy contents of ScopeDescRecorder to nmethod
     code_buffer->copy_oops_to(this);
     debug_info->copy_to(this);
@@ -841,8 +815,6 @@
 
     CodeCache::commit(this);
 
-    VTune::create_nmethod(this);
-
     // Copy contents of ExceptionHandlerTable to nmethod
     handler_table->copy_to(this);
     nul_chk_table->copy_to(this);
@@ -988,11 +960,6 @@
 }
 
 
-void nmethod::set_version(int v) {
-  flags.version = v;
-}
-
-
 // Promote one word from an assembly-time handle to a live embedded oop.
 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
   if (handle == NULL ||
@@ -1139,6 +1106,8 @@
 // This is a private interface with the sweeper.
 void nmethod::mark_as_seen_on_stack() {
   assert(is_not_entrant(), "must be a non-entrant method");
+  // Set the traversal mark to ensure that the sweeper does 2
+  // cleaning passes before moving to zombie.
   set_stack_traversal_mark(NMethodSweeper::traversal_count());
 }
 
@@ -1207,7 +1176,7 @@
     // for later on.
     CodeCache::set_needs_cache_clean(true);
   }
-  flags.state = unloaded;
+  _state = unloaded;
 
   // Log the unloading.
   log_state_change();
@@ -1233,21 +1202,21 @@
   if (LogCompilation) {
     if (xtty != NULL) {
       ttyLocker ttyl;  // keep the following output all in one block
-      if (flags.state == unloaded) {
+      if (_state == unloaded) {
         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
                          os::current_thread_id());
       } else {
         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
                          os::current_thread_id(),
-                         (flags.state == zombie ? " zombie='1'" : ""));
+                         (_state == zombie ? " zombie='1'" : ""));
       }
       log_identity(xtty);
       xtty->stamp();
       xtty->end_elem();
     }
   }
-  if (PrintCompilation && flags.state != unloaded) {
-    print_on(tty, flags.state == zombie ? "made zombie " : "made not entrant ");
+  if (PrintCompilation && _state != unloaded) {
+    print_on(tty, _state == zombie ? "made zombie " : "made not entrant ");
     tty->cr();
   }
 }
@@ -1258,8 +1227,9 @@
 
   bool was_alive = false;
 
-  // Make sure the nmethod is not flushed in case of a safepoint in code below.
+  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
+  methodHandle the_method(method());
 
   {
     // If the method is already zombie there is nothing to do
@@ -1279,7 +1249,7 @@
     // Enter critical section.  Does not block for safepoint.
     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
 
-    if (flags.state == state) {
+    if (_state == state) {
       // another thread already performed this transition so nothing
       // to do, but return false to indicate this.
       return false;
@@ -1290,17 +1260,37 @@
     if (!is_osr_method() && !is_not_entrant()) {
       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                   SharedRuntime::get_handle_wrong_method_stub());
-      assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
     }
 
-    was_alive = is_in_use(); // Read state under lock
+    if (is_in_use()) {
+      // It's a true state change, so mark the method as decompiled.
+      // Do it only for transition from alive.
+      inc_decompile_count();
+    }
 
     // Change state
-    flags.state = state;
+    _state = state;
 
     // Log the transition once
     log_state_change();
 
+    // Remove nmethod from method.
+    // We need to check if both the _code and _from_compiled_code_entry_point
+    // refer to this nmethod because there is a race in setting these two fields
+    // in methodOop as seen in bugid 4947125.
+    // If the vep() points to the zombie nmethod, the memory for the nmethod
+    // could be flushed and the compiler and vtable stubs could still call
+    // through it.
+    if (method() != NULL && (method()->code() == this ||
+                             method()->from_compiled_entry() == verified_entry_point())) {
+      HandleMark hm;
+      method()->clear_code();
+    }
+
+    if (state == not_entrant) {
+      mark_as_seen_on_stack();
+    }
+
   } // leave critical region under Patching_lock
 
   // When the nmethod becomes zombie it is no longer alive so the
@@ -1308,18 +1298,17 @@
   // state will be flushed later when the transition to zombie
   // happens or they get unloaded.
   if (state == zombie) {
+    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
+    // and it hasn't already been reported for this nmethod then report it now.
+    // (the event may have been reported earlier if the GC marked it for unloading).
+    post_compiled_method_unload();
+
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     flush_dependencies(NULL);
   } else {
     assert(state == not_entrant, "other cases may need to be handled differently");
   }
 
-  if (state == not_entrant) {
-    Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
-  } else {
-    Events::log("Make nmethod zombie " INTPTR_FORMAT, this);
-  }
-
   if (TraceCreateZombies) {
     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
   }
@@ -1327,47 +1316,6 @@
   // Make sweeper aware that there is a zombie method that needs to be removed
   NMethodSweeper::notify(this);
 
-  // not_entrant only stuff
-  if (state == not_entrant) {
-    mark_as_seen_on_stack();
-  }
-
-  if (was_alive) {
-    // It's a true state change, so mark the method as decompiled.
-    // Do it only for transition from alive.
-    inc_decompile_count();
-  }
-
-  // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
-  // and it hasn't already been reported for this nmethod then report it now.
-  // (the event may have been reported earilier if the GC marked it for unloading).
-  if (state == zombie) {
-    post_compiled_method_unload();
-  }
-
-
-  // Zombie only stuff
-  if (state == zombie) {
-    VTune::delete_nmethod(this);
-  }
-
-  // Check whether method got unloaded at a safepoint before this,
-  // if so we can skip the flushing steps below
-  if (method() == NULL) return true;
-
-  // Remove nmethod from method.
-  // We need to check if both the _code and _from_compiled_code_entry_point
-  // refer to this nmethod because there is a race in setting these two fields
-  // in methodOop as seen in bugid 4947125.
-  // If the vep() points to the zombie nmethod, the memory for the nmethod
-  // could be flushed and the compiler and vtable stubs could still call
-  // through it.
-  if (method()->code() == this ||
-      method()->from_compiled_entry() == verified_entry_point()) {
-    HandleMark hm;
-    method()->clear_code();
-  }
-
   return true;
 }
 
@@ -1488,11 +1436,25 @@
       moop->signature()->utf8_length(),
       code_begin(), code_size());
 
+  if (JvmtiExport::should_post_compiled_method_load() ||
+      JvmtiExport::should_post_compiled_method_unload()) {
+    get_and_cache_jmethod_id();
+  }
+
   if (JvmtiExport::should_post_compiled_method_load()) {
     JvmtiExport::post_compiled_method_load(this);
   }
 }
 
+jmethodID nmethod::get_and_cache_jmethod_id() {
+  if (_jmethod_id == NULL) {
+    // Cache the jmethod_id since it can no longer be looked up once the
+    // method itself has been marked for unloading.
+    _jmethod_id = method()->jmethod_id();
+  }
+  return _jmethod_id;
+}
+
 void nmethod::post_compiled_method_unload() {
   if (unload_reported()) {
     // During unloading we transition to unloaded and then to zombie
@@ -1504,12 +1466,17 @@
   DTRACE_METHOD_UNLOAD_PROBE(method());
 
   // If a JVMTI agent has enabled the CompiledMethodUnload event then
-  // post the event. Sometime later this nmethod will be made a zombie by
-  // the sweeper but the methodOop will not be valid at that point.
-  if (JvmtiExport::should_post_compiled_method_unload()) {
+  // post the event. Sometime later this nmethod will be made a zombie
+  // by the sweeper but the methodOop will not be valid at that point.
+  // If the _jmethod_id is null then no load event was ever requested
+  // so don't bother posting the unload.  The main reason for this is
+  // that the jmethodID is a weak reference to the methodOop so if
+  // it's being unloaded there's no way to look it up since the weak
+  // ref will have been cleared.
+  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     HandleMark hm;
-    JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin());
+    JvmtiExport::post_compiled_method_unload(_jmethod_id, code_begin());
   }
 
   // The JVMTI CompiledMethodUnload event can be enabled or disabled at
@@ -2087,7 +2054,6 @@
 
 void nmethod_init() {
   // make sure you didn't forget to adjust the filler fields
-  assert(sizeof(nmFlags) <= 4,           "nmFlags occupies more than a word");
   assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
 }
 
@@ -2323,7 +2289,6 @@
     tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
     tty->print(" for method " INTPTR_FORMAT , (address)method());
     tty->print(" { ");
-    if (version())        tty->print("v%d ", version());
     if (is_in_use())      tty->print("in_use ");
     if (is_not_entrant()) tty->print("not_entrant ");
     if (is_zombie())      tty->print("zombie ");
@@ -2659,13 +2624,10 @@
         case Bytecodes::_getstatic:
         case Bytecodes::_putstatic:
           {
-            methodHandle sdm = sd->method();
-            Bytecode_field* field = Bytecode_field_at(sdm(), sdm->bcp_from(sd->bci()));
-            constantPoolOop sdmc = sdm->constants();
-            symbolOop name = sdmc->name_ref_at(field->index());
+            Bytecode_field* field = Bytecode_field_at(sd->method(), sd->bci());
             st->print(" ");
-            if (name != NULL)
-              name->print_symbol_on(st);
+            if (field->name() != NULL)
+              field->name()->print_symbol_on(st);
             else
               st->print("<UNKNOWN>");
           }
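
The jmethodID changes in nmethod.cpp above follow one pattern: a jmethodID is a weak reference to the methodOop, so it must be captured while the method is still alive (at load-event time), and only the cached copy can be used once unloading starts. A minimal sketch of that capture-early idiom; CompiledUnit, lookup_id, and post_unload_event are hypothetical stand-ins for nmethod, method()->jmethod_id(), and the JVMTI post.

    #include <cstddef>

    class CompiledUnit {                  // hypothetical stand-in for nmethod
      void* _cached_id;                   // plays the role of _jmethod_id
     public:
      CompiledUnit() : _cached_id(NULL) {}

      // Load-event time: the weak reference can still be resolved.
      void* get_and_cache_id(void* (*lookup_id)()) {
        if (_cached_id == NULL) {
          _cached_id = lookup_id();
        }
        return _cached_id;
      }

      // Unload time: the method may be gone; report only what was cached.
      // A NULL cache means no load event was ever requested, so post nothing.
      void post_unload(void (*post_unload_event)(void*)) {
        if (_cached_id != NULL) {
          post_unload_event(_cached_id);
        }
      }
    };
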
--- a/src/share/vm/code/nmethod.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/code/nmethod.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -78,29 +78,8 @@
 
 
 // nmethods (native methods) are the compiled code versions of Java methods.
-
-struct nmFlags {
-  friend class VMStructs;
-  unsigned int version:8;                    // version number (0 = first version)
-  unsigned int age:4;                        // age (in # of sweep steps)
-
-  unsigned int state:2;                      // {alive, zombie, unloaded)
-
-  unsigned int isUncommonRecompiled:1;       // recompiled because of uncommon trap?
-  unsigned int isToBeRecompiled:1;           // to be recompiled as soon as it matures
-  unsigned int hasFlushedDependencies:1;     // Used for maintenance of dependencies
-  unsigned int markedForReclamation:1;       // Used by NMethodSweeper
-
-  unsigned int has_unsafe_access:1;          // May fault due to unsafe access.
-  unsigned int has_method_handle_invokes:1;  // Has this method MethodHandle invokes?
-
-  unsigned int speculatively_disconnected:1; // Marked for potential unload
-
-  void clear();
-};
-
-
-// A nmethod contains:
+//
+// An nmethod contains:
 //  - header                 (the nmethod structure)
 //  [Relocation]
 //  - relocation information
@@ -131,10 +110,9 @@
   friend class CodeCache;  // non-perm oops
  private:
   // Shared fields for all nmethod's
-  static int _zombie_instruction_size;
-
   methodOop _method;
   int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
+  jmethodID _jmethod_id;       // Cache of method()->jmethod_id()
 
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;         // from instanceKlass::osr_nmethods_head
@@ -146,6 +124,11 @@
 
   AbstractCompiler* _compiler; // The compiler which compiled this nmethod
 
+  // offsets for entry points
+  address _entry_point;                      // entry point with class check
+  address _verified_entry_point;             // entry point without class check
+  address _osr_entry_point;                  // entry point for on stack replacement
+
   // Offsets for different nmethod parts
   int _exception_offset;
   // All deoptee's will resume execution at this location described by
@@ -174,23 +157,31 @@
   // pc during a deopt.
   int _orig_pc_offset;
 
-  int _compile_id;                     // which compilation made this nmethod
-  int _comp_level;                     // compilation level
+  int _compile_id;                           // which compilation made this nmethod
+  int _comp_level;                           // compilation level
+
+  // protected by CodeCache_lock
+  bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
+  bool _speculatively_disconnected;          // Marked for potential unload
+
+  bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
+  bool _marked_for_deoptimization;           // Used for stack deoptimization
 
-  // offsets for entry points
-  address _entry_point;                // entry point with class check
-  address _verified_entry_point;       // entry point without class check
-  address _osr_entry_point;            // entry point for on stack replacement
+  // used by jvmti to track if an unload event has been posted for this nmethod.
+  bool _unload_reported;
 
-  nmFlags flags;           // various flags to keep track of nmethod state
-  bool _markedForDeoptimization;       // Used for stack deoptimization
+  // set during construction
+  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
+  unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
+
+  // Protected by Patching_lock
+  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
+
   enum { alive        = 0,
          not_entrant  = 1, // uncommon trap has happened but activations may still exist
          zombie       = 2,
          unloaded     = 3 };
 
-  // used by jvmti to track if an unload event has been posted for this nmethod.
-  bool _unload_reported;
 
   jbyte _scavenge_root_state;
 
@@ -269,15 +260,15 @@
   bool make_not_entrant_or_zombie(unsigned int state);
   void inc_decompile_count();
 
-  // used to check that writes to nmFlags are done consistently.
-  static void check_safepoint() PRODUCT_RETURN;
-
   // Used to manipulate the exception cache
   void add_exception_cache_entry(ExceptionCache* new_entry);
   ExceptionCache* exception_cache_entry_for_exception(Handle exception);
 
   // Inform external interfaces that a compiled method has been unloaded
-  inline void post_compiled_method_unload();
+  void post_compiled_method_unload();
+
+  // Initialize fields to their default values
+  void init_defaults();
 
  public:
   // create nmethod with entry_bci
@@ -392,11 +383,11 @@
   address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct
 
   // flag accessing and manipulation
-  bool  is_in_use() const                         { return flags.state == alive; }
-  bool  is_alive() const                          { return flags.state == alive || flags.state == not_entrant; }
-  bool  is_not_entrant() const                    { return flags.state == not_entrant; }
-  bool  is_zombie() const                         { return flags.state == zombie; }
-  bool  is_unloaded() const                       { return flags.state == unloaded;   }
+  bool  is_in_use() const                         { return _state == alive; }
+  bool  is_alive() const                          { return _state == alive || _state == not_entrant; }
+  bool  is_not_entrant() const                    { return _state == not_entrant; }
+  bool  is_zombie() const                         { return _state == zombie; }
+  bool  is_unloaded() const                       { return _state == unloaded;   }
 
   // Make the nmethod non entrant. The nmethod will continue to be
   // alive.  It is used when an uncommon trap happens.  Returns true
@@ -409,37 +400,33 @@
   bool  unload_reported()                         { return _unload_reported; }
   void  set_unload_reported()                     { _unload_reported = true; }
 
-  bool  is_marked_for_deoptimization() const      { return _markedForDeoptimization; }
-  void  mark_for_deoptimization()                 { _markedForDeoptimization = true; }
+  bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
+  void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }
 
   void  make_unloaded(BoolObjectClosure* is_alive, oop cause);
 
   bool has_dependencies()                         { return dependencies_size() != 0; }
   void flush_dependencies(BoolObjectClosure* is_alive);
-  bool  has_flushed_dependencies()                { return flags.hasFlushedDependencies; }
-  void  set_has_flushed_dependencies()            {
+  bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
+  void set_has_flushed_dependencies()             {
     assert(!has_flushed_dependencies(), "should only happen once");
-    flags.hasFlushedDependencies = 1;
+    _has_flushed_dependencies = 1;
   }
 
-  bool  is_marked_for_reclamation() const         { return flags.markedForReclamation; }
-  void  mark_for_reclamation()                    { flags.markedForReclamation = 1; }
-  void  unmark_for_reclamation()                  { flags.markedForReclamation = 0; }
+  bool  is_marked_for_reclamation() const         { return _marked_for_reclamation; }
+  void  mark_for_reclamation()                    { _marked_for_reclamation = 1; }
+
+  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
+  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }
 
-  bool  has_unsafe_access() const                 { return flags.has_unsafe_access; }
-  void  set_has_unsafe_access(bool z)             { flags.has_unsafe_access = z; }
+  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
+  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }
 
-  bool  has_method_handle_invokes() const         { return flags.has_method_handle_invokes; }
-  void  set_has_method_handle_invokes(bool z)     { flags.has_method_handle_invokes = z; }
-
-  bool  is_speculatively_disconnected() const     { return flags.speculatively_disconnected; }
-  void  set_speculatively_disconnected(bool z)     { flags.speculatively_disconnected = z; }
+  bool  is_speculatively_disconnected() const     { return _speculatively_disconnected; }
+  void  set_speculatively_disconnected(bool z)     { _speculatively_disconnected = z; }
 
   int   comp_level() const                        { return _comp_level; }
 
-  int   version() const                           { return flags.version; }
-  void  set_version(int v);
-
   // Support for oops in scopes and relocs:
   // Note: index 0 is reserved for null.
   oop   oop_at(int index) const                   { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
@@ -599,6 +586,7 @@
 
   // jvmti support:
   void post_compiled_method_load_event();
+  jmethodID get_and_cache_jmethod_id();
 
   // verify operations
   void verify();
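
The nmethod.hpp rewrite above drops the nmFlags bitfield in favor of discrete bool fields plus a single _state byte, each annotated with the lock that guards it. The transition check this enables is the one seen earlier under Patching_lock: compare, bail out if another thread already got there, then store. A minimal sketch with the same state values; the locking and the extra zombie/unloaded bookkeeping are elided.

    enum { alive = 0, not_entrant = 1, zombie = 2, unloaded = 3 };

    struct StateHolder {                  // hypothetical stand-in for nmethod
      unsigned char _state;
      StateHolder() : _state(alive) {}

      // Mirrors the check in make_not_entrant_or_zombie: if another thread
      // already performed this transition there is nothing to do, and the
      // caller is told so via the return value.
      bool try_transition(unsigned char new_state) {
        if (_state == new_state) return false;
        _state = new_state;
        return true;
      }

      bool is_in_use() const { return _state == alive; }
      bool is_alive()  const { return _state == alive || _state == not_entrant; }
      bool is_zombie() const { return _state == zombie; }
    };
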
--- a/src/share/vm/code/vtableStubs.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/code/vtableStubs.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,6 @@
     }
     _chunk = blob->instructions_begin();
     _chunk_end = _chunk + bytes;
-    VTune::register_stub("vtable stub", _chunk, _chunk_end);
     Forte::register_stub("vtable stub", _chunk, _chunk_end);
     // Notify JVMTI about this stub. The event will be recorded by the enclosing
     // JvmtiDynamicCodeEventCollector and posted when this thread has released
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -664,19 +664,14 @@
         return;
       }
 
-      // XXX use a global constant instead of 64!
-      typedef struct OopTaskQueuePadded {
-        OopTaskQueue work_queue;
-        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
-      } OopTaskQueuePadded;
-
+      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
       for (i = 0; i < num_queues; i++) {
-        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
-        if (q_padded == NULL) {
+        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
+        if (q == NULL) {
           warning("work_queue allocation failure.");
           return;
         }
-        _task_queues->register_queue(i, &q_padded->work_queue);
+        _task_queues->register_queue(i, q);
       }
       for (i = 0; i < num_queues; i++) {
         _task_queues->queue(i)->initialize();
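
Padded<OopTaskQueue> above replaces the hand-rolled OopTaskQueuePadded struct; both exist to prevent false sharing, where per-worker queues that share a cache line cause cross-CPU traffic on every push and pop. A minimal sketch of such a wrapper, assuming a fixed 64-byte line; the real utility derives the size from a platform constant and, unlike the old char pad[64 - sizeof(OopTaskQueue)], is not limited to types smaller than one line.

    const unsigned int CACHE_LINE = 64;  // assumption; platform-derived in practice

    // Rounds T up past the next cache-line boundary so adjacent elements
    // (one queue per GC worker) never write to the same line. When sizeof(T)
    // is already a multiple of CACHE_LINE this pads one full extra line,
    // keeping the array size non-zero and legal C++.
    template <class T>
    struct Padded : public T {
      char _pad[CACHE_LINE - sizeof(T) % CACHE_LINE];
    };

    // Usage mirroring the hunk above (a Padded<T> is-a T):
    //   Padded<OopTaskQueue>* q = new Padded<OopTaskQueue>();
    //   _task_queues->register_queue(i, q);
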
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -234,6 +234,11 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (_gc_cause != GCCause::_gc_locker &&
       gch->total_full_collections_completed() <= _full_gc_count_before) {
+    // maybe we should change the condition to test _gc_cause ==
+    // GCCause::_java_lang_system_gc, instead of
+    // _gc_cause != GCCause::_gc_locker
+    assert(_gc_cause == GCCause::_java_lang_system_gc,
+           "the only way to get here if this was a System.gc()-induced GC");
     assert(ExplicitGCInvokesConcurrent, "Error");
     // Now, wait for witnessing concurrent gc cycle to complete,
     // but do so in native mode, because we want to lock the
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -271,21 +271,16 @@
     if (cas_res == prev_epoch_entry) {
       // We successfully updated the card num value in the epoch entry
       count_ptr->_count = 0; // initialize counter for new card num
+      jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
 
       // Even though the region containing the card at old_card_num was not
       // in the young list when old_card_num was recorded in the epoch
       // cache it could have been added to the free list and subsequently
-      // added to the young list in the intervening time. If the evicted
-      // card is in a young region just return the card_ptr and the evicted
-      // card will not be cleaned. See CR 6817995.
-
-      jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
-      if (is_young_card(old_card_ptr)) {
-        *count = 0;
-        // We can defer the processing of card_ptr
-        *defer = true;
-        return card_ptr;
-      }
+      // added to the young list in the intervening time. See CR 6817995.
+      // We do not deal with this case here - it will be handled in
+      // HeapRegion::oops_on_card_seq_iterate_careful after it has been
+      // determined that the region containing the card has been allocated
+      // to, and it's safe to check the young type of the region.
 
       // We do not want to defer processing of card_ptr in this case
       // (we need to refine old_card_ptr and card_ptr)
@@ -301,22 +296,22 @@
   jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
   assert(cached_ptr != NULL, "bad cached card ptr");
 
-  if (is_young_card(cached_ptr)) {
-    // The region containing cached_ptr has been freed during a clean up
-    // pause, reallocated, and tagged as young.
-    assert(cached_ptr != card_ptr, "shouldn't be");
+  // We've just inserted a card pointer into the card count cache
+  // and got back either the card that we just inserted or the
+  // (evicted) previous contents of that count slot.
 
-    // We've just inserted a new old-gen card pointer into the card count
-    // cache and evicted the previous contents of that count slot.
-    // The evicted card pointer has been determined to be in a young region
-    // and so cannot be the newly inserted card pointer (that will be
-    // in an old region).
-    // The count for newly inserted card will be set to zero during the
-    // insertion, so we don't want to defer the cleaning of the newly
-    // inserted card pointer.
-    assert(*defer == false, "deferring non-hot card");
-    return NULL;
-  }
+  // The card we got back could be in a young region. When the
+  // returned card (if evicted) was originally inserted, we had
+  // determined that its containing region was not young. However
+  // it is possible for the region to be freed during a cleanup
+  // pause, then reallocated and tagged as young which will result
+  // in the returned card residing in a young region.
+  //
+  // We do not deal with this case here - the change from non-young
+  // to young could be observed at any time - it will be handled in
+  // HeapRegion::oops_on_card_seq_iterate_careful after it has been
+  // determined that the region containing the card has been allocated
+  // to.
 
   // The card pointer we obtained from card count cache is not hot
   // so do not store it in the cache; return it for immediate
@@ -325,7 +320,7 @@
     return cached_ptr;
   }
 
-  // Otherwise, the pointer we got from the _card_counts is hot.
+  // Otherwise, the pointer we got from the _card_counts cache is hot.
   jbyte* res = NULL;
   MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
   if (_n_hot == _hot_cache_size) {
@@ -338,17 +333,8 @@
   if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
   _n_hot++;
 
-  if (res != NULL) {
-    // Even though the region containg res was not in the young list
-    // when it was recorded in the hot cache it could have been added
-    // to the free list and subsequently added to the young list in
-    // the intervening time. If res is in a young region, return NULL
-    // so that res is not cleaned. See CR 6817995.
-
-    if (is_young_card(res)) {
-      res = NULL;
-    }
-  }
+  // The card obtained from the hot card cache could be in a young
+  // region. See above on how this can happen.
 
   return res;
 }
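
The hot card cache touched above is a fixed-size ring: storing a hot card when _n_hot == _hot_cache_size evicts the oldest entry, which the caller then refines immediately instead of caching. A minimal sketch of that evict-on-insert ring, with hypothetical names and the HotCardCache_lock elided.

    #include <cstddef>

    template <class T, size_t N>
    class EvictingRing {                  // hypothetical stand-in for the hot cache
      T*     _slots[N];
      size_t _idx;                        // next slot to overwrite
      size_t _count;                      // live entries, at most N
     public:
      EvictingRing() : _idx(0), _count(0) {
        for (size_t i = 0; i < N; i++) _slots[i] = NULL;
      }

      // Returns the displaced entry (NULL while the ring is still filling);
      // the caller refines it right away rather than re-caching it.
      T* push(T* entry) {
        T* evicted = NULL;
        if (_count == N) {
          evicted = _slots[_idx];
          _count--;
        }
        _slots[_idx] = entry;
        _idx = (_idx + 1) % N;
        _count++;
        return evicted;
      }
    };
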
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -266,6 +266,12 @@
       _cm->clearNextBitmap();
       _sts.leave();
     }
+
+    // Update the number of full collections that have been
+    // completed. This will also notify the FullGCCount_lock in case a
+    // Java thread is waiting for a full GC to happen (e.g., it
+    // called System.gc() with +ExplicitGCInvokesConcurrent).
+    g1->increment_full_collections_completed(true /* outer */);
   }
   assert(_should_terminate, "just checking");
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -638,6 +638,11 @@
 
     // Now retry the allocation.
     if (_cur_alloc_region != NULL) {
+      if (allocated_young_region != NULL) {
+        // We need to ensure that the store to top does not
+        // float above the setting of the young type.
+        OrderAccess::storestore();
+      }
       res = _cur_alloc_region->allocate(word_size);
     }
   }
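
The OrderAccess::storestore() above is a publication barrier: the write that tags the region young must become visible before the write to top that makes its contents reachable, or a concurrent refinement thread could observe the raised top together with the stale region type. A minimal sketch of the writer-side ordering using C++11 primitives in place of OrderAccess; the field names are illustrative, and readers would pair this with a loadload/acquire on their side.

    #include <atomic>

    struct Region {                       // hypothetical two-field region
      std::atomic<int>   young;           // 1 once tagged as young
      std::atomic<char*> top;             // publishing top exposes the region
    };

    void publish_allocation(Region* r, char* new_top) {
      r->young.store(1, std::memory_order_relaxed);
      // storestore: the young-tag write may not be reordered below this
      // fence, so an acquiring reader that sees new_top also sees young == 1.
      std::atomic_thread_fence(std::memory_order_release);
      r->top.store(new_top, std::memory_order_relaxed);
    }
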
@@ -809,7 +814,8 @@
   }
 };
 
-void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
+void G1CollectedHeap::do_collection(bool explicit_gc,
+                                    bool clear_all_soft_refs,
                                     size_t word_size) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
@@ -821,10 +827,6 @@
     Universe::print_heap_before_gc();
   }
 
-  if (full && DisableExplicitGC) {
-    return;
-  }
-
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
 
@@ -837,9 +839,11 @@
     IsGCActiveMark x;
 
     // Timing
+    bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
+    assert(!system_gc || explicit_gc, "invariant");
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                 PrintGC, true, gclog_or_tty);
 
     TraceMemoryManagerStats tms(true /* fullGC */);
@@ -944,7 +948,7 @@
     heap_region_iterate(&rs_clear);
 
     // Resize the heap if necessary.
-    resize_if_necessary_after_full_collection(full ? 0 : word_size);
+    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
@@ -1009,13 +1013,18 @@
             "young list should be empty at this point");
   }
 
+  // Update the number of full collections that have been completed.
+  increment_full_collections_completed(false /* outer */);
+
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
 }
 
 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
-  do_collection(true, clear_all_soft_refs, 0);
+  do_collection(true,                /* explicit_gc */
+                clear_all_soft_refs,
+                0                    /* word_size */);
 }
 
 // This code is mostly copied from TenuredGeneration.
@@ -1331,6 +1340,7 @@
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
+  _full_collections_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL) {
@@ -1689,6 +1699,51 @@
   return car->free();
 }
 
+bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
+  return
+    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+}
+
+void G1CollectedHeap::increment_full_collections_completed(bool outer) {
+  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+
+  // We have already incremented _total_full_collections at the start
+  // of the GC, so total_full_collections() represents how many full
+  // collections have been started.
+  unsigned int full_collections_started = total_full_collections();
+
+  // Given that this method is called at the end of a Full GC or of a
+  // concurrent cycle, and those can be nested (i.e., a Full GC can
+  // interrupt a concurrent cycle), the number of full collections
+  // completed should be either one (in the case where there was no
+  // nesting) or two (when a Full GC interrupted a concurrent cycle)
+  // behind the number of full collections started.
+
+  // This is the case for the inner caller, i.e. a Full GC.
+  assert(outer ||
+         (full_collections_started == _full_collections_completed + 1) ||
+         (full_collections_started == _full_collections_completed + 2),
+         err_msg("for inner caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  // This is the case for the outer caller, i.e. the concurrent cycle.
+  assert(!outer ||
+         (full_collections_started == _full_collections_completed + 1),
+         err_msg("for outer caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  _full_collections_completed += 1;
+
+  // This notify_all() will ensure that a thread that called
+  // System.gc() (with ExplicitGCInvokesConcurrent set or not)
+  // and is waiting for a full GC to finish will be woken up. It is
+  // waiting in VM_G1IncCollectionPause::doit_epilogue().
+  FullGCCount_lock->notify_all();
+}
+
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1709,25 +1764,41 @@
   // The caller doesn't have the Heap_lock
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
-  int gc_count_before;
+  unsigned int gc_count_before;
+  unsigned int full_gc_count_before;
   {
     MutexLocker ml(Heap_lock);
     // Read the GC count while holding the Heap_lock
     gc_count_before = SharedHeap::heap()->total_collections();
+    full_gc_count_before = SharedHeap::heap()->total_full_collections();
 
     // Don't want to do a GC until cleanup is completed.
     wait_for_cleanup_complete();
-  } // We give up heap lock; VMThread::execute gets it back below
-  switch (cause) {
-    case GCCause::_scavenge_alot: {
-      // Do an incremental pause, which might sometimes be abandoned.
-      VM_G1IncCollectionPause op(gc_count_before, cause);
+
+    // We give up heap lock; VMThread::execute gets it back below
+  }
+
+  if (should_do_concurrent_full_gc(cause)) {
+    // Schedule an initial-mark evacuation pause that will start a
+    // concurrent cycle.
+    VM_G1IncCollectionPause op(gc_count_before,
+                               true, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               cause);
+    VMThread::execute(&op);
+  } else {
+    if (cause == GCCause::_gc_locker
+        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+      // Schedule a standard evacuation pause.
+      VM_G1IncCollectionPause op(gc_count_before,
+                                 false, /* should_initiate_conc_mark */
+                                 g1_policy()->max_pause_time_ms(),
+                                 cause);
       VMThread::execute(&op);
-      break;
-    }
-    default: {
-      // In all other cases, we currently do a full gc.
-      VM_G1CollectFull op(gc_count_before, cause);
+    } else {
+      // Schedule a Full GC.
+      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
       VMThread::execute(&op);
     }
   }
@@ -1989,6 +2060,11 @@
 
 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                   HeapRegionClosure *cl) {
+  if (r == NULL) {
+    // The CSet is empty so there's nothing to do.
+    return;
+  }
+
   assert(r->in_collection_set(),
          "Start region must be a member of the collection set.");
   HeapRegion* cur = r;
@@ -2481,11 +2557,13 @@
 }
 
 void G1CollectedHeap::do_collection_pause() {
+  assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");
+
   // Read the GC count while holding the Heap_lock
   // we need to do this _before_ wait_for_cleanup_complete(), to
   // ensure that we do not give up the heap lock and potentially
   // pick up the wrong count
-  int gc_count_before = SharedHeap::heap()->total_collections();
+  unsigned int gc_count_before = SharedHeap::heap()->total_collections();
 
   // Don't want to do a GC pause while cleanup is being completed!
   wait_for_cleanup_complete();
@@ -2493,7 +2571,10 @@
   g1_policy()->record_stop_world_start();
   {
     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1IncCollectionPause op(gc_count_before);
+    VM_G1IncCollectionPause op(gc_count_before,
+                               false, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               GCCause::_g1_inc_collection_pause);
     VMThread::execute(&op);
   }
 }
@@ -2612,7 +2693,7 @@
 };
 
 void
-G1CollectedHeap::do_collection_pause_at_safepoint() {
+G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
@@ -2637,8 +2718,12 @@
       else
         strcat(verbose_str, "(partial)");
     }
-    if (g1_policy()->during_initial_mark_pause())
+    if (g1_policy()->during_initial_mark_pause()) {
       strcat(verbose_str, " (initial-mark)");
+      // We are about to start a marking cycle, so we increment the
+      // full collection counter.
+      increment_total_full_collections();
+    }
 
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
@@ -2661,7 +2746,6 @@
              "young list should be well formed");
     }
 
-    bool abandoned = false;
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
 
@@ -2743,7 +2827,7 @@
 
       // Now choose the CS. We may abandon a pause if we find no
       // region that will fit in the MMU pause.
-      bool abandoned = g1_policy()->choose_collection_set();
+      bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms);
 
       // Nothing to do if we were unable to choose a collection set.
       if (!abandoned) {
@@ -3972,6 +4056,10 @@
 
   void work(int i) {
     if (i >= _n_workers) return;  // no work needed this round
+
+    double start_time_ms = os::elapsedTime() * 1000.0;
+    _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
+
     ResourceMark rm;
     HandleMark   hm;
 
@@ -4019,7 +4107,7 @@
       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
       double term_ms = pss.term_time()*1000.0;
       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
-      _g1h->g1_policy()->record_termination_time(i, term_ms);
+      _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
     }
     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@@ -4043,7 +4131,8 @@
       double term         = pss.term_time();
       gclog_or_tty->print("  Elapsed: %7.2f ms.\n"
                           "    Strong roots: %7.2f ms (%6.2f%%)\n"
-                          "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
+                          "    Termination:  %7.2f ms (%6.2f%%) "
+                                                 "(in "SIZE_FORMAT" entries)\n",
                           elapsed * 1000.0,
                           strong_roots * 1000.0, (strong_roots*100.0/elapsed),
                           term * 1000.0, (term*100.0/elapsed),
@@ -4059,6 +4148,8 @@
 
     assert(pss.refs_to_scan() == 0, "Task queue should be empty");
     assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+    double end_time_ms = os::elapsedTime() * 1000.0;
+    _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
   }
 };
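
increment_full_collections_completed() above is the notifying half of a handshake: a Java thread that requested the GC sampled total_full_collections() beforehand and then sleeps on FullGCCount_lock until the completed counter passes that sample (per the comment above, the waiting half lives in VM_G1IncCollectionPause::doit_epilogue()). A minimal sketch of the same monitor protocol, using C++11 primitives in place of HotSpot's MonitorLockerEx.

    #include <mutex>
    #include <condition_variable>

    static std::mutex              full_gc_mutex;   // plays FullGCCount_lock
    static std::condition_variable full_gc_cv;
    static unsigned int            completed = 0;

    // GC/concurrent-cycle side, as in increment_full_collections_completed().
    void mark_full_collection_complete() {
      std::lock_guard<std::mutex> g(full_gc_mutex);
      completed += 1;
      full_gc_cv.notify_all();            // wake any waiting System.gc() caller
    }

    // Mutator side: count_before was read under the heap lock before the pause.
    void wait_until_complete(unsigned int count_before) {
      std::unique_lock<std::mutex> g(full_gc_mutex);
      full_gc_cv.wait(g, [&] { return completed > count_before; });
    }
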
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -277,6 +277,18 @@
   void update_surviving_young_words(size_t* surv_young_words);
   void cleanup_surviving_young_words();
 
+  // It decides whether an explicit GC should start a concurrent cycle
+  // instead of doing a STW GC. Currently, a concurrent cycle is
+  // explicitly started if:
+  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
+  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  // Keeps track of how many "full collections" (i.e., Full GCs or
+  // concurrent cycles) we have completed. The number of them we have
+  // started is maintained in _total_full_collections in CollectedHeap.
+  volatile unsigned int _full_collections_completed;
+
 protected:
 
   // Returns "true" iff none of the gc alloc regions have any allocations
@@ -356,13 +368,14 @@
   // GC pause.
   void  retire_alloc_region(HeapRegion* alloc_region, bool par);
 
-  // Helper function for two callbacks below.
-  // "full", if true, indicates that the GC is for a System.gc() request,
-  // and should collect the entire heap.  If "clear_all_soft_refs" is true,
-  // all soft references are cleared during the GC.  If "full" is false,
-  // "word_size" describes the allocation that the GC should
-  // attempt (at least) to satisfy.
-  void do_collection(bool full, bool clear_all_soft_refs,
+  // - if explicit_gc is true, the GC is for a System.gc() or a heap
+  // inspection request and should collect the entire heap
+  // - if clear_all_soft_refs is true, all soft references are cleared
+  // during the GC
+  // - if explicit_gc is false, word_size describes the allocation that
+  // the GC should attempt (at least) to satisfy
+  void do_collection(bool explicit_gc,
+                     bool clear_all_soft_refs,
                      size_t word_size);
 
   // Callback from VM_G1CollectFull operation.
@@ -431,6 +444,26 @@
         _in_cset_fast_test_length * sizeof(bool));
   }
 
+  // This is called at the end of either a concurrent cycle or a Full
+  // GC to update the number of full collections completed. Those two
+  // can happen in a nested fashion, i.e., we start a concurrent
+  // cycle, a Full GC happens half-way through it which ends first,
+  // and then the cycle notices that a Full GC happened and ends
+  // too. The outer parameter is a boolean to help us do a bit tighter
+  // consistency checking in the method. If outer is false, the caller
+  // is the inner caller in the nesting (i.e., the Full GC). If outer
+  // is true, the caller is the outer caller in this nesting (i.e.,
+  // the concurrent cycle). Further nesting is not currently
+  // supported. The end of this call also notifies the
+  // FullGCCount_lock in case a Java thread is waiting for a full GC
+  // to happen (e.g., it called System.gc() with
+  // +ExplicitGCInvokesConcurrent).
+  void increment_full_collections_completed(bool outer);
+
+  unsigned int full_collections_completed() {
+    return _full_collections_completed;
+  }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -444,7 +477,7 @@
 
   // The guts of the incremental collection pause, executed by the vm
   // thread.
-  virtual void do_collection_pause_at_safepoint();
+  virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
   virtual void evacuate_collection_set();
@@ -1549,7 +1582,7 @@
   int _hash_seed;
   int _queue_num;
 
-  int _term_attempts;
+  size_t _term_attempts;
 #if G1_DETAILED_STATS
   int _pushes, _pops, _steals, _steal_attempts;
   int _overflow_pushes;
@@ -1727,8 +1760,8 @@
   int* hash_seed() { return &_hash_seed; }
   int  queue_num() { return _queue_num; }
 
-  int term_attempts()   { return _term_attempts; }
-  void note_term_attempt()  { _term_attempts++; }
+  size_t term_attempts()   { return _term_attempts; }
+  void note_term_attempt() { _term_attempts++; }
 
 #if G1_DETAILED_STATS
   int pushes()          { return _pushes; }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -154,7 +154,6 @@
   _known_garbage_bytes(0),
 
   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _target_pause_time_ms(-1.0),
 
    _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
@@ -231,20 +230,21 @@
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 
+  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
   _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
   _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
 
-  _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
   _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
   _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
 
-  _par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads];
   _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
   _par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads];
 
   _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
 
   _par_last_termination_times_ms = new double[_parallel_gc_threads];
+  _par_last_termination_attempts = new double[_parallel_gc_threads];
+  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
 
   // start conservatively
   _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
@@ -274,10 +274,64 @@
 
   // </NEW PREDICTION>
 
-  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
+  // Below, we might need to calculate the pause time target based on
+  // the pause interval. When we do so we are going to give G1 maximum
+  // flexibility and allow it to do pauses when it needs to. So, we'll
+  // arrange for the pause interval to be pause time target + 1 to
+  // ensure that a) the pause time target is maximized with respect to
+  // the pause interval and b) we maintain the invariant that pause
+  // time target < pause interval. If the user does not want this
+  // maximum flexibility, they will have to set the pause interval
+  // explicitly.
+
+  // First make sure that, if either parameter is set, its value is
+  // reasonable.
+  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+    if (MaxGCPauseMillis < 1) {
+      vm_exit_during_initialization("MaxGCPauseMillis should be "
+                                    "greater than 0");
+    }
+  }
+  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+    if (GCPauseIntervalMillis < 1) {
+      vm_exit_during_initialization("GCPauseIntervalMillis should be "
+                                    "greater than 0");
+    }
+  }
+
+  // Then, if the pause time target parameter was not set, set it to
+  // the default value.
+  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+      // The default pause time target in G1 is 200ms
+      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
+    } else {
+      // We do not allow the pause interval to be set without the
+      // pause time target
+      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
+                                    "without setting MaxGCPauseMillis");
+    }
+  }
+
+  // Then, if the interval parameter was not set, set it according to
+  // the pause time target (this will also deal with the case when the
+  // pause time target is the default value).
+  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
+  }
+
+  // Finally, make sure that the two parameters are consistent.
+  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
+    char buffer[256];
+    jio_snprintf(buffer, 256,
+                 "MaxGCPauseMillis (%u) should be less than "
+                 "GCPauseIntervalMillis (%u)",
+                 MaxGCPauseMillis, GCPauseIntervalMillis);
+    vm_exit_during_initialization(buffer);
+  }
+
   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
-  guarantee(max_gc_time < time_slice,
-            "Max GC time should not be greater than the time slice");
+  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
   _sigma = (double) G1ConfidencePercent / 100.0;
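
A worked trace of the flag resolution above: with neither flag set, MaxGCPauseMillis defaults to 200 and GCPauseIntervalMillis to 201; with only MaxGCPauseMillis=50, the interval becomes 51; GCPauseIntervalMillis alone, a zero value, or target >= interval all abort initialization. A condensed sketch of the same decision sequence, where the bool parameters stand in for !FLAG_IS_DEFAULT(...) and a false return stands in for vm_exit_during_initialization.

    // Hypothetical condensed form of the checks above.
    bool resolve_pause_flags(bool target_set,   unsigned int& target,
                             bool interval_set, unsigned int& interval) {
      if (target_set   && target   < 1) return false;  // target must be > 0
      if (interval_set && interval < 1) return false;  // interval must be > 0
      if (!target_set) {
        if (interval_set) return false;  // interval without a target is rejected
        target = 200;                    // G1's default pause time target
      }
      if (!interval_set) {
        interval = target + 1;           // maximize target w.r.t. the interval
      }
      return target < interval;          // final consistency requirement
    }
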
 
@@ -782,16 +836,17 @@
   // if they are not set properly
 
   for (int i = 0; i < _parallel_gc_threads; ++i) {
-    _par_last_ext_root_scan_times_ms[i] = -666.0;
-    _par_last_mark_stack_scan_times_ms[i] = -666.0;
-    _par_last_update_rs_start_times_ms[i] = -666.0;
-    _par_last_update_rs_times_ms[i] = -666.0;
-    _par_last_update_rs_processed_buffers[i] = -666.0;
-    _par_last_scan_rs_start_times_ms[i] = -666.0;
-    _par_last_scan_rs_times_ms[i] = -666.0;
-    _par_last_scan_new_refs_times_ms[i] = -666.0;
-    _par_last_obj_copy_times_ms[i] = -666.0;
-    _par_last_termination_times_ms[i] = -666.0;
+    _par_last_gc_worker_start_times_ms[i] = -1234.0;
+    _par_last_ext_root_scan_times_ms[i] = -1234.0;
+    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
+    _par_last_update_rs_times_ms[i] = -1234.0;
+    _par_last_update_rs_processed_buffers[i] = -1234.0;
+    _par_last_scan_rs_times_ms[i] = -1234.0;
+    _par_last_scan_new_refs_times_ms[i] = -1234.0;
+    _par_last_obj_copy_times_ms[i] = -1234.0;
+    _par_last_termination_times_ms[i] = -1234.0;
+    _par_last_termination_attempts[i] = -1234.0;
+    _par_last_gc_worker_end_times_ms[i] = -1234.0;
   }
 #endif
 
@@ -942,9 +997,9 @@
   return sum;
 }
 
-void G1CollectorPolicy::print_par_stats (int level,
-                                         const char* str,
-                                         double* data,
+void G1CollectorPolicy::print_par_stats(int level,
+                                        const char* str,
+                                        double* data,
                                         bool summary) {
   double min = data[0], max = data[0];
   double total = 0.0;
@@ -973,10 +1028,10 @@
   gclog_or_tty->print_cr("]");
 }
 
-void G1CollectorPolicy::print_par_buffers (int level,
-                                         const char* str,
-                                         double* data,
-                                         bool summary) {
+void G1CollectorPolicy::print_par_sizes(int level,
+                                        const char* str,
+                                        double* data,
+                                        bool summary) {
   double min = data[0], max = data[0];
   double total = 0.0;
   int j;
@@ -1321,15 +1376,22 @@
       }
       if (parallel) {
         print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
-        print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false);
+        print_par_stats(2, "GC Worker Start Time",
+                        _par_last_gc_worker_start_times_ms, false);
         print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
-        print_par_buffers(3, "Processed Buffers",
-                          _par_last_update_rs_processed_buffers, true);
-        print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
-        print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
+        print_par_sizes(3, "Processed Buffers",
+                        _par_last_update_rs_processed_buffers, true);
+        print_par_stats(2, "Ext Root Scanning",
+                        _par_last_ext_root_scan_times_ms);
+        print_par_stats(2, "Mark Stack Scanning",
+                        _par_last_mark_stack_scan_times_ms);
         print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
         print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
         print_par_stats(2, "Termination", _par_last_termination_times_ms);
+        print_par_sizes(3, "Termination Attempts",
+                        _par_last_termination_attempts, true);
+        print_par_stats(2, "GC Worker End Time",
+                        _par_last_gc_worker_end_times_ms, false);
         print_stats(2, "Other", parallel_other_time);
         print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
       } else {
@@ -1572,8 +1634,6 @@
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   // </NEW PREDICTION>
-
-  _target_pause_time_ms = -1.0;
 }
 
 // <NEW PREDICTION>
@@ -2303,7 +2363,6 @@
     if (reached_target_length) {
       assert( young_list_length > 0 && _g1->young_list()->length() > 0,
               "invariant" );
-      _target_pause_time_ms = max_pause_time_ms;
       return true;
     }
   } else {
@@ -2335,6 +2394,17 @@
 }
 #endif
 
+bool
+G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
+  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+  if (!during_cycle) {
+    set_initiate_conc_mark_if_possible();
+    return true;
+  } else {
+    return false;
+  }
+}
+
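A sketch of the intended call pattern for the new method (caller code assumed, not part of this change):

    // At a safepoint: request an initial-mark pause only if no marking
    // cycle is currently running; otherwise piggy-back on the existing one.
    if (g1_policy()->force_initial_mark_if_outside_cycle()) {
      // initiate_conc_mark_if_possible() is now set; the upcoming pause
      // will attempt to become an initial-mark pause.
    }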
 void
 G1CollectorPolicy::decide_on_conc_mark_initiation() {
   // We are about to decide on whether this pause will be an
@@ -2801,7 +2871,8 @@
 #endif // !PRODUCT
 
 bool
-G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
+G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
+                                                  double target_pause_time_ms) {
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
 
@@ -2814,26 +2885,19 @@
 
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0
-            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
-            "_target_pause_time_ms should have been set!");
-#ifndef PRODUCT
-  if (_target_pause_time_ms <= -1.0) {
-    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
-    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-  }
-#endif
-  assert(_collection_set == NULL, "Precondition");
+  guarantee(target_pause_time_ms > 0.0,
+            err_msg("target_pause_time_ms = %1.6lf should be positive",
+                    target_pause_time_ms));
+  guarantee(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double predicted_pause_time_ms = base_time_ms;
 
-  double target_time_ms = _target_pause_time_ms;
-  double time_remaining_ms = target_time_ms - base_time_ms;
+  double time_remaining_ms = target_pause_time_ms - base_time_ms;
 
   // the 10% and 50% values are arbitrary...
-  if (time_remaining_ms < 0.10*target_time_ms) {
-    time_remaining_ms = 0.50 * target_time_ms;
+  if (time_remaining_ms < 0.10 * target_pause_time_ms) {
+    time_remaining_ms = 0.50 * target_pause_time_ms;
     _within_target = false;
   } else {
     _within_target = true;
@@ -2996,7 +3060,18 @@
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
 
-  return abandon_collection;
+  // Here we are supposed to return whether the pause should be
+  // abandoned or not (i.e., whether the collection set is empty or
+  // not). However, this introduces a subtle issue: when a pause is
+  // initiated explicitly with System.gc() and
+  // +ExplicitGCInvokesConcurrent (see Comment #2 in CR 6944166), it
+  // is supposed to start a marking cycle but can end up abandoned.
+  // So, by returning false here we are telling the caller never to
+  // consider a pause to be abandoned. We'll actually remove all the
+  // code associated with abandoned pauses as part of CR 6963209, but
+  // for the moment we simply disable them this way to avoid further
+  // increasing the scope of the changes for CR 6944166.
+  return false;
 }
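A worked example of the 10%/50% adjustment above (numbers illustrative):

    // target_pause_time_ms = 200.0, base_time_ms = 185.0
    // time_remaining_ms    = 200.0 - 185.0 = 15.0   (< 0.10 * 200.0 = 20.0)
    // => time_remaining_ms is bumped to 0.50 * 200.0 = 100.0 and
    //    _within_target is set to false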
 
 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -171,16 +171,17 @@
   double*    _cur_aux_times_ms;
   bool*      _cur_aux_times_set;
 
+  double* _par_last_gc_worker_start_times_ms;
   double* _par_last_ext_root_scan_times_ms;
   double* _par_last_mark_stack_scan_times_ms;
-  double* _par_last_update_rs_start_times_ms;
   double* _par_last_update_rs_times_ms;
   double* _par_last_update_rs_processed_buffers;
-  double* _par_last_scan_rs_start_times_ms;
   double* _par_last_scan_rs_times_ms;
   double* _par_last_scan_new_refs_times_ms;
   double* _par_last_obj_copy_times_ms;
   double* _par_last_termination_times_ms;
+  double* _par_last_termination_attempts;
+  double* _par_last_gc_worker_end_times_ms;
 
   // indicates that we are in young GC mode
   bool _in_young_gc_mode;
@@ -198,8 +199,6 @@
   size_t _young_cset_length;
   bool   _last_young_gc_full;
 
-  double _target_pause_time_ms;
-
   unsigned              _full_young_pause_num;
   unsigned              _partial_young_pause_num;
 
@@ -525,6 +524,10 @@
     return _mmu_tracker;
   }
 
+  double max_pause_time_ms() {
+    return _mmu_tracker->max_gc_time() * 1000.0;
+  }
+
   double predict_init_time_ms() {
     return get_new_prediction(_concurrent_mark_init_times_ms);
   }
@@ -559,13 +562,14 @@
   }
 
 protected:
-  void print_stats (int level, const char* str, double value);
-  void print_stats (int level, const char* str, int value);
-  void print_par_stats (int level, const char* str, double* data) {
+  void print_stats(int level, const char* str, double value);
+  void print_stats(int level, const char* str, int value);
+
+  void print_par_stats(int level, const char* str, double* data) {
     print_par_stats(level, str, data, true);
   }
-  void print_par_stats (int level, const char* str, double* data, bool summary);
-  void print_par_buffers (int level, const char* str, double* data, bool summary);
+  void print_par_stats(int level, const char* str, double* data, bool summary);
+  void print_par_sizes(int level, const char* str, double* data, bool summary);
 
   void check_other_times(int level,
                          NumberSeq* other_times_ms,
@@ -891,6 +895,10 @@
   virtual void record_full_collection_start();
   virtual void record_full_collection_end();
 
+  void record_gc_worker_start_time(int worker_i, double ms) {
+    _par_last_gc_worker_start_times_ms[worker_i] = ms;
+  }
+
   void record_ext_root_scan_time(int worker_i, double ms) {
     _par_last_ext_root_scan_times_ms[worker_i] = ms;
   }
@@ -912,10 +920,6 @@
     _all_mod_union_times_ms->add(ms);
   }
 
-  void record_update_rs_start_time(int thread, double ms) {
-    _par_last_update_rs_start_times_ms[thread] = ms;
-  }
-
   void record_update_rs_time(int thread, double ms) {
     _par_last_update_rs_times_ms[thread] = ms;
   }
@@ -925,10 +929,6 @@
     _par_last_update_rs_processed_buffers[thread] = processed_buffers;
   }
 
-  void record_scan_rs_start_time(int thread, double ms) {
-    _par_last_scan_rs_start_times_ms[thread] = ms;
-  }
-
   void record_scan_rs_time(int thread, double ms) {
     _par_last_scan_rs_times_ms[thread] = ms;
   }
@@ -953,16 +953,13 @@
     _par_last_obj_copy_times_ms[thread] += ms;
   }
 
-  void record_obj_copy_time(double ms) {
-    record_obj_copy_time(0, ms);
+  void record_termination(int thread, double ms, size_t attempts) {
+    _par_last_termination_times_ms[thread] = ms;
+    _par_last_termination_attempts[thread] = (double) attempts;
   }
 
-  void record_termination_time(int thread, double ms) {
-    _par_last_termination_times_ms[thread] = ms;
-  }
-
-  void record_termination_time(double ms) {
-    record_termination_time(0, ms);
+  void record_gc_worker_end_time(int worker_i, double ms) {
+    _par_last_gc_worker_end_times_ms[worker_i] = ms;
   }
 
   void record_pause_time_ms(double ms) {
@@ -1013,7 +1010,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  virtual bool choose_collection_set() = 0;
+  virtual bool choose_collection_set(double target_pause_time_ms) = 0;
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
@@ -1082,6 +1079,12 @@
   void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
   void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
 
+  // This sets the initiate_conc_mark_if_possible() flag to start a
+  // new cycle, as long as we are not already in one. It is best
+  // called during a safepoint, when the test of whether a cycle
+  // is in progress or not is stable.
+  bool force_initial_mark_if_outside_cycle();
+
   // This is called at the very beginning of an evacuation pause (it
   // has to be the first thing that the pause does). If
   // initiate_conc_mark_if_possible() is true, and the concurrent
@@ -1264,7 +1267,7 @@
   // If the estimate is less than desirable, resize if possible.
   void expand_if_possible(size_t numRegions);
 
-  virtual bool choose_collection_set();
+  virtual bool choose_collection_set(double target_pause_time_ms);
   virtual void record_collection_pause_start(double start_time_sec,
                                              size_t start_used);
   virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -303,7 +303,6 @@
   assert( _cards_scanned != NULL, "invariant" );
   _cards_scanned[worker_i] = scanRScl.cards_done();
 
-  _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
   _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
 }
 
@@ -311,8 +310,6 @@
   ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
 
   double start = os::elapsedTime();
-  _g1p->record_update_rs_start_time(worker_i, start * 1000.0);
-
   // Apply the appropriate closure to all remaining log entries.
   _g1->iterate_dirty_card_closure(false, worker_i);
   // Now there should be no dirty cards.
@@ -471,7 +468,6 @@
       updateRS(worker_i);
       scanNewRefsRS(oc, worker_i);
     } else {
-      _g1p->record_update_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
       _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
       _g1p->record_update_rs_time(worker_i, 0.0);
       _g1p->record_scan_new_refs_time(worker_i, 0.0);
@@ -479,7 +475,6 @@
     if (G1UseParallelRSetScanning || (worker_i == 0)) {
       scanRS(oc, worker_i);
     } else {
-      _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
       _g1p->record_scan_rs_time(worker_i, 0.0);
     }
   } else {
@@ -681,9 +676,27 @@
   // We must complete this write before we do any of the reads below.
   OrderAccess::storeload();
   // And process it, being careful of unallocated portions of TLAB's.
+
+  // The region for the current card may be a young region. The
+  // current card may have been a card that was evicted from the
+  // card cache. When the card was inserted into the cache, we had
+  // determined that its region was non-young. While in the cache,
+  // the region may have been freed during a cleanup pause, reallocated
+  // and tagged as young.
+  //
+  // We wish to filter out cards for such a region, but the current
+  // thread, if we're running concurrently, may "see" the young type
+  // change at any time (so an earlier "is_young" check may pass or
+  // fail arbitrarily). We tell the iteration code to perform this
+  // filtering only once it has determined that there has been an
+  // actual allocation in this region, which makes it safe to check
+  // the young type.
+  bool filter_young = true;
+
   HeapWord* stop_point =
     r->oops_on_card_seq_iterate_careful(dirtyRegion,
-                                        &filter_then_update_rs_oop_cl);
+                                        &filter_then_update_rs_oop_cl,
+                                        filter_young);
+
   // If stop_point is non-null, then we encountered an unallocated region
   // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
   // card and re-enqueue: if we put off the card until a GC pause, then the
@@ -794,8 +807,14 @@
       if (r == NULL) {
         assert(_g1->is_in_permanent(start), "Or else where?");
       } else {
-        guarantee(!r->is_young(), "It was evicted in the current minor cycle.");
-        // Process card pointer we get back from the hot card cache
+        // Checking whether the region we got back from the cache
+        // is young here is inappropriate. The region could have been
+        // freed, reallocated and tagged as young while in the cache.
+        // Hence we could see its young type change at any time.
+        //
+        // Process card pointer we get back from the hot card cache. This
+        // will check whether the region containing the card is young
+        // _after_ checking that the region has been allocated from.
         concurrentRefineOneCard_impl(res, worker_i);
       }
     }
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -658,7 +658,8 @@
 HeapWord*
 HeapRegion::
 oops_on_card_seq_iterate_careful(MemRegion mr,
-                                     FilterOutOfRegionClosure* cl) {
+                                 FilterOutOfRegionClosure* cl,
+                                 bool filter_young) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If we're within a stop-world GC, then we might look at a card in a
@@ -672,6 +673,16 @@
   if (mr.is_empty()) return NULL;
   // Otherwise, find the obj that extends onto mr.start().
 
+  // The intersection of the incoming mr (for the card) and the
+  // allocated part of the region is non-empty. This implies that
+  // we have actually allocated into this region. The code in
+  // G1CollectedHeap.cpp that allocates a new region sets the
+  // is_young tag on the region before allocating. Thus we can
+  // safely check here whether this region is young.
+  if (is_young() && filter_young) {
+    return NULL;
+  }
+
   // We used to use "block_start_careful" here.  But we're actually happy
   // to update the BOT while we do this...
   HeapWord* cur = block_start(mr.start());
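The filter_young parameter supports two call patterns; only the concurrent-refinement caller appears in this change, so the stop-world variant below is an assumption:

    // Concurrent refinement: the young type can change under us, so filter.
    r->oops_on_card_seq_iterate_careful(mr, cl, true  /* filter_young */);
    // A stop-world caller, where the young type is stable, could pass false.
    r->oops_on_card_seq_iterate_careful(mr, cl, false /* filter_young */);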
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -252,7 +252,7 @@
                                 // survivor
   };
 
-  YoungType _young_type;
+  volatile YoungType _young_type;
   int  _young_index_in_cset;
   SurvRateGroup* _surv_rate_group;
   int  _age_index;
@@ -726,9 +726,12 @@
   HeapWord*
   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 
+  // In this version - if filter_young is true and the region
+  // is a young region then we skip the iteration.
   HeapWord*
   oops_on_card_seq_iterate_careful(MemRegion mr,
-                                   FilterOutOfRegionClosure* cl);
+                                   FilterOutOfRegionClosure* cl,
+                                   bool filter_young);
 
   // The region "mr" is entirely in "this", and starts and ends at block
   // boundaries. The caller declares that all the contained blocks are
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -42,8 +42,65 @@
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(!_should_initiate_conc_mark ||
+         ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
+          (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
+         "only a GC locker or a System.gc() induced GC should start a cycle");
+
   GCCauseSetter x(g1h, _gc_cause);
-  g1h->do_collection_pause_at_safepoint();
+  if (_should_initiate_conc_mark) {
+    // It's safer to read full_collections_completed() here, given
+    // that no one else will be updating it concurrently. Since we'll
+    // only need it if we're initiating a marking cycle, there is no
+    // point in setting it earlier.
+    _full_collections_completed_before = g1h->full_collections_completed();
+
+    // At this point we are supposed to start a concurrent cycle. We
+    // will do so if one is not already in progress.
+    g1h->g1_policy()->force_initial_mark_if_outside_cycle();
+  }
+  g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+}
+
+void VM_G1IncCollectionPause::doit_epilogue() {
+  VM_GC_Operation::doit_epilogue();
+
+  // If the pause was initiated by a System.gc() and
+  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
+  // that just started (or maybe one that was already in progress) to
+  // finish.
+  if (_gc_cause == GCCause::_java_lang_system_gc &&
+      _should_initiate_conc_mark) {
+    assert(ExplicitGCInvokesConcurrent,
+           "the only way to be here is if ExplicitGCInvokesConcurrent is set");
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    // In the doit() method we saved g1h->full_collections_completed()
+    // in the _full_collections_completed_before field. We have to
+    // wait until we observe that g1h->full_collections_completed()
+    // has increased by at least one. This can happen if a) we started
+    // a cycle and it completes, b) a cycle already in progress
+    // completes, or c) a Full GC happens.
+
+    // If the condition has already been reached, there's no point in
+    // actually taking the lock and doing the wait.
+    if (g1h->full_collections_completed() <=
+                                          _full_collections_completed_before) {
+      // The following is largely copied from CMS
+
+      Thread* thr = Thread::current();
+      assert(thr->is_Java_thread(), "invariant");
+      JavaThread* jt = (JavaThread*)thr;
+      ThreadToNativeFromVM native(jt);
+
+      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+      while (g1h->full_collections_completed() <=
+                                          _full_collections_completed_before) {
+        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
+      }
+    }
+  }
 }
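The wait loop above is the standard check / lock / re-check monitor idiom. A minimal standalone sketch of the same pattern, with standard C++ standing in for FullGCCount_lock and MutexLockerEx (illustrative only, not HotSpot code):

    #include <condition_variable>
    #include <mutex>

    std::mutex              full_gc_count_mutex;
    std::condition_variable full_gc_count_cv;
    unsigned int            full_collections_completed = 0;

    // Block until the counter exceeds the value saved before the pause.
    void wait_for_cycle_completion(unsigned int completed_before) {
      std::unique_lock<std::mutex> lock(full_gc_count_mutex);
      full_gc_count_cv.wait(lock, [&] {
        return full_collections_completed > completed_before;
      });
    }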
 
 void VM_CGC_Operation::doit() {
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -31,13 +31,12 @@
 //   - VM_G1PopRegionCollectionPause
 
 class VM_G1CollectFull: public VM_GC_Operation {
- private:
  public:
-  VM_G1CollectFull(int gc_count_before,
-                   GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before)
-  {
-    _gc_cause = gc_cause;
+  VM_G1CollectFull(unsigned int gc_count_before,
+                   unsigned int full_gc_count_before,
+                   GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before, full_gc_count_before) {
+    _gc_cause = cause;
   }
   ~VM_G1CollectFull() {}
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
@@ -67,12 +66,28 @@
 };
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
- public:
-  VM_G1IncCollectionPause(int gc_count_before,
-                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
-    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
+private:
+  bool _should_initiate_conc_mark;
+  double _target_pause_time_ms;
+  unsigned int _full_collections_completed_before;
+public:
+  VM_G1IncCollectionPause(unsigned int   gc_count_before,
+                          bool           should_initiate_conc_mark,
+                          double         target_pause_time_ms,
+                          GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before),
+      _full_collections_completed_before(0),
+      _should_initiate_conc_mark(should_initiate_conc_mark),
+      _target_pause_time_ms(target_pause_time_ms) {
+    guarantee(target_pause_time_ms > 0.0,
+              err_msg("target_pause_time_ms = %1.6lf should be positive",
+                      target_pause_time_ms));
+
+    _gc_cause = cause;
+  }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
+  virtual void doit_epilogue();
   virtual const char* name() const {
     return "garbage-first incremental collection pause";
   }
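A hedged sketch of how a caller might issue this operation (caller context assumed; only the constructor signature comes from this change):

    // e.g. for a System.gc() with +ExplicitGCInvokesConcurrent:
    VM_G1IncCollectionPause op(gc_count_before,
                               true  /* should_initiate_conc_mark */,
                               g1_policy()->max_pause_time_ms(),
                               GCCause::_java_lang_system_gc);
    VMThread::execute(&op);  // doit() runs at a safepoint; doit_epilogue()
                             // then runs in the requesting thread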
--- a/src/share/vm/gc_implementation/includeDB_gc_g1	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/includeDB_gc_g1	Sat Jul 31 15:10:59 2010 +0100
@@ -367,4 +367,6 @@
 
 vm_operations_g1.cpp			vm_operations_g1.hpp
 vm_operations_g1.cpp                    g1CollectedHeap.inline.hpp
+vm_operations_g1.cpp                    g1CollectorPolicy.hpp
+vm_operations_g1.cpp                    interfaceSupport.hpp
 vm_operations_g1.cpp                    isGCActiveMark.hpp
--- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Sat Jul 31 15:10:59 2010 +0100
@@ -270,7 +270,7 @@
 psParallelCompact.cpp			pcTasks.hpp
 psParallelCompact.cpp			psMarkSweep.hpp
 psParallelCompact.cpp			psMarkSweepDecorator.hpp
-psParallelCompact.cpp			psCompactionManager.hpp
+psParallelCompact.cpp			psCompactionManager.inline.hpp
 psParallelCompact.cpp                   psPromotionManager.inline.hpp
 psParallelCompact.cpp			psOldGen.hpp
 psParallelCompact.cpp			psParallelCompact.hpp
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -539,10 +539,9 @@
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 
   for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
-    ObjToScanQueuePadded *q_padded = new ObjToScanQueuePadded();
-    guarantee(q_padded != NULL, "work_queue Allocation failure.");
-
-    _task_queues->register_queue(i1, &q_padded->work_queue);
+    ObjToScanQueue *q = new ObjToScanQueue();
+    guarantee(q != NULL, "work_queue allocation failure.");
+    _task_queues->register_queue(i1, q);
   }
 
   for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,8 @@
 // but they must be here to allow ParScanClosure::do_oop_work to be defined
 // in genOopClosures.inline.hpp.
 
-typedef OopTaskQueue       ObjToScanQueue;
-typedef OopTaskQueueSet    ObjToScanQueueSet;
+typedef Padded<OopTaskQueue> ObjToScanQueue;
+typedef GenericTaskQueueSet<ObjToScanQueue> ObjToScanQueueSet;
 
 // Enable this to get push/pop/steal stats.
 const int PAR_STATS_ENABLED = 0;
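The Padded<OopTaskQueue> typedef keeps adjacent per-thread queues on separate cache lines, replacing the hand-rolled ObjToScanQueuePadded struct deleted below. A modern sketch of the idea (not HotSpot's actual template, which predates C++11):

    #include <cstddef>  // std::size_t

    // alignas pads sizeof(Padded<T>) up to a cache-line multiple, so
    // queues packed into an array can never share a line.
    template <typename T, std::size_t CacheLine = 64>
    struct alignas(CacheLine) Padded : public T { };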
@@ -304,12 +304,6 @@
   friend class ParEvacuateFollowersClosure;
 
  private:
-  // XXX use a global constant instead of 64!
-  struct ObjToScanQueuePadded {
-        ObjToScanQueue work_queue;
-        char pad[64 - sizeof(ObjToScanQueue)];  // prevent false sharing
-  };
-
   // The per-worker-thread work queues
   ObjToScanQueueSet* _task_queues;
 
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,8 @@
 
 class ParScanThreadState;
 class ParNewGeneration;
-typedef OopTaskQueueSet ObjToScanQueueSet;
+typedef Padded<OopTaskQueue> ObjToScanQueue;
+typedef GenericTaskQueueSet<ObjToScanQueue> ObjToScanQueueSet;
 class ParallelTaskTerminator;
 
 class ParScanClosure: public OopsInGenClosure {
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,14 +566,14 @@
 #endif
 
   // Commit new or uncommit old pages, if necessary.
-  resize_commit_uncommit(changed_region, new_region);
+  if (resize_commit_uncommit(changed_region, new_region)) {
+    // Set the new start of the committed region
+    resize_update_committed_table(changed_region, new_region);
+  }
 
   // Update card table entries
   resize_update_card_table_entries(changed_region, new_region);
 
-  // Set the new start of the committed region
-  resize_update_committed_table(changed_region, new_region);
-
   // Update the covered region
   resize_update_covered_table(changed_region, new_region);
 
@@ -604,8 +604,9 @@
   debug_only(verify_guard();)
 }
 
-void CardTableExtension::resize_commit_uncommit(int changed_region,
+bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                 MemRegion new_region) {
+  bool result = false;
   // Commit new or uncommit old pages, if necessary.
   MemRegion cur_committed = _committed[changed_region];
   assert(_covered[changed_region].end() == new_region.end(),
@@ -675,20 +676,31 @@
                               "card table expansion");
       }
     }
+    result = true;
   } else if (new_start_aligned > cur_committed.start()) {
     // Shrink the committed region
+#if 0 // uncommitting space is currently unsafe because of the interactions
+      // of growing and shrinking regions.  One region A can uncommit space
+      // that it owns but which may be in use by another region B.
+      // Region B has not committed the space because it was already
+      // committed by region A.
     MemRegion uncommit_region = committed_unique_to_self(changed_region,
       MemRegion(cur_committed.start(), new_start_aligned));
     if (!uncommit_region.is_empty()) {
       if (!os::uncommit_memory((char*)uncommit_region.start(),
                                uncommit_region.byte_size())) {
-        vm_exit_out_of_memory(uncommit_region.byte_size(),
-          "card table contraction");
+        // If the uncommit fails, ignore it.  Let the
+        // committed table resizing go even though the committed
+        // table will overstate the committed space.
       }
     }
+#else
+    assert(!result, "Should be false with current workaround");
+#endif
   }
   assert(_committed[changed_region].end() == cur_committed.end(),
     "end should not change");
+  return result;
 }
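An illustration of the hazard described in the #if 0 block (addresses hypothetical):

    // committed[A] = [0x1000, 0x3000)    committed[B] = [0x2000, 0x4000)
    // B never committed [0x2000, 0x3000) itself: A had already done so
    // when B grew over it. If A now shrinks to [0x1000, 0x2000) and
    // uncommits [0x2000, 0x3000), it releases pages B still uses.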
 
 void CardTableExtension::resize_update_committed_table(int changed_region,
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,9 @@
 class CardTableExtension : public CardTableModRefBS {
  private:
   // Support methods for resizing the card table.
-  void resize_commit_uncommit(int changed_region, MemRegion new_region);
+  // resize_commit_uncommit() returns true if the pages were committed or
+  // uncommitted.
+  bool resize_commit_uncommit(int changed_region, MemRegion new_region);
   void resize_update_card_table_entries(int changed_region,
                                         MemRegion new_region);
   void resize_update_committed_table(int changed_region, MemRegion new_region);
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
   ParCompactionManager::_objarray_queues = NULL;
 ObjectStartArray*    ParCompactionManager::_start_array = NULL;
 ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
-RegionTaskQueueSet*   ParCompactionManager::_region_array = NULL;
+RegionTaskQueueSet*  ParCompactionManager::_region_array = NULL;
 
 ParCompactionManager::ParCompactionManager() :
     _action(CopyAndUpdate) {
@@ -43,25 +43,9 @@
   _old_gen = heap->old_gen();
   _start_array = old_gen()->start_array();
 
-
   marking_stack()->initialize();
-
-  // We want the overflow stack to be permanent
-  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-
-  _objarray_queue.initialize();
-  _objarray_overflow_stack =
-    new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
-
-#ifdef USE_RegionTaskQueueWithOverflow
+  _objarray_stack.initialize();
   region_stack()->initialize();
-#else
-  region_stack()->initialize();
-
-  // We want the overflow stack to be permanent
-  _region_overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
-#endif
 
   // Note that _revisit_klass_stack is allocated out of the
   // C heap (as opposed to out of ResourceArena).
@@ -71,12 +55,9 @@
   // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
   // have to do for now until we are able to investigate a more optimal setting.
   _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
-
 }
 
 ParCompactionManager::~ParCompactionManager() {
-  delete _overflow_stack;
-  delete _objarray_overflow_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
   // _manager_array and _stack_array are statics
@@ -108,12 +89,8 @@
     _manager_array[i] = new ParCompactionManager();
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
-    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
-#ifdef USE_RegionTaskQueueWithOverflow
-    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
-#else
+    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
     region_array()->register_queue(i, _manager_array[i]->region_stack());
-#endif
   }
 
   // The VMThread gets its own ParCompactionManager, which is not available
@@ -149,57 +126,6 @@
   return action() == ParCompactionManager::ResetObjects;
 }
 
-// For now save on a stack
-void ParCompactionManager::save_for_scanning(oop m) {
-  stack_push(m);
-}
-
-void ParCompactionManager::stack_push(oop obj) {
-
-  if(!marking_stack()->push(obj)) {
-    overflow_stack()->push(obj);
-  }
-}
-
-oop ParCompactionManager::retrieve_for_scanning() {
-
-  // Should not be used in the parallel case
-  ShouldNotReachHere();
-  return NULL;
-}
-
-// Save region on a stack
-void ParCompactionManager::save_for_processing(size_t region_index) {
-#ifdef ASSERT
-  const ParallelCompactData& sd = PSParallelCompact::summary_data();
-  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
-  assert(region_ptr->claimed(), "must be claimed");
-  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
-#endif
-  region_stack_push(region_index);
-}
-
-void ParCompactionManager::region_stack_push(size_t region_index) {
-
-#ifdef USE_RegionTaskQueueWithOverflow
-  region_stack()->save(region_index);
-#else
-  if(!region_stack()->push(region_index)) {
-    region_overflow_stack()->push(region_index);
-  }
-#endif
-}
-
-bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
-#ifdef USE_RegionTaskQueueWithOverflow
-  return region_stack()->retrieve(region_index);
-#else
-  // Should not be used in the parallel case
-  ShouldNotReachHere();
-  return false;
-#endif
-}
-
 ParCompactionManager*
 ParCompactionManager::gc_thread_compaction_manager(int index) {
   assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
@@ -218,8 +144,8 @@
   do {
     // Drain the overflow stack first, to allow stealing from the marking stack.
     oop obj;
-    while (!overflow_stack()->is_empty()) {
-      overflow_stack()->pop()->follow_contents(this);
+    while (marking_stack()->pop_overflow(obj)) {
+      obj->follow_contents(this);
     }
     while (marking_stack()->pop_local(obj)) {
       obj->follow_contents(this);
@@ -227,11 +153,10 @@
 
     // Process ObjArrays one at a time to avoid marking stack bloat.
     ObjArrayTask task;
-    if (!_objarray_overflow_stack->is_empty()) {
-      task = _objarray_overflow_stack->pop();
+    if (_objarray_stack.pop_overflow(task)) {
       objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
       k->oop_follow_contents(this, task.obj(), task.index());
-    } else if (_objarray_queue.pop_local(task)) {
+    } else if (_objarray_stack.pop_local(task)) {
       objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
       k->oop_follow_contents(this, task.obj(), task.index());
     }
@@ -240,68 +165,18 @@
   assert(marking_stacks_empty(), "Sanity");
 }
 
-void ParCompactionManager::drain_region_overflow_stack() {
-  size_t region_index = (size_t) -1;
-  while(region_stack()->retrieve_from_overflow(region_index)) {
-    PSParallelCompact::fill_and_update_region(this, region_index);
-  }
-}
-
 void ParCompactionManager::drain_region_stacks() {
-#ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-  MutableSpace* to_space = heap->young_gen()->to_space();
-  MutableSpace* old_space = heap->old_gen()->object_space();
-  MutableSpace* perm_space = heap->perm_gen()->object_space();
-#endif /* ASSERT */
-
-#if 1 // def DO_PARALLEL - the serial code hasn't been updated
   do {
-
-#ifdef USE_RegionTaskQueueWithOverflow
-    // Drain overflow stack first, so other threads can steal from
-    // claimed stack while we work.
-    size_t region_index = (size_t) -1;
-    while(region_stack()->retrieve_from_overflow(region_index)) {
+    // Drain overflow stack first so other threads can steal.
+    size_t region_index;
+    while (region_stack()->pop_overflow(region_index)) {
       PSParallelCompact::fill_and_update_region(this, region_index);
     }
 
-    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
+    while (region_stack()->pop_local(region_index)) {
       PSParallelCompact::fill_and_update_region(this, region_index);
     }
   } while (!region_stack()->is_empty());
-#else
-    // Drain overflow stack first, so other threads can steal from
-    // claimed stack while we work.
-    while(!region_overflow_stack()->is_empty()) {
-      size_t region_index = region_overflow_stack()->pop();
-      PSParallelCompact::fill_and_update_region(this, region_index);
-    }
-
-    size_t region_index = -1;
-    // obj is a reference!!!
-    while (region_stack()->pop_local(region_index)) {
-      // It would be nice to assert about the type of objects we might
-      // pop, but they can come from anywhere, unfortunately.
-      PSParallelCompact::fill_and_update_region(this, region_index);
-    }
-  } while((region_stack()->size() != 0) ||
-          (region_overflow_stack()->length() != 0));
-#endif
-
-#ifdef USE_RegionTaskQueueWithOverflow
-  assert(region_stack()->is_empty(), "Sanity");
-#else
-  assert(region_stack()->size() == 0, "Sanity");
-  assert(region_overflow_stack()->length() == 0, "Sanity");
-#endif
-#else
-  oop obj;
-  while (obj = retrieve_for_scanning()) {
-    obj->follow_contents(this);
-  }
-#endif
 }
 
 #ifdef ASSERT
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,10 +59,10 @@
 
  private:
   // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
-  #define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
-  typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue;
-  typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
-  #undef OBJARRAY_QUEUE_SIZE
+  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
+  typedef OverflowTaskQueue<ObjArrayTask, QUEUE_SIZE> ObjArrayTaskQueue;
+  typedef GenericTaskQueueSet<ObjArrayTaskQueue>      ObjArrayTaskQueueSet;
+  #undef QUEUE_SIZE
 
   static ParCompactionManager** _manager_array;
   static OopTaskQueueSet*       _stack_array;
@@ -72,23 +72,13 @@
   static PSOldGen*              _old_gen;
 
 private:
-  OopTaskQueue                  _marking_stack;
-  GrowableArray<oop>*           _overflow_stack;
-
-  typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack;
-  ObjArrayTaskQueue             _objarray_queue;
-  ObjArrayOverflowStack*        _objarray_overflow_stack;
+  OverflowTaskQueue<oop>        _marking_stack;
+  ObjArrayTaskQueue             _objarray_stack;
 
   // Is there a way to reuse the _marking_stack for the
   // saving empty regions?  For now just create a different
   // type of TaskQueue.
-
-#ifdef USE_RegionTaskQueueWithOverflow
-  RegionTaskQueueWithOverflow   _region_stack;
-#else
   RegionTaskQueue               _region_stack;
-  GrowableArray<size_t>*        _region_overflow_stack;
-#endif
 
 #if 1  // does this happen enough to need a per thread stack?
   GrowableArray<Klass*>*        _revisit_klass_stack;
@@ -107,16 +97,8 @@
  protected:
   // Array of tasks.  Needed by the ParallelTaskTerminator.
   static RegionTaskQueueSet* region_array()      { return _region_array; }
-  OopTaskQueue*  marking_stack()                 { return &_marking_stack; }
-  GrowableArray<oop>* overflow_stack()           { return _overflow_stack; }
-#ifdef USE_RegionTaskQueueWithOverflow
-  RegionTaskQueueWithOverflow* region_stack()    { return &_region_stack; }
-#else
-  RegionTaskQueue*  region_stack()               { return &_region_stack; }
-  GrowableArray<size_t>* region_overflow_stack() {
-    return _region_overflow_stack;
-  }
-#endif
+  OverflowTaskQueue<oop>*  marking_stack()       { return &_marking_stack; }
+  RegionTaskQueue* region_stack()                { return &_region_stack; }
 
   // Pushes onto the marking stack.  If the marking stack is full,
   // pushes onto the overflow stack.
@@ -124,11 +106,7 @@
   // Do not implement an equivalent stack_pop.  Deal with the
   // marking stack and overflow stack directly.
 
-  // Pushes onto the region stack.  If the region stack is full,
-  // pushes onto the region overflow stack.
-  void region_stack_push(size_t region_index);
-
-public:
+ public:
   Action action() { return _action; }
   void set_action(Action v) { _action = v; }
 
@@ -157,22 +135,15 @@
   GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
 #endif
 
-  // Save oop for later processing.  Must not fail.
-  void save_for_scanning(oop m);
-  // Get a oop for scanning.  If returns null, no oop were found.
-  oop retrieve_for_scanning();
-
-  inline void push_objarray(oop obj, size_t index);
-
-  // Save region for later processing.  Must not fail.
-  void save_for_processing(size_t region_index);
-  // Get a region for processing.  If returns null, no region were found.
-  bool retrieve_for_processing(size_t& region_index);
+  // Save for later processing.  Must not fail.
+  inline void push(oop obj) { _marking_stack.push(obj); }
+  inline void push_objarray(oop objarray, size_t index);
+  inline void push_region(size_t index);
 
   // Access function for compaction managers
   static ParCompactionManager* gc_thread_compaction_manager(int index);
 
-  static bool steal(int queue_num, int* seed, Task& t) {
+  static bool steal(int queue_num, int* seed, oop& t) {
     return stack_array()->steal(queue_num, seed, t);
   }
 
@@ -180,8 +151,8 @@
     return _objarray_queues->steal(queue_num, seed, t);
   }
 
-  static bool steal(int queue_num, int* seed, RegionTask& t) {
-    return region_array()->steal(queue_num, seed, t);
+  static bool steal(int queue_num, int* seed, size_t& region) {
+    return region_array()->steal(queue_num, seed, region);
   }
 
   // Process tasks remaining on any marking stack
@@ -191,9 +162,6 @@
   // Process tasks remaining on any stack
   void drain_region_stacks();
 
-  // Process tasks remaining on any stack
-  void drain_region_overflow_stack();
-
   // Debugging support
 #ifdef ASSERT
   bool stacks_have_been_allocated();
@@ -208,6 +176,5 @@
 }
 
 bool ParCompactionManager::marking_stacks_empty() const {
-  return _marking_stack.size() == 0 && _overflow_stack->is_empty() &&
-    _objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty();
+  return _marking_stack.is_empty() && _objarray_stack.is_empty();
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -26,7 +26,16 @@
 {
   ObjArrayTask task(obj, index);
   assert(task.is_valid(), "bad ObjArrayTask");
-  if (!_objarray_queue.push(task)) {
-    _objarray_overflow_stack->push(task);
-  }
+  _objarray_stack.push(task);
 }
+
+void ParCompactionManager::push_region(size_t index)
+{
+#ifdef ASSERT
+  const ParallelCompactData& sd = PSParallelCompact::summary_data();
+  ParallelCompactData::RegionData* const region_ptr = sd.region(index);
+  assert(region_ptr->claimed(), "must be claimed");
+  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
+#endif
+  region_stack()->push(index);
+}
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -2474,7 +2474,7 @@
     for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
       if (sd.region(cur)->claim_unsafe()) {
         ParCompactionManager* cm = ParCompactionManager::manager_array(which);
-        cm->save_for_processing(cur);
+        cm->push_region(cur);
 
         if (TraceParallelOldGCCompactionPhase && Verbose) {
           const size_t count_mod_8 = fillable_regions & 7;
@@ -3138,7 +3138,7 @@
     assert(cur->data_size() > 0, "region must have live data");
     cur->decrement_destination_count();
     if (cur < enqueue_end && cur->available() && cur->claim()) {
-      cm->save_for_processing(sd.region(cur));
+      cm->push_region(sd.region(cur));
     }
   }
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1297,11 +1297,8 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (mark_bitmap()->is_unmarked(obj)) {
-      if (mark_obj(obj)) {
-        // This thread marked the object and owns the subsequent processing of it.
-        cm->save_for_scanning(obj);
-      }
+    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
+      cm->push(obj);
     }
   }
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,84 +90,70 @@
 }
 
 void PSPromotionManager::post_scavenge() {
-#if PS_PM_STATS
-  print_stats();
-#endif // PS_PM_STATS
-
-  for(uint i=0; i<ParallelGCThreads+1; i++) {
+  TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
+  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     PSPromotionManager* manager = manager_array(i);
-
-    // the guarantees are a bit gratuitous but, if one fires, we'll
-    // have a better idea of what went wrong
-    if (i < ParallelGCThreads) {
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_depth()->length() <= 0),
-                "promotion manager overflow stack must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_breadth()->length() <= 0),
-                "promotion manager overflow stack must be empty");
-
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_depth()->size() <= 0),
-                "promotion manager claimed stack must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_breadth()->size() <= 0),
-                "promotion manager claimed stack must be empty");
+    if (UseDepthFirstScavengeOrder) {
+      assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
     } else {
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_depth()->length() <= 0),
-                "VM Thread promotion manager overflow stack "
-                "must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_breadth()->length() <= 0),
-                "VM Thread promotion manager overflow stack "
-                "must be empty");
-
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_depth()->size() <= 0),
-                "VM Thread promotion manager claimed stack "
-                "must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_breadth()->size() <= 0),
-                "VM Thread promotion manager claimed stack "
-                "must be empty");
+      assert(manager->claimed_stack_breadth()->is_empty(), "should be empty");
     }
-
     manager->flush_labs();
   }
 }
 
-#if PS_PM_STATS
-
+#if TASKQUEUE_STATS
 void
-PSPromotionManager::print_stats(uint i) {
-  tty->print_cr("---- GC Worker %2d Stats", i);
-  tty->print_cr("    total pushes            %8d", _total_pushes);
-  tty->print_cr("    masked pushes           %8d", _masked_pushes);
-  tty->print_cr("    overflow pushes         %8d", _overflow_pushes);
-  tty->print_cr("    max overflow length     %8d", _max_overflow_length);
-  tty->print_cr("");
-  tty->print_cr("    arrays chunked          %8d", _arrays_chunked);
-  tty->print_cr("    array chunks processed  %8d", _array_chunks_processed);
-  tty->print_cr("");
-  tty->print_cr("    total steals            %8d", _total_steals);
-  tty->print_cr("    masked steals           %8d", _masked_steals);
-  tty->print_cr("");
+PSPromotionManager::print_taskqueue_stats(uint i) const {
+  const TaskQueueStats& stats = depth_first() ?
+    _claimed_stack_depth.stats : _claimed_stack_breadth.stats;
+  tty->print("%3u ", i);
+  stats.print();
+  tty->cr();
 }
 
 void
+PSPromotionManager::print_local_stats(uint i) const {
+  #define FMT " " SIZE_FORMAT_W(10)
+  tty->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
+                _arrays_chunked, _array_chunks_processed);
+  #undef FMT
+}
+
+static const char* const pm_stats_hdr[] = {
+  "    --------masked-------     arrays      array",
+  "thr       push      steal    chunked     chunks",
+  "--- ---------- ---------- ---------- ----------"
+};
+
+void
 PSPromotionManager::print_stats() {
-  tty->print_cr("== GC Tasks Stats (%s), GC %3d",
-                (UseDepthFirstScavengeOrder) ? "Depth-First" : "Breadth-First",
+  const bool df = UseDepthFirstScavengeOrder;
+  tty->print_cr("== GC Task Stats (%s-First), GC %3d", df ? "Depth" : "Breadth",
                 Universe::heap()->total_collections());
 
-  for (uint i = 0; i < ParallelGCThreads+1; ++i) {
-    PSPromotionManager* manager = manager_array(i);
-    manager->print_stats(i);
+  tty->print("thr "); TaskQueueStats::print_header(1); tty->cr();
+  tty->print("--- "); TaskQueueStats::print_header(2); tty->cr();
+  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
+    manager_array(i)->print_taskqueue_stats(i);
+  }
+
+  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
+  for (uint i = 0; i < hlines; ++i) tty->print_cr(pm_stats_hdr[i]);
+  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
+    manager_array(i)->print_local_stats(i);
   }
 }
 
-#endif // PS_PM_STATS
+void
+PSPromotionManager::reset_stats() {
+  TaskQueueStats& stats = depth_first() ?
+    claimed_stack_depth()->stats : claimed_stack_breadth()->stats;
+  stats.reset();
+  _masked_pushes = _masked_steals = 0;
+  _arrays_chunked = _array_chunks_processed = 0;
+}
+#endif // TASKQUEUE_STATS
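With the headers above, a print_local_stats() line lays out as follows (numbers illustrative):

        --------masked-------     arrays      array
    thr       push      steal    chunked     chunks
    --- ---------- ---------- ---------- ----------
      0        123         45          6        789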
 
 PSPromotionManager::PSPromotionManager() {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -181,15 +167,9 @@
   if (depth_first()) {
     claimed_stack_depth()->initialize();
     queue_size = claimed_stack_depth()->max_elems();
-    // We want the overflow stack to be permanent
-    _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
-    _overflow_stack_breadth = NULL;
   } else {
     claimed_stack_breadth()->initialize();
     queue_size = claimed_stack_breadth()->max_elems();
-    // We want the overflow stack to be permanent
-    _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-    _overflow_stack_depth = NULL;
   }
 
   _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
@@ -209,8 +189,7 @@
 }
 
 void PSPromotionManager::reset() {
-  assert(claimed_stack_empty(), "reset of non-empty claimed stack");
-  assert(overflow_stack_empty(), "reset of non-empty overflow stack");
+  assert(stacks_empty(), "reset of non-empty stack");
 
   // We need to get an assert in here to make sure the labs are always flushed.
 
@@ -228,22 +207,13 @@
 
   _prefetch_queue.clear();
 
-#if PS_PM_STATS
-  _total_pushes = 0;
-  _masked_pushes = 0;
-  _overflow_pushes = 0;
-  _max_overflow_length = 0;
-  _arrays_chunked = 0;
-  _array_chunks_processed = 0;
-  _total_steals = 0;
-  _masked_steals = 0;
-#endif // PS_PM_STATS
+  TASKQUEUE_STATS_ONLY(reset_stats());
 }
 
 
 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
   assert(depth_first(), "invariant");
-  assert(overflow_stack_depth() != NULL, "invariant");
+  assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant");
   totally_drain = totally_drain || _totally_drain;
 
 #ifdef ASSERT
@@ -254,41 +224,35 @@
   MutableSpace* perm_space = heap->perm_gen()->object_space();
 #endif /* ASSERT */
 
+  OopStarTaskQueue* const tq = claimed_stack_depth();
   do {
     StarTask p;
 
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    while(!overflow_stack_depth()->is_empty()) {
-      // linux compiler wants different overloaded operator= in taskqueue to
-      // assign to p that the other compilers don't like.
-      StarTask ptr = overflow_stack_depth()->pop();
-      process_popped_location_depth(ptr);
+    while (tq->pop_overflow(p)) {
+      process_popped_location_depth(p);
     }
 
     if (totally_drain) {
-      while (claimed_stack_depth()->pop_local(p)) {
+      while (tq->pop_local(p)) {
         process_popped_location_depth(p);
       }
     } else {
-      while (claimed_stack_depth()->size() > _target_stack_size &&
-             claimed_stack_depth()->pop_local(p)) {
+      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
         process_popped_location_depth(p);
       }
     }
-  } while( (totally_drain && claimed_stack_depth()->size() > 0) ||
-           (overflow_stack_depth()->length() > 0) );
+  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());
 
-  assert(!totally_drain || claimed_stack_empty(), "Sanity");
-  assert(totally_drain ||
-         claimed_stack_depth()->size() <= _target_stack_size,
-         "Sanity");
-  assert(overflow_stack_empty(), "Sanity");
+  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
+  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
+  assert(tq->overflow_empty(), "Sanity");
 }
 
 void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
   assert(!depth_first(), "invariant");
-  assert(overflow_stack_breadth() != NULL, "invariant");
+  assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant");
   totally_drain = totally_drain || _totally_drain;
 
 #ifdef ASSERT
@@ -299,51 +263,39 @@
   MutableSpace* perm_space = heap->perm_gen()->object_space();
 #endif /* ASSERT */
 
+  OverflowTaskQueue<oop>* const tq = claimed_stack_breadth();
   do {
     oop obj;
 
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    while(!overflow_stack_breadth()->is_empty()) {
-      obj = overflow_stack_breadth()->pop();
+    while (tq->pop_overflow(obj)) {
       obj->copy_contents(this);
     }
 
     if (totally_drain) {
-      // obj is a reference!!!
-      while (claimed_stack_breadth()->pop_local(obj)) {
-        // It would be nice to assert about the type of objects we might
-        // pop, but they can come from anywhere, unfortunately.
+      while (tq->pop_local(obj)) {
         obj->copy_contents(this);
       }
     } else {
-      // obj is a reference!!!
-      while (claimed_stack_breadth()->size() > _target_stack_size &&
-             claimed_stack_breadth()->pop_local(obj)) {
-        // It would be nice to assert about the type of objects we might
-        // pop, but they can come from anywhere, unfortunately.
+      while (tq->size() > _target_stack_size && tq->pop_local(obj)) {
         obj->copy_contents(this);
       }
     }
 
     // If we could not find any other work, flush the prefetch queue
-    if (claimed_stack_breadth()->size() == 0 &&
-        (overflow_stack_breadth()->length() == 0)) {
+    if (tq->is_empty()) {
       flush_prefetch_queue();
     }
-  } while((totally_drain && claimed_stack_breadth()->size() > 0) ||
-          (overflow_stack_breadth()->length() > 0));
+  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());
 
-  assert(!totally_drain || claimed_stack_empty(), "Sanity");
-  assert(totally_drain ||
-         claimed_stack_breadth()->size() <= _target_stack_size,
-         "Sanity");
-  assert(overflow_stack_empty(), "Sanity");
+  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
+  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
+  assert(tq->overflow_empty(), "Sanity");
 }
 
 void PSPromotionManager::flush_labs() {
-  assert(claimed_stack_empty(), "Attempt to flush lab with live stack");
-  assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack");
+  assert(stacks_empty(), "Attempt to flush lab with live stack");
 
   // If either promotion lab fills up, we can flush the
   // lab but not refill it, so check first.
@@ -480,14 +432,9 @@
             new_obj->is_objArray() &&
             PSChunkLargeArrays) {
           // we'll chunk it
-#if PS_PM_STATS
-          ++_arrays_chunked;
-#endif // PS_PM_STATS
           oop* const masked_o = mask_chunked_array_oop(o);
           push_depth(masked_o);
-#if PS_PM_STATS
-          ++_masked_pushes;
-#endif // PS_PM_STATS
+          TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
         } else {
           // we'll just push its contents
           new_obj->push_contents(this);
@@ -551,9 +498,7 @@
   assert(old->is_objArray(), "invariant");
   assert(old->is_forwarded(), "invariant");
 
-#if PS_PM_STATS
-  ++_array_chunks_processed;
-#endif // PS_PM_STATS
+  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);
 
   oop const obj = old->forwardee();
 
@@ -565,9 +510,7 @@
     assert(start > 0, "invariant");
     arrayOop(old)->set_length(start);
     push_depth(mask_chunked_array_oop(old));
-#if PS_PM_STATS
-    ++_masked_pushes;
-#endif // PS_PM_STATS
+    TASKQUEUE_STATS_ONLY(++_masked_pushes);
   } else {
     // this is the final chunk for this array
     start = 0;
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,6 @@
 class PSOldGen;
 class ParCompactionManager;
 
-#define PS_PM_STATS         0
-
 class PSPromotionManager : public CHeapObj {
   friend class PSScavenge;
   friend class PSRefProcTaskExecutor;
@@ -54,22 +52,18 @@
   static PSOldGen*                    _old_gen;
   static MutableSpace*                _young_space;
 
-#if PS_PM_STATS
-  uint                                _total_pushes;
-  uint                                _masked_pushes;
-
-  uint                                _overflow_pushes;
-  uint                                _max_overflow_length;
+#if TASKQUEUE_STATS
+  size_t                              _masked_pushes;
+  size_t                              _masked_steals;
+  size_t                              _arrays_chunked;
+  size_t                              _array_chunks_processed;
 
-  uint                                _arrays_chunked;
-  uint                                _array_chunks_processed;
+  void print_taskqueue_stats(uint i) const;
+  void print_local_stats(uint i) const;
+  static void print_stats();
 
-  uint                                _total_steals;
-  uint                                _masked_steals;
-
-  void print_stats(uint i);
-  static void print_stats();
-#endif // PS_PM_STATS
+  void reset_stats();
+#endif // TASKQUEUE_STATS
 
   PSYoungPromotionLAB                 _young_lab;
   PSOldPromotionLAB                   _old_lab;
@@ -78,9 +72,7 @@
   PrefetchQueue                       _prefetch_queue;
 
   OopStarTaskQueue                    _claimed_stack_depth;
-  GrowableArray<StarTask>*            _overflow_stack_depth;
-  OopTaskQueue                        _claimed_stack_breadth;
-  GrowableArray<oop>*                 _overflow_stack_breadth;
+  OverflowTaskQueue<oop>              _claimed_stack_breadth;
 
   bool                                _depth_first;
   bool                                _totally_drain;
@@ -97,9 +89,6 @@
   template <class T> inline void claim_or_forward_internal_depth(T* p);
   template <class T> inline void claim_or_forward_internal_breadth(T* p);
 
-  GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
-  GrowableArray<oop>*  overflow_stack_breadth()   { return _overflow_stack_breadth; }
-
   // On the task queues we push reference locations as well as
   // partially-scanned arrays (in the latter case, we push an oop to
   // the from-space image of the array and the length on the
@@ -148,40 +137,12 @@
 
   template <class T> void push_depth(T* p) {
     assert(depth_first(), "pre-condition");
-
-#if PS_PM_STATS
-    ++_total_pushes;
-#endif // PS_PM_STATS
-
-    if (!claimed_stack_depth()->push(p)) {
-      overflow_stack_depth()->push(p);
-#if PS_PM_STATS
-      ++_overflow_pushes;
-      uint stack_length = (uint) overflow_stack_depth()->length();
-      if (stack_length > _max_overflow_length) {
-        _max_overflow_length = stack_length;
-      }
-#endif // PS_PM_STATS
-    }
+    claimed_stack_depth()->push(p);
   }
 
   void push_breadth(oop o) {
     assert(!depth_first(), "pre-condition");
-
-#if PS_PM_STATS
-    ++_total_pushes;
-#endif // PS_PM_STATS
-
-    if(!claimed_stack_breadth()->push(o)) {
-      overflow_stack_breadth()->push(o);
-#if PS_PM_STATS
-      ++_overflow_pushes;
-      uint stack_length = (uint) overflow_stack_breadth()->length();
-      if (stack_length > _max_overflow_length) {
-        _max_overflow_length = stack_length;
-      }
-#endif // PS_PM_STATS
-    }
+    claimed_stack_breadth()->push(o);
   }
 
  protected:
@@ -199,12 +160,10 @@
   static PSPromotionManager* vm_thread_promotion_manager();
 
   static bool steal_depth(int queue_num, int* seed, StarTask& t) {
-    assert(stack_array_depth() != NULL, "invariant");
     return stack_array_depth()->steal(queue_num, seed, t);
   }
 
-  static bool steal_breadth(int queue_num, int* seed, Task& t) {
-    assert(stack_array_breadth() != NULL, "invariant");
+  static bool steal_breadth(int queue_num, int* seed, oop& t) {
     return stack_array_breadth()->steal(queue_num, seed, t);
   }
 
@@ -214,7 +173,7 @@
   OopStarTaskQueue* claimed_stack_depth() {
     return &_claimed_stack_depth;
   }
-  OopTaskQueue* claimed_stack_breadth() {
+  OverflowTaskQueue<oop>* claimed_stack_breadth() {
     return &_claimed_stack_breadth;
   }
 
@@ -246,25 +205,13 @@
   void drain_stacks_depth(bool totally_drain);
   void drain_stacks_breadth(bool totally_drain);
 
-  bool claimed_stack_empty() {
-    if (depth_first()) {
-      return claimed_stack_depth()->size() <= 0;
-    } else {
-      return claimed_stack_breadth()->size() <= 0;
-    }
-  }
-  bool overflow_stack_empty() {
-    if (depth_first()) {
-      return overflow_stack_depth()->length() <= 0;
-    } else {
-      return overflow_stack_breadth()->length() <= 0;
-    }
+  bool depth_first() const {
+    return _depth_first;
   }
   bool stacks_empty() {
-    return claimed_stack_empty() && overflow_stack_empty();
-  }
-  bool depth_first() {
-    return _depth_first;
+    return depth_first() ?
+      claimed_stack_depth()->is_empty() :
+      claimed_stack_breadth()->is_empty();
   }
 
   inline void process_popped_location_depth(StarTask p);
@@ -273,12 +220,5 @@
   template <class T> inline void claim_or_forward_depth(T* p);
   template <class T> inline void claim_or_forward_breadth(T* p);
 
-#if PS_PM_STATS
-  void increment_steals(oop* p = NULL) {
-    _total_steals += 1;
-    if (p != NULL && is_oop_masked(p)) {
-      _masked_steals += 1;
-    }
-  }
-#endif // PS_PM_STATS
+  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)
 };
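
Note: the psPromotionManager hunks above fold the hand-rolled GrowableArray
overflow stacks into OverflowTaskQueue, whose push() can no longer fail. A
minimal standalone C++ model of that contract (the container choices and the
capacity N are illustrative, not the HotSpot lock-free implementation):

    #include <cstddef>
    #include <deque>
    #include <vector>

    template <typename E, std::size_t N = 4>
    class OverflowTaskQueueModel {
      std::deque<E>  _queue;     // stands in for the bounded work-stealing queue
      std::vector<E> _overflow;  // unbounded spill stack, drained first
     public:
      // push never fails: spill to the overflow stack when the queue is full.
      void push(const E& e) {
        if (_queue.size() < N) _queue.push_back(e);
        else                   _overflow.push_back(e);
      }
      bool pop_local(E& e) {
        if (_queue.empty()) return false;
        e = _queue.back(); _queue.pop_back(); return true;
      }
      bool pop_overflow(E& e) {
        if (_overflow.empty()) return false;
        e = _overflow.back(); _overflow.pop_back(); return true;
      }
      bool taskqueue_empty() const { return _queue.empty(); }
      bool overflow_empty()  const { return _overflow.empty(); }
      bool is_empty() const { return taskqueue_empty() && overflow_empty(); }
    };

This mirrors the drain order in drain_stacks_depth/breadth above: the private
overflow stack is emptied first so other GC threads can keep stealing from the
shared task queue while this thread works.
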
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,3 +124,11 @@
     }
   }
 }
+
+#if TASKQUEUE_STATS
+void PSPromotionManager::record_steal(StarTask& p) {
+  if (is_oop_masked(p)) {
+    ++_masked_steals;
+  }
+}
+#endif // TASKQUEUE_STATS
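
Note: the TASKQUEUE_STATS / TASKQUEUE_STATS_ONLY pair used throughout these
files is the standard conditional-compilation idiom. A sketch of how such a
macro is typically defined (the real definition lives in taskqueue.hpp, which
is outside this diff, so the default shown here is an assumption):

    #ifndef TASKQUEUE_STATS
    #define TASKQUEUE_STATS 0                 // assumed default when not configured
    #endif

    #if TASKQUEUE_STATS
    #define TASKQUEUE_STATS_ONLY(code) code   // compiled in: stats enabled
    #else
    #define TASKQUEUE_STATS_ONLY(code)        // compiled out: zero overhead
    #endif
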
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -414,7 +414,6 @@
     }
 
     // Finally, flush the promotion_manager's labs, and deallocate its stacks.
-    assert(promotion_manager->claimed_stack_empty(), "Sanity");
     PSPromotionManager::post_scavenge();
 
     promotion_failure_occurred = promotion_failed();
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -148,9 +148,7 @@
     while(true) {
       StarTask p;
       if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
-#if PS_PM_STATS
-        pm->increment_steals(p);
-#endif // PS_PM_STATS
+        TASKQUEUE_STATS_ONLY(pm->record_steal(p));
         pm->process_popped_location_depth(p);
         pm->drain_stacks_depth(true);
       } else {
@@ -163,9 +161,6 @@
     while(true) {
       oop obj;
       if (PSPromotionManager::steal_breadth(which, &random_seed, obj)) {
-#if PS_PM_STATS
-        pm->increment_steals();
-#endif // PS_PM_STATS
         obj->copy_contents(pm);
         pm->drain_stacks_breadth(true);
       } else {
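
Note: both StealTask loops above share one shape: steal a task from a peer,
process it, fully drain the local queues, and only then consult the
terminator. A single-threaded standalone model of that control flow (steal
and process_and_drain are stubs standing in for the HotSpot calls):

    #include <cstdio>
    #include <queue>

    static std::queue<int> peer_work;            // stand-in for peers' queues

    static bool steal(int& task) {
      if (peer_work.empty()) return false;
      task = peer_work.front(); peer_work.pop(); return true;
    }

    static void process_and_drain(int task) {    // process + drain_stacks(true)
      std::printf("processed task %d\n", task);
    }

    int main() {
      for (int i = 0; i < 3; ++i) peer_work.push(i);
      int task;
      while (true) {
        if (steal(task)) {
          process_and_drain(task);
        } else {
          break;  // the real loop asks the ParallelTaskTerminator before exiting
        }
      }
    }
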
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -86,9 +86,7 @@
 
     _gc_locked = false;
 
-    if (full) {
-      _full_gc_count_before = full_gc_count_before;
-    }
+    _full_gc_count_before = full_gc_count_before;
     // In ParallelScavengeHeap::mem_allocate() collections can be
     // executed within a loop and _all_soft_refs_clear can be set
     // true after they have been cleared by a collection and another
--- a/src/share/vm/gc_interface/gcCause.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/gc_interface/gcCause.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -78,6 +78,9 @@
     case _old_generation_too_full_to_scavenge:
       return "Old Generation Too Full To Scavenge";
 
+    case _g1_inc_collection_pause:
+      return "G1 Evacuation Pause";
+
     case _last_ditch_collection:
       return "Last ditch collection";
 
--- a/src/share/vm/includeDB_compiler2	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/includeDB_compiler2	Sat Jul 31 15:10:59 2010 +0100
@@ -89,6 +89,21 @@
 
 allocation.hpp                          c2_globals.hpp
 
+bcEscapeAnalyzer.cpp                    bcEscapeAnalyzer.hpp
+bcEscapeAnalyzer.cpp                    bitMap.inline.hpp
+bcEscapeAnalyzer.cpp                    bytecode.hpp
+bcEscapeAnalyzer.cpp                    ciConstant.hpp
+bcEscapeAnalyzer.cpp                    ciField.hpp
+bcEscapeAnalyzer.cpp                    ciMethodBlocks.hpp
+bcEscapeAnalyzer.cpp                    ciStreams.hpp
+
+bcEscapeAnalyzer.hpp                    allocation.hpp
+bcEscapeAnalyzer.hpp                    ciMethod.hpp
+bcEscapeAnalyzer.hpp                    ciMethodData.hpp
+bcEscapeAnalyzer.hpp                    dependencies.hpp
+bcEscapeAnalyzer.hpp                    growableArray.hpp
+bcEscapeAnalyzer.hpp                    vectset.hpp
+
 block.cpp                               allocation.inline.hpp
 block.cpp                               block.hpp
 block.cpp                               cfgnode.hpp
@@ -239,6 +254,7 @@
 ciEnv.cpp                               compileLog.hpp
 ciEnv.cpp                               runtime.hpp
 
+ciMethod.cpp                            bcEscapeAnalyzer.hpp
 ciMethod.cpp                            ciTypeFlow.hpp
 ciMethod.cpp                            methodOop.hpp
 
--- a/src/share/vm/includeDB_core	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/includeDB_core	Sat Jul 31 15:10:59 2010 +0100
@@ -301,20 +301,6 @@
 barrierSet.inline.hpp                   barrierSet.hpp
 barrierSet.inline.hpp                   cardTableModRefBS.hpp
 
-bcEscapeAnalyzer.cpp                    bcEscapeAnalyzer.hpp
-bcEscapeAnalyzer.cpp                    bitMap.inline.hpp
-bcEscapeAnalyzer.cpp                    bytecode.hpp
-bcEscapeAnalyzer.cpp                    ciConstant.hpp
-bcEscapeAnalyzer.cpp                    ciField.hpp
-bcEscapeAnalyzer.cpp                    ciMethodBlocks.hpp
-bcEscapeAnalyzer.cpp                    ciStreams.hpp
-
-bcEscapeAnalyzer.hpp                    allocation.hpp
-bcEscapeAnalyzer.hpp                    ciMethod.hpp
-bcEscapeAnalyzer.hpp                    ciMethodData.hpp
-bcEscapeAnalyzer.hpp                    dependencies.hpp
-bcEscapeAnalyzer.hpp                    growableArray.hpp
-
 biasedLocking.cpp                       biasedLocking.hpp
 biasedLocking.cpp                       klass.inline.hpp
 biasedLocking.cpp                       markOop.hpp
@@ -545,6 +531,7 @@
 
 ciCPCache.hpp                           ciClassList.hpp
 ciCPCache.hpp                           ciObject.hpp
+ciCPCache.hpp                           cpCacheOop.hpp
 
 ciEnv.cpp                               allocation.inline.hpp
 ciEnv.cpp                               ciConstant.hpp
@@ -664,7 +651,6 @@
 
 ciMethod.cpp                            abstractCompiler.hpp
 ciMethod.cpp                            allocation.inline.hpp
-ciMethod.cpp                            bcEscapeAnalyzer.hpp
 ciMethod.cpp                            bitMap.inline.hpp
 ciMethod.cpp                            ciCallProfile.hpp
 ciMethod.cpp                            ciExceptionHandler.hpp
@@ -823,6 +809,7 @@
 
 ciStreams.cpp                           ciCallSite.hpp
 ciStreams.cpp                           ciConstant.hpp
+ciStreams.cpp                           ciCPCache.hpp
 ciStreams.cpp                           ciField.hpp
 ciStreams.cpp                           ciStreams.hpp
 ciStreams.cpp                           ciUtilities.hpp
@@ -962,7 +949,6 @@
 classLoader.cpp                         timer.hpp
 classLoader.cpp                         universe.inline.hpp
 classLoader.cpp                         vmSymbols.hpp
-classLoader.cpp                         vtune.hpp
 
 classLoader.hpp                         classFileParser.hpp
 classLoader.hpp                         perfData.hpp
@@ -1002,7 +988,6 @@
 codeBlob.cpp                            safepoint.hpp
 codeBlob.cpp                            sharedRuntime.hpp
 codeBlob.cpp                            vframe.hpp
-codeBlob.cpp                            vtune.hpp
 
 codeBlob.hpp                            codeBuffer.hpp
 codeBlob.hpp                            frame.hpp
@@ -2165,7 +2150,6 @@
 interpreter.cpp                         stubRoutines.hpp
 interpreter.cpp                         templateTable.hpp
 interpreter.cpp                         timer.hpp
-interpreter.cpp                         vtune.hpp
 
 interpreter.hpp                         cppInterpreter.hpp
 interpreter.hpp                         stubs.hpp
@@ -2321,7 +2305,6 @@
 java.cpp                                vmError.hpp
 java.cpp                                vm_operations.hpp
 java.cpp                                vm_version_<arch>.hpp
-java.cpp                                vtune.hpp
 
 java.hpp                                os.hpp
 
@@ -3048,7 +3031,6 @@
 nmethod.cpp                             scopeDesc.hpp
 nmethod.cpp                             sharedRuntime.hpp
 nmethod.cpp                             sweeper.hpp
-nmethod.cpp                             vtune.hpp
 nmethod.cpp                             xmlstream.hpp
 
 nmethod.hpp                             codeBlob.hpp
@@ -3771,7 +3753,6 @@
 sharedRuntime.cpp                       vmSymbols.hpp
 sharedRuntime.cpp                       vmreg_<arch>.inline.hpp
 sharedRuntime.cpp                       vtableStubs.hpp
-sharedRuntime.cpp                       vtune.hpp
 sharedRuntime.cpp                       xmlstream.hpp
 
 sharedRuntime.hpp                       allocation.hpp
@@ -3935,7 +3916,6 @@
 stubCodeGenerator.cpp                   forte.hpp
 stubCodeGenerator.cpp                   oop.inline.hpp
 stubCodeGenerator.cpp                   stubCodeGenerator.hpp
-stubCodeGenerator.cpp                   vtune.hpp
 
 stubCodeGenerator.hpp                   allocation.hpp
 stubCodeGenerator.hpp                   assembler.hpp
@@ -4456,7 +4436,6 @@
 universe.cpp                            universe.inline.hpp
 universe.cpp                            vmSymbols.hpp
 universe.cpp                            vm_operations.hpp
-universe.cpp                            vtune.hpp
 
 universe.hpp                            growableArray.hpp
 universe.hpp                            handles.hpp
@@ -4719,7 +4698,6 @@
 vtableStubs.cpp                         resourceArea.hpp
 vtableStubs.cpp                         sharedRuntime.hpp
 vtableStubs.cpp                         vtableStubs.hpp
-vtableStubs.cpp                         vtune.hpp
 
 vtableStubs.hpp                         allocation.hpp
 
@@ -4733,11 +4711,6 @@
 vtableStubs_<arch_model>.cpp            vmreg_<arch>.inline.hpp
 vtableStubs_<arch_model>.cpp            vtableStubs.hpp
 
-vtune.hpp                               allocation.hpp
-
-vtune_<os_family>.cpp                   interpreter.hpp
-vtune_<os_family>.cpp                   vtune.hpp
-
 watermark.hpp                           allocation.hpp
 watermark.hpp                           globalDefinitions.hpp
 
--- a/src/share/vm/interpreter/bytecode.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/bytecode.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -136,25 +136,24 @@
 // Implementation of Bytecode_invoke
 
 void Bytecode_invoke::verify() const {
-  Bytecodes::Code bc = adjusted_invoke_code();
   assert(is_valid(), "check invoke");
   assert(method()->constants()->cache() != NULL, "do not call this from verifier or rewriter");
 }
 
 
-symbolOop Bytecode_invoke::signature() const {
+symbolOop Bytecode_member_ref::signature() const {
   constantPoolOop constants = method()->constants();
   return constants->signature_ref_at(index());
 }
 
 
-symbolOop Bytecode_invoke::name() const {
+symbolOop Bytecode_member_ref::name() const {
   constantPoolOop constants = method()->constants();
   return constants->name_ref_at(index());
 }
 
 
-BasicType Bytecode_invoke::result_type(Thread *thread) const {
+BasicType Bytecode_member_ref::result_type(Thread *thread) const {
   symbolHandle sh(thread, signature());
   ResultTypeFinder rts(sh);
   rts.iterate();
@@ -167,9 +166,9 @@
   KlassHandle resolved_klass;
   constantPoolHandle constants(THREAD, _method->constants());
 
-  if (adjusted_invoke_code() == Bytecodes::_invokedynamic) {
+  if (java_code() == Bytecodes::_invokedynamic) {
     LinkResolver::resolve_dynamic_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
-  } else if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {
+  } else if (java_code() != Bytecodes::_invokeinterface) {
     LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
   } else {
     LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
@@ -178,51 +177,68 @@
 }
 
 
-int Bytecode_invoke::index() const {
+int Bytecode_member_ref::index() const {
   // Note:  Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
   // at the same time it allocates per-call-site CP cache entries.
-  Bytecodes::Code stdc = Bytecodes::java_code(code());
-  Bytecode* invoke = Bytecode_at(bcp());
-  if (invoke->has_index_u4(stdc))
-    return invoke->get_index_u4(stdc);
+  Bytecodes::Code rawc = code();
+  Bytecode* invoke = bytecode();
+  if (invoke->has_index_u4(rawc))
+    return invoke->get_index_u4(rawc);
   else
-    return invoke->get_index_u2_cpcache(stdc);
+    return invoke->get_index_u2_cpcache(rawc);
 }
 
+int Bytecode_member_ref::pool_index() const {
+  int index = this->index();
+  DEBUG_ONLY({
+      if (!bytecode()->has_index_u4(code()))
+        index -= constantPoolOopDesc::CPCACHE_INDEX_TAG;
+    });
+  return _method->constants()->cache()->entry_at(index)->constant_pool_index();
+}
 
 // Implementation of Bytecode_field
 
 void Bytecode_field::verify() const {
-  Bytecodes::Code stdc = Bytecodes::java_code(code());
-  assert(stdc == Bytecodes::_putstatic || stdc == Bytecodes::_getstatic ||
-         stdc == Bytecodes::_putfield  || stdc == Bytecodes::_getfield, "check field");
-}
-
-
-bool Bytecode_field::is_static() const {
-  Bytecodes::Code stdc = Bytecodes::java_code(code());
-  return stdc == Bytecodes::_putstatic || stdc == Bytecodes::_getstatic;
+  assert(is_valid(), "check field");
 }
 
 
-int Bytecode_field::index() const {
-  Bytecode* invoke = Bytecode_at(bcp());
-  return invoke->get_index_u2_cpcache(Bytecodes::_getfield);
+// Implementation of Bytecode_loadconstant
+
+int Bytecode_loadconstant::raw_index() const {
+  Bytecode* bcp = bytecode();
+  Bytecodes::Code rawc = bcp->code();
+  assert(rawc != Bytecodes::_wide, "verifier prevents this");
+  if (Bytecodes::java_code(rawc) == Bytecodes::_ldc)
+    return bcp->get_index_u1(rawc);
+  else
+    return bcp->get_index_u2(rawc, false);
 }
 
-
-// Implementation of Bytecodes loac constant
+int Bytecode_loadconstant::pool_index() const {
+  int index = raw_index();
+  if (has_cache_index()) {
+    return _method->constants()->cache()->entry_at(index)->constant_pool_index();
+  }
+  return index;
+}
 
-int Bytecode_loadconstant::index() const {
-  Bytecodes::Code stdc = Bytecodes::java_code(code());
-  if (stdc != Bytecodes::_wide) {
-    if (Bytecodes::java_code(stdc) == Bytecodes::_ldc)
-      return get_index_u1(stdc);
-    else
-      return get_index_u2(stdc, false);
+BasicType Bytecode_loadconstant::result_type() const {
+  int index = pool_index();
+  constantTag tag = _method->constants()->tag_at(index);
+  return tag.basic_type();
+}
+
+oop Bytecode_loadconstant::resolve_constant(TRAPS) const {
+  assert(_method.not_null(), "must supply method to resolve constant");
+  int index = raw_index();
+  constantPoolOop constants = _method->constants();
+  if (has_cache_index()) {
+    return constants->resolve_cached_constant_at(index, THREAD);
+  } else {
+    return constants->resolve_constant_at(index, THREAD);
   }
-  stdc = Bytecodes::code_at(addr_at(1));
-  return get_index_u2(stdc, true);
 }
 
 //------------------------------------------------------------------------------
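
Note: Bytecode_member_ref::pool_index() above translates a rewritten operand
(a CP-cache index, biased by CPCACHE_INDEX_TAG in debug builds only) back to
the original constant-pool slot recorded in the cache entry. A standalone
model of that two-level lookup (the tag value and table contents are
illustrative):

    #include <cassert>
    #include <vector>

    const int CPCACHE_INDEX_TAG = 0;   // nonzero only in debug builds; assumed 0

    struct CacheEntry { int constant_pool_index; };

    int pool_index(const std::vector<CacheEntry>& cache, int raw_index) {
      int index = raw_index - CPCACHE_INDEX_TAG;   // strip the debug-only bias
      assert(0 <= index && index < (int)cache.size());
      return cache[index].constant_pool_index;
    }
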
--- a/src/share/vm/interpreter/bytecode.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/bytecode.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -76,9 +76,13 @@
           return Bytes::get_native_u2(p);
     else  return Bytes::get_Java_u2(p);
   }
+  int get_index_u1_cpcache(Bytecodes::Code bc) const {
+    assert_same_format_as(bc); assert_index_size(1, bc);
+    return *(jubyte*)addr_at(1) + constantPoolOopDesc::CPCACHE_INDEX_TAG;
+  }
   int get_index_u2_cpcache(Bytecodes::Code bc) const {
     assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc);
-    return Bytes::get_native_u2(addr_at(1)) DEBUG_ONLY(+ constantPoolOopDesc::CPCACHE_INDEX_TAG);
+    return Bytes::get_native_u2(addr_at(1)) + constantPoolOopDesc::CPCACHE_INDEX_TAG;
   }
   int get_index_u4(Bytecodes::Code bc) const {
     assert_same_format_as(bc); assert_index_size(4, bc);
@@ -152,7 +156,7 @@
 
 inline Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp) {
   Bytecode_lookupswitch* b = (Bytecode_lookupswitch*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -174,44 +178,56 @@
 
 inline Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp) {
   Bytecode_tableswitch* b = (Bytecode_tableswitch*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
 
-// Abstraction for invoke_{virtual, static, interface, special}
+// Common code for decoding invokes and field references.
 
-class Bytecode_invoke: public ResourceObj {
+class Bytecode_member_ref: public ResourceObj {
  protected:
   methodHandle _method;                          // method containing the bytecode
   int          _bci;                             // position of the bytecode
 
-  Bytecode_invoke(methodHandle method, int bci)  : _method(method), _bci(bci) {}
+  Bytecode_member_ref(methodHandle method, int bci)  : _method(method), _bci(bci) {}
+
+ public:
+  // Attributes
+  methodHandle method() const                    { return _method; }
+  int          bci() const                       { return _bci; }
+  address      bcp() const                       { return _method->bcp_from(bci()); }
+  Bytecode*    bytecode() const                  { return Bytecode_at(bcp()); }
+
+  int          index() const;                    // cache index (loaded from instruction)
+  int          pool_index() const;               // constant pool index
+  symbolOop    name() const;                     // returns the name of the method or field
+  symbolOop    signature() const;                // returns the signature of the method or field
+
+  BasicType    result_type(Thread* thread) const; // returns the result type of the getfield or invoke
+
+  Bytecodes::Code code() const                   { return Bytecodes::code_at(bcp(), _method()); }
+  Bytecodes::Code java_code() const              { return Bytecodes::java_code(code()); }
+};
+
+// Abstraction for invoke_{virtual, static, interface, special}
+
+class Bytecode_invoke: public Bytecode_member_ref {
+ protected:
+  Bytecode_invoke(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) {}
 
  public:
   void verify() const;
 
   // Attributes
-  methodHandle method() const                    { return _method; }
-  int          bci() const                       { return _bci; }
-  address      bcp() const                       { return _method->bcp_from(bci()); }
-
-  int          index() const;                    // the constant pool index for the invoke
-  symbolOop    name() const;                     // returns the name of the invoked method
-  symbolOop    signature() const;                // returns the signature of the invoked method
-  BasicType    result_type(Thread *thread) const; // returns the result type of the invoke
-
-  Bytecodes::Code code() const                   { return Bytecodes::code_at(bcp(), _method()); }
-  Bytecodes::Code adjusted_invoke_code() const   { return Bytecodes::java_code(code()); }
-
   methodHandle static_target(TRAPS);             // "specified" method   (from constant pool)
 
   // Testers
-  bool is_invokeinterface() const                { return adjusted_invoke_code() == Bytecodes::_invokeinterface; }
-  bool is_invokevirtual() const                  { return adjusted_invoke_code() == Bytecodes::_invokevirtual; }
-  bool is_invokestatic() const                   { return adjusted_invoke_code() == Bytecodes::_invokestatic; }
-  bool is_invokespecial() const                  { return adjusted_invoke_code() == Bytecodes::_invokespecial; }
-  bool is_invokedynamic() const                  { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
+  bool is_invokeinterface() const                { return java_code() == Bytecodes::_invokeinterface; }
+  bool is_invokevirtual() const                  { return java_code() == Bytecodes::_invokevirtual; }
+  bool is_invokestatic() const                   { return java_code() == Bytecodes::_invokestatic; }
+  bool is_invokespecial() const                  { return java_code() == Bytecodes::_invokespecial; }
+  bool is_invokedynamic() const                  { return java_code() == Bytecodes::_invokedynamic; }
 
   bool has_receiver() const                      { return !is_invokestatic() && !is_invokedynamic(); }
 
@@ -230,7 +246,7 @@
 
 inline Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci) {
   Bytecode_invoke* b = new Bytecode_invoke(method, bci);
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -240,21 +256,34 @@
 }
 
 
-// Abstraction for all field accesses (put/get field/static_
-class Bytecode_field: public Bytecode {
-public:
+// Abstraction for all field accesses (put/get field/static)
+class Bytecode_field: public Bytecode_member_ref {
+ protected:
+  Bytecode_field(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) {}
+
+ public:
+  // Testers
+  bool is_getfield() const                       { return java_code() == Bytecodes::_getfield; }
+  bool is_putfield() const                       { return java_code() == Bytecodes::_putfield; }
+  bool is_getstatic() const                      { return java_code() == Bytecodes::_getstatic; }
+  bool is_putstatic() const                      { return java_code() == Bytecodes::_putstatic; }
+
+  bool is_getter() const                         { return is_getfield()  || is_getstatic(); }
+  bool is_static() const                         { return is_getstatic() || is_putstatic(); }
+
+  bool is_valid() const                          { return is_getfield()   ||
+                                                          is_putfield()   ||
+                                                          is_getstatic()  ||
+                                                          is_putstatic(); }
   void verify() const;
 
-  int  index() const;
-  bool is_static() const;
-
   // Creation
-  inline friend Bytecode_field* Bytecode_field_at(const methodOop method, address bcp);
+  inline friend Bytecode_field* Bytecode_field_at(methodHandle method, int bci);
 };
 
-inline Bytecode_field* Bytecode_field_at(const methodOop method, address bcp) {
-  Bytecode_field* b = (Bytecode_field*)bcp;
-  debug_only(b->verify());
+inline Bytecode_field* Bytecode_field_at(methodHandle method, int bci) {
+  Bytecode_field* b = new Bytecode_field(method, bci);
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -274,7 +303,7 @@
 
 inline Bytecode_checkcast* Bytecode_checkcast_at(address bcp) {
   Bytecode_checkcast* b = (Bytecode_checkcast*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -294,7 +323,7 @@
 
 inline Bytecode_instanceof* Bytecode_instanceof_at(address bcp) {
   Bytecode_instanceof* b = (Bytecode_instanceof*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -312,7 +341,7 @@
 
 inline Bytecode_new* Bytecode_new_at(address bcp) {
   Bytecode_new* b = (Bytecode_new*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -330,7 +359,7 @@
 
 inline Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp) {
   Bytecode_multianewarray* b = (Bytecode_multianewarray*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -348,29 +377,57 @@
 
 inline Bytecode_anewarray* Bytecode_anewarray_at(address bcp) {
   Bytecode_anewarray* b = (Bytecode_anewarray*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
 
 // Abstraction for ldc, ldc_w and ldc2_w
 
-class Bytecode_loadconstant: public Bytecode {
+class Bytecode_loadconstant: public ResourceObj {
+ private:
+  int          _bci;
+  methodHandle _method;
+
+  Bytecodes::Code code() const                   { return bytecode()->code(); }
+
+  int raw_index() const;
+
+  Bytecode_loadconstant(methodHandle method, int bci) : _method(method), _bci(bci) {}
+
  public:
+  // Attributes
+  methodHandle method() const                    { return _method; }
+  int          bci() const                       { return _bci; }
+  address      bcp() const                       { return _method->bcp_from(bci()); }
+  Bytecode*    bytecode() const                  { return Bytecode_at(bcp()); }
+
   void verify() const {
+    assert(_method.not_null(), "must supply method");
     Bytecodes::Code stdc = Bytecodes::java_code(code());
     assert(stdc == Bytecodes::_ldc ||
            stdc == Bytecodes::_ldc_w ||
            stdc == Bytecodes::_ldc2_w, "load constant");
   }
 
-  int index() const;
+  // Only non-standard bytecodes (fast_aldc) have CP cache indexes.
+  bool has_cache_index() const { return code() >= Bytecodes::number_of_java_codes; }
 
-  inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(const methodOop method, address bcp);
+  int pool_index() const;               // index into constant pool
+  int cache_index() const {             // index into CP cache (or -1 if none)
+    return has_cache_index() ? raw_index() : -1;
+  }
+
+  BasicType result_type() const;        // returns the result type of the ldc
+
+  oop resolve_constant(TRAPS) const;
+
+  // Creation
+  inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci);
 };
 
-inline Bytecode_loadconstant* Bytecode_loadconstant_at(const methodOop method, address bcp) {
-  Bytecode_loadconstant* b = (Bytecode_loadconstant*)bcp;
-  debug_only(b->verify());
+inline Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci) {
+  Bytecode_loadconstant* b = new Bytecode_loadconstant(method, bci);
+  DEBUG_ONLY(b->verify());
   return b;
 }
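
Note: the new Bytecode_loadconstant interface separates the operand as
encoded in the instruction stream from the constant-pool slot it refers to.
A small standalone model of that dispatch (the field names here are invented
for illustration):

    #include <vector>

    struct LoadConstModel {
      bool rewritten;                          // true for _fast_aldc{,_w}
      int  raw;                                // operand read from the stream
      const std::vector<int>* cache_to_pool;   // cache index -> pool index

      bool has_cache_index() const { return rewritten; }
      int  cache_index() const { return has_cache_index() ? raw : -1; }
      int  pool_index() const {
        return has_cache_index() ? (*cache_to_pool)[raw] : raw;
      }
    };
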
--- a/src/share/vm/interpreter/bytecodeTracer.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -49,6 +49,7 @@
 
   int       get_index_u1()           { return *(address)_next_pc++; }
   int       get_index_u2()           { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
+  int       get_index_u1_cpcache()   { return get_index_u1() + constantPoolOopDesc::CPCACHE_INDEX_TAG; }
   int       get_index_u2_cpcache()   { int i=Bytes::get_native_u2(_next_pc); _next_pc+=2; return i + constantPoolOopDesc::CPCACHE_INDEX_TAG; }
   int       get_index_u4()           { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
   int       get_index_special()      { return (is_wide()) ? get_index_u2() : get_index_u1(); }
@@ -60,6 +61,7 @@
   bool      check_index(int i, int& cp_index, outputStream* st = tty);
   void      print_constant(int i, outputStream* st = tty);
   void      print_field_or_method(int i, outputStream* st = tty);
+  void      print_field_or_method(int orig_i, int i, outputStream* st = tty);
   void      print_attributes(int bci, outputStream* st = tty);
   void      bytecode_epilog(int bci, outputStream* st = tty);
 
@@ -177,18 +179,29 @@
   _closure->trace(method, bcp, st);
 }
 
+void print_symbol(symbolOop sym, outputStream* st) {
+  char buf[40];
+  int len = sym->utf8_length();
+  if (len >= (int)sizeof(buf)) {
+    st->print_cr(" %s...[%d]", sym->as_C_string(buf, sizeof(buf)), len);
+  } else {
+    st->print(" ");
+    sym->print_on(st); st->cr();
+  }
+}
+
 void print_oop(oop value, outputStream* st) {
   if (value == NULL) {
     st->print_cr(" NULL");
-  } else {
+  } else if (java_lang_String::is_instance(value)) {
     EXCEPTION_MARK;
     Handle h_value (THREAD, value);
     symbolHandle sym = java_lang_String::as_symbol(h_value, CATCH);
-    if (sym->utf8_length() > 32) {
-      st->print_cr(" ....");
-    } else {
-      sym->print_on(st); st->cr();
-    }
+    print_symbol(sym(), st);
+  } else if (value->is_symbol()) {
+    print_symbol(symbolOop(value), st);
+  } else {
+    st->print_cr(" " PTR_FORMAT, (intptr_t) value);
   }
 }
 
@@ -279,16 +292,27 @@
   } else if (tag.is_double()) {
     st->print_cr(" %f", constants->double_at(i));
   } else if (tag.is_string()) {
-    oop string = constants->resolved_string_at(i);
+    oop string = constants->pseudo_string_at(i);
     print_oop(string, st);
   } else if (tag.is_unresolved_string()) {
-    st->print_cr(" <unresolved string at %d>", i);
+    const char* string = constants->string_at_noresolve(i);
+    st->print_cr(" %s", string);
   } else if (tag.is_klass()) {
     st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name());
   } else if (tag.is_unresolved_klass()) {
     st->print_cr(" <unresolved klass at %d>", i);
   } else if (tag.is_object()) {
-    st->print_cr(" " PTR_FORMAT, constants->object_at(i));
+    st->print(" <Object>");
+    print_oop(constants->object_at(i), st);
+  } else if (tag.is_method_type()) {
+    int i2 = constants->method_type_index_at(i);
+    st->print(" <MethodType> %d", i2);
+    print_oop(constants->symbol_at(i2), st);
+  } else if (tag.is_method_handle()) {
+    int kind = constants->method_handle_ref_kind_at(i);
+    int i2 = constants->method_handle_index_at(i);
+    st->print(" <MethodHandle of kind %d>", kind, i2);
+    print_field_or_method(-i, i2, st);
   } else {
     st->print_cr(" bad tag=%d at %d", tag.value(), i);
   }
@@ -297,17 +321,23 @@
 void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
   int orig_i = i;
   if (!check_index(orig_i, i, st))  return;
+  print_field_or_method(orig_i, i, st);
+}
 
+void BytecodePrinter::print_field_or_method(int orig_i, int i, outputStream* st) {
   constantPoolOop constants = method()->constants();
   constantTag tag = constants->tag_at(i);
 
-  int nt_index = -1;
+  bool has_klass = true;
 
   switch (tag.value()) {
   case JVM_CONSTANT_InterfaceMethodref:
   case JVM_CONSTANT_Methodref:
   case JVM_CONSTANT_Fieldref:
+    break;
   case JVM_CONSTANT_NameAndType:
+  case JVM_CONSTANT_InvokeDynamic:
+    has_klass = false;
     break;
   default:
     st->print_cr(" bad tag=%d at %d", tag.value(), i);
@@ -316,7 +346,17 @@
 
   symbolOop name = constants->uncached_name_ref_at(i);
   symbolOop signature = constants->uncached_signature_ref_at(i);
-  st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string());
+  const char* sep = (tag.is_field() ? "/" : "");
+  if (has_klass) {
+    symbolOop klass = constants->klass_name_at(constants->uncached_klass_ref_index_at(i));
+    st->print_cr(" %d <%s.%s%s%s> ", i, klass->as_C_string(), name->as_C_string(), sep, signature->as_C_string());
+  } else {
+    if (tag.is_invoke_dynamic()) {
+      int bsm = constants->invoke_dynamic_bootstrap_method_ref_index_at(i);
+      st->print(" bsm=%d", bsm);
+    }
+    st->print_cr(" %d <%s%s%s>", i, name->as_C_string(), sep, signature->as_C_string());
+  }
 }
 
 
@@ -340,12 +380,20 @@
       st->print_cr(" " INT32_FORMAT, get_short());
       break;
     case Bytecodes::_ldc:
-      print_constant(get_index_u1(), st);
+      if (Bytecodes::uses_cp_cache(raw_code())) {
+        print_constant(get_index_u1_cpcache(), st);
+      } else {
+        print_constant(get_index_u1(), st);
+      }
       break;
 
     case Bytecodes::_ldc_w:
     case Bytecodes::_ldc2_w:
-      print_constant(get_index_u2(), st);
+      if (Bytecodes::uses_cp_cache(raw_code())) {
+        print_constant(get_index_u2_cpcache(), st);
+      } else {
+        print_constant(get_index_u2(), st);
+      }
       break;
 
     case Bytecodes::_iload:
--- a/src/share/vm/interpreter/bytecodes.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/bytecodes.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -489,6 +489,9 @@
 
   def(_return_register_finalizer , "return_register_finalizer" , "b"    , NULL    , T_VOID   ,  0, true, _return);
 
+  def(_fast_aldc           , "fast_aldc"           , "bj"   , NULL    , T_OBJECT,   1, true,  _ldc   );
+  def(_fast_aldc_w         , "fast_aldc_w"         , "bJJ"  , NULL    , T_OBJECT,   1, true,  _ldc_w );
+
   def(_shouldnotreachhere  , "_shouldnotreachhere" , "b"    , NULL    , T_VOID   ,  0, false);
 
   // platform specific JVM bytecodes
--- a/src/share/vm/interpreter/bytecodes.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/bytecodes.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -270,6 +270,10 @@
     _fast_linearswitch    ,
     _fast_binaryswitch    ,
 
+    // special handling of oop constants:
+    _fast_aldc            ,
+    _fast_aldc_w          ,
+
     _return_register_finalizer    ,
 
     _shouldnotreachhere,      // For debugging
--- a/src/share/vm/interpreter/interpreter.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/interpreter.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -99,11 +99,6 @@
 #endif // PRODUCT
   // need to hit every safepoint in order to call zapping routine
   // register the interpreter
-  VTune::register_stub(
-    "Interpreter",
-    AbstractInterpreter::code()->code_start(),
-    AbstractInterpreter::code()->code_end()
-  );
   Forte::register_stub(
     "Interpreter",
     AbstractInterpreter::code()->code_start(),
@@ -267,20 +262,6 @@
 }
 #endif // PRODUCT
 
-static BasicType constant_pool_type(methodOop method, int index) {
-  constantTag tag = method->constants()->tag_at(index);
-       if (tag.is_int              ()) return T_INT;
-  else if (tag.is_float            ()) return T_FLOAT;
-  else if (tag.is_long             ()) return T_LONG;
-  else if (tag.is_double           ()) return T_DOUBLE;
-  else if (tag.is_string           ()) return T_OBJECT;
-  else if (tag.is_unresolved_string()) return T_OBJECT;
-  else if (tag.is_klass            ()) return T_OBJECT;
-  else if (tag.is_unresolved_klass ()) return T_OBJECT;
-  ShouldNotReachHere();
-  return T_ILLEGAL;
-}
-
 
 //------------------------------------------------------------------------------------------------------------------------
 // Deoptimization support
@@ -330,13 +311,15 @@
     }
 
     case Bytecodes::_ldc   :
-      type = constant_pool_type( method, *(bcp+1) );
-      break;
-
     case Bytecodes::_ldc_w : // fall through
     case Bytecodes::_ldc2_w:
-      type = constant_pool_type( method, Bytes::get_Java_u2(bcp+1) );
-      break;
+      {
+        Thread *thread = Thread::current();
+        ResourceMark rm(thread);
+        methodHandle mh(thread, method);
+        type = Bytecode_loadconstant_at(mh, bci)->result_type();
+        break;
+      }
 
     default:
       type = Bytecodes::result_type(code);
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -83,6 +83,18 @@
   }
 IRT_END
 
+IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) {
+  assert(bytecode == Bytecodes::_fast_aldc ||
+         bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
+  ResourceMark rm(thread);
+  methodHandle m (thread, method(thread));
+  Bytecode_loadconstant* ldc = Bytecode_loadconstant_at(m, bci(thread));
+  oop result = ldc->resolve_constant(THREAD);
+  DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc->cache_index()));
+  assert(result == cpce->f1(), "expected result for assembly code");
+}
+IRT_END
+
 
 //------------------------------------------------------------------------------------------------------------------------
 // Allocation
@@ -328,7 +340,7 @@
   typeArrayHandle    h_extable  (thread, h_method->exception_table());
   bool               should_repeat;
   int                handler_bci;
-  int                current_bci = bcp(thread) - h_method->code_base();
+  int                current_bci = bci(thread);
 
   // Need to do this check first since when _do_not_unlock_if_synchronized
   // is set, we don't want to trigger any classloading which may make calls
@@ -615,8 +627,7 @@
   if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) {
     ResourceMark rm(thread);
     methodHandle m (thread, method(thread));
-    int bci = m->bci_from(bcp(thread));
-    Bytecode_invoke* call = Bytecode_invoke_at(m, bci);
+    Bytecode_invoke* call = Bytecode_invoke_at(m, bci(thread));
     symbolHandle signature (thread, call->signature());
     receiver = Handle(thread,
                   thread->last_frame().interpreter_callee_receiver(signature));
@@ -691,10 +702,6 @@
 
   methodHandle caller_method(thread, method(thread));
 
-  // first find the bootstrap method
-  KlassHandle caller_klass(thread, caller_method->method_holder());
-  Handle bootm = SystemDictionary::find_bootstrap_method(caller_klass, CHECK);
-
   constantPoolHandle pool(thread, caller_method->constants());
   pool->set_invokedynamic();    // mark header to flag active call sites
 
@@ -715,7 +722,7 @@
     CallInfo info;
     LinkResolver::resolve_invoke(info, Handle(), pool,
                                  site_index, bytecode, CHECK);
-    // The main entry corresponds to a JVM_CONSTANT_NameAndType, and serves
+    // The main entry corresponds to a JVM_CONSTANT_InvokeDynamic, and serves
     // as a common reference point for all invokedynamic call sites with
     // that exact call descriptor.  We will link it in the CP cache exactly
     // as if it were an invokevirtual of MethodHandle.invoke.
@@ -723,23 +730,30 @@
       bytecode,
       info.resolved_method(),
       info.vtable_index());
-    assert(pool->cache()->entry_at(main_index)->is_vfinal(), "f2 must be a methodOop");
   }
 
   // The method (f2 entry) of the main entry is the MH.invoke for the
   // invokedynamic target call signature.
-  intptr_t f2_value = pool->cache()->entry_at(main_index)->f2();
-  methodHandle signature_invoker(THREAD, (methodOop) f2_value);
+  oop f1_value = pool->cache()->entry_at(main_index)->f1();
+  methodHandle signature_invoker(THREAD, (methodOop) f1_value);
   assert(signature_invoker.not_null() && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
          "correct result from LinkResolver::resolve_invokedynamic");
 
+  Handle bootm = SystemDictionary::find_bootstrap_method(caller_method, caller_bci,
+                                                         main_index, CHECK);
+  if (bootm.is_null()) {
+    THROW_MSG(vmSymbols::java_lang_IllegalStateException(),
+              "no bootstrap method found for invokedynamic");
+  }
+
+  // Short circuit if CallSite has been bound already:
+  if (!pool->cache()->secondary_entry_at(site_index)->is_f1_null())
+    return;
+
   symbolHandle call_site_name(THREAD, pool->name_ref_at(site_index));
 
   Handle info;  // NYI: Other metadata from a new kind of CP entry.  (Annotations?)
 
-  // this is the index which gets stored on the CallSite object (as "callerPosition"):
-  int call_site_position = constantPoolCacheOopDesc::decode_secondary_index(site_index);
-
   Handle call_site
     = SystemDictionary::make_dynamic_call_site(bootm,
                                                // Callee information:
@@ -1257,7 +1271,7 @@
   Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
   ArgumentSizeComputer asc(invoke->signature());
   int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver
-  Copy::conjoint_bytes(src_address, dest_address,
+  Copy::conjoint_jbytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize);
 IRT_END
 #endif
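
Note: InterpreterRuntime::resolve_ldc above is the slow path of a
resolve-once scheme: the resolved oop is parked in the CP cache entry's f1
field so the generated fast path never re-enters the runtime. A generic
standalone model of the idiom (the types and the resolver stub are
illustrative):

    struct CacheEntry {
      const void* f1 = nullptr;          // resolved value once non-null
    };

    static const void* slow_resolve() {  // stub for the real resolution work
      static int constant = 42;
      return &constant;
    }

    const void* resolve_cached(CacheEntry& e) {
      if (e.f1 == nullptr) {             // taken on first execution only
        e.f1 = slow_resolve();
      }
      return e.f1;                       // fast path just loads f1
    }
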
--- a/src/share/vm/interpreter/interpreterRuntime.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -34,6 +34,7 @@
   static frame     last_frame(JavaThread *thread)    { return thread->last_frame(); }
   static methodOop method(JavaThread *thread)        { return last_frame(thread).interpreter_frame_method(); }
   static address   bcp(JavaThread *thread)           { return last_frame(thread).interpreter_frame_bcp(); }
+  static int       bci(JavaThread *thread)           { return last_frame(thread).interpreter_frame_bci(); }
   static void      set_bcp_and_mdp(address bcp, JavaThread*thread);
   static Bytecodes::Code code(JavaThread *thread)    {
     // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
@@ -59,6 +60,7 @@
  public:
   // Constants
   static void    ldc           (JavaThread* thread, bool wide);
+  static void    resolve_ldc   (JavaThread* thread, Bytecodes::Code bytecode);
 
   // Allocation
   static void    _new          (JavaThread* thread, constantPoolOopDesc* pool, int index);
--- a/src/share/vm/interpreter/linkResolver.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/linkResolver.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -67,6 +67,15 @@
   set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
 }
 
+void CallInfo::set_dynamic(methodHandle resolved_method, TRAPS) {
+  assert(resolved_method->is_method_handle_invoke(), "");
+  KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
+  assert(resolved_klass == resolved_method->method_holder(), "");
+  int vtable_index = methodOopDesc::nonvirtual_vtable_index;
+  assert(resolved_method->vtable_index() == vtable_index, "");
+  set_common(resolved_klass, KlassHandle(), resolved_method, resolved_method, vtable_index, CHECK);
+}
+
 void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
   assert(resolved_method->signature() == selected_method->signature(), "signatures must correspond");
   _resolved_klass  = resolved_klass;
@@ -176,9 +185,20 @@
                                           KlassHandle klass, symbolHandle name, symbolHandle signature,
                                           KlassHandle current_klass,
                                           TRAPS) {
-  if (EnableMethodHandles && MethodHandles::enabled() &&
+  if (EnableMethodHandles &&
       klass() == SystemDictionary::MethodHandle_klass() &&
       methodOopDesc::is_method_handle_invoke_name(name())) {
+    if (!MethodHandles::enabled()) {
+      // Make sure the Java part of the runtime has been booted up.
+      klassOop natives = SystemDictionary::MethodHandleNatives_klass();
+      if (natives == NULL || instanceKlass::cast(natives)->is_not_initialized()) {
+        SystemDictionary::resolve_or_fail(vmSymbolHandles::sun_dyn_MethodHandleNatives(),
+                                          Handle(),
+                                          Handle(),
+                                          true,
+                                          CHECK);
+      }
+    }
     methodOop result_oop = SystemDictionary::find_method_handle_invoke(name,
                                                                        signature,
                                                                        current_klass,
@@ -1065,7 +1085,7 @@
   if (resolved_method.is_null()) {
     THROW(vmSymbols::java_lang_InternalError());
   }
-  result.set_virtual(resolved_klass, KlassHandle(), resolved_method, resolved_method, resolved_method->vtable_index(), CHECK);
+  result.set_dynamic(resolved_method, CHECK);
 }
 
 //------------------------------------------------------------------------------------------------------------------------
--- a/src/share/vm/interpreter/linkResolver.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/linkResolver.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -73,6 +73,7 @@
   void         set_static(   KlassHandle resolved_klass,                             methodHandle resolved_method                                                , TRAPS);
   void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method                  , TRAPS);
   void         set_virtual(  KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS);
+  void         set_dynamic(                                                          methodHandle resolved_method,                                                 TRAPS);
   void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS);
 
   friend class LinkResolver;
--- a/src/share/vm/interpreter/rewriter.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/rewriter.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -32,12 +32,17 @@
 void Rewriter::compute_index_maps() {
   const int length  = _pool->length();
   init_cp_map(length);
+  jint tag_mask = 0;
   for (int i = 0; i < length; i++) {
     int tag = _pool->tag_at(i).value();
+    tag_mask |= (1 << tag);
     switch (tag) {
       case JVM_CONSTANT_InterfaceMethodref:
       case JVM_CONSTANT_Fieldref          : // fall through
       case JVM_CONSTANT_Methodref         : // fall through
+      case JVM_CONSTANT_MethodHandle      : // fall through
+      case JVM_CONSTANT_MethodType        : // fall through
+      case JVM_CONSTANT_InvokeDynamic     : // fall through
         add_cp_cache_entry(i);
         break;
     }
@@ -45,6 +50,8 @@
 
   guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1),
             "all cp cache indexes fit in a u2");
+
+  _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
 }
 
 
@@ -57,6 +64,28 @@
   constantPoolCacheOop cache =
       oopFactory::new_constantPoolCache(length, methodOopDesc::IsUnsafeConc, CHECK);
   cache->initialize(_cp_cache_map);
+
+  // Don't bother with the next pass if there is no JVM_CONSTANT_InvokeDynamic.
+  if (_have_invoke_dynamic) {
+    for (int i = 0; i < length; i++) {
+      int pool_index = cp_cache_entry_pool_index(i);
+      if (pool_index >= 0 &&
+          _pool->tag_at(pool_index).is_invoke_dynamic()) {
+        int bsm_index = _pool->invoke_dynamic_bootstrap_method_ref_index_at(pool_index);
+        if (bsm_index != 0) {
+          assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant");
+          // There is a CP cache entry holding the BSM for these calls.
+          int bsm_cache_index = cp_entry_to_cp_cache(bsm_index);
+          cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index);
+        } else {
+          // There is no CP cache entry holding the BSM for these calls.
+          // We will need to look for a class-global BSM, later.
+          guarantee(AllowTransitionalJSR292, "");
+        }
+      }
+    }
+  }
+
   _pool->set_cache(cache);
   cache->set_constant_pool(_pool());
 }
@@ -131,6 +160,27 @@
 }
 
 
+// Rewrite some ldc bytecodes to _fast_aldc
+void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
+  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
+  address p = bcp + offset;
+  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
+  constantTag tag = _pool->tag_at(cp_index).value();
+  if (tag.is_method_handle() || tag.is_method_type()) {
+    int cache_index = cp_entry_to_cp_cache(cp_index);
+    if (is_wide) {
+      (*bcp) = Bytecodes::_fast_aldc_w;
+      assert(cache_index == (u2)cache_index, "");
+      Bytes::put_native_u2(p, cache_index);
+    } else {
+      (*bcp) = Bytecodes::_fast_aldc;
+      assert(cache_index == (u1)cache_index, "");
+      (*p) = (u1)cache_index;
+    }
+  }
+}
+
+
 // Rewrites a method given the index_map information
 void Rewriter::scan_method(methodOop method) {
 
@@ -198,6 +248,12 @@
         case Bytecodes::_invokedynamic:
           rewrite_invokedynamic(bcp, prefix_length+1);
           break;
+        case Bytecodes::_ldc:
+          maybe_rewrite_ldc(bcp, prefix_length+1, false);
+          break;
+        case Bytecodes::_ldc_w:
+          maybe_rewrite_ldc(bcp, prefix_length+1, true);
+          break;
         case Bytecodes::_jsr            : // fall through
         case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
         case Bytecodes::_monitorenter   : // fall through
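
Note: maybe_rewrite_ldc() above patches bytecode in place — the opcode becomes _fast_aldc or _fast_aldc_w and the constant-pool index operand is overwritten with the CP-cache index (a u1 for the narrow form, a native-order u2 for the wide form). A minimal sketch of the narrow-form patch, with illustrative opcode values rather than the real HotSpot ones:

  #include <cassert>
  #include <cstdint>

  enum { kLdc = 0x12, kFastAldc = 0xE0 };  // kFastAldc value is an assumption

  static void rewrite_narrow_ldc(std::uint8_t* bcp, int cache_index) {
    assert(bcp[0] == kLdc && cache_index == (std::uint8_t)cache_index);
    bcp[0] = kFastAldc;                    // swap the opcode in place
    bcp[1] = (std::uint8_t)cache_index;    // operand now indexes the CP cache
  }
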
--- a/src/share/vm/interpreter/rewriter.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/rewriter.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -32,6 +32,7 @@
   objArrayHandle      _methods;
   intArray            _cp_map;
   intStack            _cp_cache_map;
+  bool                _have_invoke_dynamic;
 
   void init_cp_map(int length) {
     _cp_map.initialize(length, -1);
@@ -56,6 +57,22 @@
     return cache_index;
   }
 
+  // Access the contents of _cp_cache_map to determine CP cache layout.
+  int cp_cache_entry_pool_index(int cache_index) {
+    int cp_index = _cp_cache_map[cache_index];
+    if ((cp_index & _secondary_entry_tag) != 0)
+      return -1;
+    else
+      return cp_index;
+  }
+  int cp_cache_secondary_entry_main_index(int cache_index) {
+    int cp_index = _cp_cache_map[cache_index];
+    if ((cp_index & _secondary_entry_tag) == 0)
+      return -1;
+    else
+      return (cp_index - _secondary_entry_tag);
+  }
+
   // All the work goes in here:
   Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS);
 
@@ -66,6 +83,7 @@
   void rewrite_Object_init(methodHandle m, TRAPS);
   void rewrite_member_reference(address bcp, int offset);
   void rewrite_invokedynamic(address bcp, int offset);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
 
  public:
   // Driver routine:
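
Note: the two accessors added above distinguish main from secondary cache entries by a tag mixed into the index stored in _cp_cache_map. A minimal sketch of that convention, assuming _secondary_entry_tag is a single high bit that real constant-pool indices never set (the actual value lives elsewhere in the sources):

  #include <cstdint>

  const std::int32_t kSecondaryEntryTag = 1 << 30;

  std::int32_t entry_pool_index(std::int32_t raw) {           // main entry: plain CP index
    return (raw & kSecondaryEntryTag) ? -1 : raw;
  }
  std::int32_t secondary_entry_main_index(std::int32_t raw) { // secondary: tagged main index
    return (raw & kSecondaryEntryTag) ? (raw - kSecondaryEntryTag) : -1;
  }
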
--- a/src/share/vm/interpreter/templateTable.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/templateTable.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -507,6 +507,9 @@
   def(Bytecodes::_fast_linearswitch   , ubcp|disp|____|____, itos, vtos, fast_linearswitch   ,  _           );
   def(Bytecodes::_fast_binaryswitch   , ubcp|disp|____|____, itos, vtos, fast_binaryswitch   ,  _           );
 
+  def(Bytecodes::_fast_aldc           , ubcp|____|clvm|____, vtos, atos, fast_aldc           ,  false       );
+  def(Bytecodes::_fast_aldc_w         , ubcp|____|clvm|____, vtos, atos, fast_aldc           ,  true        );
+
   def(Bytecodes::_return_register_finalizer , ____|disp|clvm|____, vtos, vtos, _return       ,  vtos        );
 
   def(Bytecodes::_shouldnotreachhere   , ____|____|____|____, vtos, vtos, shouldnotreachhere ,  _           );
--- a/src/share/vm/interpreter/templateTable.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/interpreter/templateTable.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -123,6 +123,7 @@
   static void sipush();
   static void ldc(bool wide);
   static void ldc2_w();
+  static void fast_aldc(bool wide);
 
   static void locals_index(Register reg, int offset = 1);
   static void iload();
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -284,12 +284,19 @@
         committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                 cur_committed.end()));
       if (!uncommit_region.is_empty()) {
-        if (!os::uncommit_memory((char*)uncommit_region.start(),
-                                 uncommit_region.byte_size())) {
-          assert(false, "Card table contraction failed");
-          // The call failed so don't change the end of the
-          // committed region.  This is better than taking the
-          // VM down.
+        // It is not safe to uncommit cards if the boundary between
+        // the generations is moving.  A shrink can uncommit cards
+        // owned by generation A but being used by generation B.
+        if (!UseAdaptiveGCBoundary) {
+          if (!os::uncommit_memory((char*)uncommit_region.start(),
+                                   uncommit_region.byte_size())) {
+            assert(false, "Card table contraction failed");
+            // The call failed so don't change the end of the
+            // committed region.  This is better than taking the
+            // VM down.
+            new_end_aligned = _committed[ind].end();
+          }
+        } else {
           new_end_aligned = _committed[ind].end();
         }
       }
@@ -297,6 +304,19 @@
     // In any case, we can reset the end of the current committed entry.
     _committed[ind].set_end(new_end_aligned);
 
+#ifdef ASSERT
+    // Check that the last card in the new region is committed according
+    // to the tables.
+    bool covered = false;
+    for (int cr = 0; cr < _cur_covered_regions; cr++) {
+      if (_committed[cr].contains(new_end - 1)) {
+        covered = true;
+        break;
+      }
+    }
+    assert(covered, "Card for end of new region not committed");
+#endif
+
     // The default of 0 is not necessarily clean cards.
     jbyte* entry;
     if (old_region.last() < _whole_heap.start()) {
@@ -354,6 +374,9 @@
                   addr_for((jbyte*) _committed[ind].start()),
                   addr_for((jbyte*) _committed[ind].last()));
   }
+  // Touch the last card of the covered region to show that it
+  // is committed (or SEGV).
+  debug_only(*byte_for(_covered[ind].last());)
   debug_only(verify_guard();)
 }
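
Note: the debug_only(*byte_for(...)) read above deliberately touches the last card byte so that, in debug builds, an uncommitted mapping faults (SEGV) at resize time instead of silently corrupting the card table later. The same trick in standalone form, as a sketch rather than the HotSpot helper:

  static void touch_to_assert_committed(volatile const unsigned char* p) {
    (void)*p;  // the volatile read traps immediately if the page is not committed
  }
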
 
--- a/src/share/vm/memory/genCollectedHeap.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -179,9 +179,14 @@
     }
     n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
-  assert(total_reserved % pageSize == 0, "Gen size");
+  assert(total_reserved % pageSize == 0,
+         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
+                 SIZE_FORMAT, total_reserved, pageSize));
   total_reserved += perm_gen_spec->max_size();
-  assert(total_reserved % pageSize == 0, "Perm Gen size");
+  assert(total_reserved % pageSize == 0,
+         err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
+                 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
+                 pageSize, perm_gen_spec->max_size()));
 
   if (total_reserved < perm_gen_spec->max_size()) {
     vm_exit_during_initialization(overflow_msg);
--- a/src/share/vm/oops/constantPoolKlass.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/oops/constantPoolKlass.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -372,6 +372,17 @@
         entry->print_value_on(st);
         }
         break;
+      case JVM_CONSTANT_MethodHandle :
+        st->print("ref_kind=%d", cp->method_handle_ref_kind_at(index));
+        st->print(" ref_index=%d", cp->method_handle_index_at(index));
+        break;
+      case JVM_CONSTANT_MethodType :
+        st->print("signature_index=%d", cp->method_type_index_at(index));
+        break;
+      case JVM_CONSTANT_InvokeDynamic :
+        st->print("bootstrap_method_index=%d", cp->invoke_dynamic_bootstrap_method_ref_index_at(index));
+        st->print(" name_and_type_index=%d", cp->invoke_dynamic_name_and_type_ref_index_at(index));
+        break;
       default:
         ShouldNotReachHere();
         break;
@@ -437,6 +448,7 @@
           // can be non-perm, can be non-instance (array)
         }
       }
+      // FIXME: verify JSR 292 tags JVM_CONSTANT_MethodHandle, etc.
       base++;
     }
     guarantee(cp->tags()->is_perm(),         "should be in permspace");
--- a/src/share/vm/oops/constantPoolOop.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/oops/constantPoolOop.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -264,10 +264,15 @@
 int constantPoolOopDesc::impl_name_and_type_ref_index_at(int which, bool uncached) {
   int i = which;
   if (!uncached && cache() != NULL) {
-    if (constantPoolCacheOopDesc::is_secondary_index(which))
+    if (constantPoolCacheOopDesc::is_secondary_index(which)) {
       // Invokedynamic indexes are always processed in native order
       // so there is no question of reading a native u2 in Java order here.
-      return cache()->main_entry_at(which)->constant_pool_index();
+      int pool_index = cache()->main_entry_at(which)->constant_pool_index();
+      if (tag_at(pool_index).is_invoke_dynamic())
+        pool_index = invoke_dynamic_name_and_type_ref_index_at(pool_index);
+      assert(tag_at(pool_index).is_name_and_type(), "");
+      return pool_index;
+    }
     // change byte-ordering and go via cache
     i = remap_instruction_operand_from_cache(which);
   } else {
@@ -358,6 +363,11 @@
   return klass_at_noresolve(ref_index);
 }
 
+symbolOop constantPoolOopDesc::uncached_klass_ref_at_noresolve(int which) {
+  jint ref_index = uncached_klass_ref_index_at(which);
+  return klass_at_noresolve(ref_index);
+}
+
 char* constantPoolOopDesc::string_at_noresolve(int which) {
   // Test entry type in case string is resolved while in here.
   oop entry = *(obj_at_addr(which));
@@ -384,6 +394,119 @@
   }
 }
 
+oop constantPoolOopDesc::resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS) {
+  oop result_oop = NULL;
+  if (cache_index >= 0) {
+    assert(index < 0, "only one kind of index at a time");
+    ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index);
+    result_oop = cpc_entry->f1();
+    if (result_oop != NULL) {
+      return result_oop;  // that was easy...
+    }
+    index = cpc_entry->constant_pool_index();
+  }
+
+  int tag_value = this_oop->tag_at(index).value();
+  switch (tag_value) {
+
+  case JVM_CONSTANT_UnresolvedClass:
+  case JVM_CONSTANT_UnresolvedClassInError:
+  case JVM_CONSTANT_Class:
+    {
+      klassOop resolved = klass_at_impl(this_oop, index, CHECK_NULL);
+      // ldc wants the java mirror.
+      result_oop = resolved->klass_part()->java_mirror();
+      break;
+    }
+
+  case JVM_CONSTANT_String:
+  case JVM_CONSTANT_UnresolvedString:
+    if (this_oop->is_pseudo_string_at(index)) {
+      result_oop = this_oop->pseudo_string_at(index);
+      break;
+    }
+    result_oop = string_at_impl(this_oop, index, CHECK_NULL);
+    break;
+
+  case JVM_CONSTANT_Object:
+    result_oop = this_oop->object_at(index);
+    break;
+
+  case JVM_CONSTANT_MethodHandle:
+    {
+      int ref_kind                 = this_oop->method_handle_ref_kind_at(index);
+      int callee_index             = this_oop->method_handle_klass_index_at(index);
+      symbolHandle name(THREAD,      this_oop->method_handle_name_ref_at(index));
+      symbolHandle signature(THREAD, this_oop->method_handle_signature_ref_at(index));
+      if (PrintMiscellaneous)
+        tty->print_cr("resolve JVM_CONSTANT_MethodHandle:%d [%d/%d/%d] %s.%s",
+                      ref_kind, index, this_oop->method_handle_index_at(index),
+                      callee_index, name->as_C_string(), signature->as_C_string());
+      KlassHandle callee;
+      { klassOop k = klass_at_impl(this_oop, callee_index, CHECK_NULL);
+        callee = KlassHandle(THREAD, k);
+      }
+      KlassHandle klass(THREAD, this_oop->pool_holder());
+      Handle value = SystemDictionary::link_method_handle_constant(klass, ref_kind,
+                                                                   callee, name, signature,
+                                                                   CHECK_NULL);
+      result_oop = value();
+      // FIXME: Uniquify errors, using SystemDictionary::find_resolution_error.
+      break;
+    }
+
+  case JVM_CONSTANT_MethodType:
+    {
+      symbolHandle signature(THREAD, this_oop->method_type_signature_at(index));
+      if (PrintMiscellaneous)
+        tty->print_cr("resolve JVM_CONSTANT_MethodType [%d/%d] %s",
+                      index, this_oop->method_type_index_at(index),
+                      signature->as_C_string());
+      KlassHandle klass(THREAD, this_oop->pool_holder());
+      bool ignore_is_on_bcp = false;
+      Handle value = SystemDictionary::find_method_handle_type(signature,
+                                                               klass,
+                                                               ignore_is_on_bcp,
+                                                               CHECK_NULL);
+      result_oop = value();
+      // FIXME: Uniquify errors, using SystemDictionary::find_resolution_error.
+      break;
+    }
+
+    /* maybe some day
+  case JVM_CONSTANT_Integer:
+  case JVM_CONSTANT_Float:
+  case JVM_CONSTANT_Long:
+  case JVM_CONSTANT_Double:
+    result_oop = java_lang_boxing_object::create(...);
+    break;
+    */
+
+  default:
+    DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d/%d] = %d",
+                              this_oop(), index, cache_index, tag_value) );
+    assert(false, "unexpected constant tag");
+    break;
+  }
+
+  if (cache_index >= 0) {
+    // Cache the oop here also.
+    Handle result(THREAD, result_oop);
+    result_oop = NULL;  // safety
+    ObjectLocker ol(this_oop, THREAD);
+    ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index);
+    oop result_oop2 = cpc_entry->f1();
+    if (result_oop2 != NULL) {
+      // Race condition:  May already be filled in while we were trying to lock.
+      return result_oop2;
+    }
+    cpc_entry->set_f1(result());
+    return result();
+  } else {
+    return result_oop;
+  }
+}
+
 oop constantPoolOopDesc::string_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
   oop entry = *(this_oop->obj_at_addr(which));
   if (entry->is_symbol()) {
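
Note: the caching tail of resolve_constant_at_impl() above is a check / resolve / lock / re-check sequence — resolution runs without the lock, and the ObjectLocker only arbitrates which thread's (equivalent) result is published into f1. A standalone sketch of the same pattern, with std::mutex standing in for ObjectLocker and an atomic pointer for the f1 slot:

  #include <atomic>
  #include <mutex>

  struct Entry { std::atomic<void*> f1{nullptr}; };

  void* resolve_cached(Entry& e, std::mutex& lock, void* (*resolve)()) {
    void* v = e.f1.load();
    if (v != nullptr) return v;            // fast path: already published
    void* result = resolve();              // slow path, done without the lock
    std::lock_guard<std::mutex> guard(lock);
    v = e.f1.load();
    if (v != nullptr) return v;            // another thread won the race
    e.f1.store(result);                    // publish under the lock
    return result;
  }
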
@@ -690,6 +813,41 @@
     }
   } break;
 
+  case JVM_CONSTANT_MethodType:
+  {
+    int k1 = method_type_index_at(index1);
+    int k2 = cp2->method_type_index_at(index2);
+    if (k1 == k2) {
+      return true;
+    }
+  } break;
+
+  case JVM_CONSTANT_MethodHandle:
+  {
+    int k1 = method_handle_ref_kind_at(index1);
+    int k2 = cp2->method_handle_ref_kind_at(index2);
+    if (k1 == k2) {
+      int i1 = method_handle_index_at(index1);
+      int i2 = cp2->method_handle_index_at(index2);
+      if (i1 == i2) {
+        return true;
+      }
+    }
+  } break;
+
+  case JVM_CONSTANT_InvokeDynamic:
+  {
+    int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1);
+    int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2);
+    if (k1 == k2) {
+      int i1 = invoke_dynamic_name_and_type_ref_index_at(index1);
+      int i2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
+      if (i1 == i2) {
+        return true;
+      }
+    }
+  } break;
+
   case JVM_CONSTANT_UnresolvedString:
   {
     symbolOop s1 = unresolved_string_at(index1);
@@ -863,6 +1021,26 @@
     to_cp->symbol_at_put(to_i, s);
   } break;
 
+  case JVM_CONSTANT_MethodType:
+  {
+    jint k = method_type_index_at(from_i);
+    to_cp->method_type_index_at_put(to_i, k);
+  } break;
+
+  case JVM_CONSTANT_MethodHandle:
+  {
+    int k1 = method_handle_ref_kind_at(from_i);
+    int k2 = method_handle_index_at(from_i);
+    to_cp->method_handle_index_at_put(to_i, k1, k2);
+  } break;
+
+  case JVM_CONSTANT_InvokeDynamic:
+  {
+    int k1 = invoke_dynamic_bootstrap_method_ref_index_at(from_i);
+    int k2 = invoke_dynamic_name_and_type_ref_index_at(from_i);
+    to_cp->invoke_dynamic_at_put(to_i, k1, k2);
+  } break;
+
   // Invalid is used as the tag for the second constant pool entry
   // occupied by JVM_CONSTANT_Double or JVM_CONSTANT_Long. It should
   // not be seen by itself.
@@ -1066,14 +1244,19 @@
     case JVM_CONSTANT_UnresolvedClassInError:
     case JVM_CONSTANT_StringIndex:
     case JVM_CONSTANT_UnresolvedString:
+    case JVM_CONSTANT_MethodType:
       return 3;
 
+    case JVM_CONSTANT_MethodHandle:
+      return 4; // tag, ref_kind, ref_index
+
     case JVM_CONSTANT_Integer:
     case JVM_CONSTANT_Float:
     case JVM_CONSTANT_Fieldref:
     case JVM_CONSTANT_Methodref:
     case JVM_CONSTANT_InterfaceMethodref:
     case JVM_CONSTANT_NameAndType:
+    case JVM_CONSTANT_InvokeDynamic:
       return 5;
 
     case JVM_CONSTANT_Long:
@@ -1271,6 +1454,31 @@
         DBG(printf("JVM_CONSTANT_StringIndex: %hd", idx1));
         break;
       }
+      case JVM_CONSTANT_MethodHandle: {
+        *bytes = JVM_CONSTANT_MethodHandle;
+        int kind = method_handle_ref_kind_at(idx);
+        idx1 = method_handle_index_at(idx);
+        *(bytes+1) = (unsigned char) kind;
+        Bytes::put_Java_u2((address) (bytes+2), idx1);
+        DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1));
+        break;
+      }
+      case JVM_CONSTANT_MethodType: {
+        *bytes = JVM_CONSTANT_MethodType;
+        idx1 = method_type_index_at(idx);
+        Bytes::put_Java_u2((address) (bytes+1), idx1);
+        DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
+        break;
+      }
+      case JVM_CONSTANT_InvokeDynamic: {
+        *bytes = JVM_CONSTANT_InvokeDynamic;
+        idx1 = invoke_dynamic_bootstrap_method_ref_index_at(idx);
+        idx2 = invoke_dynamic_name_and_type_ref_index_at(idx);
+        Bytes::put_Java_u2((address) (bytes+1), idx1);
+        Bytes::put_Java_u2((address) (bytes+3), idx2);
+        DBG(printf("JVM_CONSTANT_InvokeDynamic: %hd %hd", idx1, idx2));
+        break;
+      }
     }
     DBG(printf("\n"));
     bytes += ent_size;
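
Note: the serialization hunk above mirrors the size table earlier in this file — JVM_CONSTANT_MethodType is tag(1) + signature_index(2) = 3 bytes, JVM_CONSTANT_MethodHandle is tag(1) + ref_kind(1) + ref_index(2) = 4, and JVM_CONSTANT_InvokeDynamic is tag(1) + two u2 indices = 5. A sketch of the MethodHandle case with a big-endian u2 writer (put_u2_be and the function name are illustrative, not the HotSpot Bytes API):

  #include <cstdint>

  static void put_u2_be(unsigned char* p, std::uint16_t v) {
    p[0] = (unsigned char)(v >> 8);   // class-file (Java) byte order is big-endian
    p[1] = (unsigned char)(v & 0xFF);
  }

  static unsigned char* write_method_handle(unsigned char* bytes,
                                            std::uint8_t tag, std::uint8_t ref_kind,
                                            std::uint16_t ref_index) {
    bytes[0] = tag;
    bytes[1] = ref_kind;
    put_u2_be(bytes + 2, ref_index);
    return bytes + 4;                 // matches the size-table entry above
  }
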
--- a/src/share/vm/oops/constantPoolOop.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/oops/constantPoolOop.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -146,6 +146,21 @@
     oop_store_without_check(obj_at_addr(which), oop(s));
   }
 
+  void method_handle_index_at_put(int which, int ref_kind, int ref_index) {
+    tag_at_put(which, JVM_CONSTANT_MethodHandle);
+    *int_at_addr(which) = ((jint) ref_index<<16) | ref_kind;
+  }
+
+  void method_type_index_at_put(int which, int ref_index) {
+    tag_at_put(which, JVM_CONSTANT_MethodType);
+    *int_at_addr(which) = ref_index;
+  }
+
+  void invoke_dynamic_at_put(int which, int bootstrap_method_index, int name_and_type_index) {
+    tag_at_put(which, JVM_CONSTANT_InvokeDynamic);
+    *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_method_index;
+  }
+
   // Temporary until actual use
   void unresolved_string_at_put(int which, symbolOop s) {
     *obj_at_addr(which) = NULL;
@@ -357,6 +372,46 @@
     return *int_at_addr(which);
   }
 
+  int method_handle_ref_kind_at(int which) {
+    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
+    return extract_low_short_from_int(*int_at_addr(which));  // mask out unwanted ref_index bits
+  }
+  int method_handle_index_at(int which) {
+    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
+    return extract_high_short_from_int(*int_at_addr(which));  // shift out unwanted ref_kind bits
+  }
+  int method_type_index_at(int which) {
+    assert(tag_at(which).is_method_type(), "Corrupted constant pool");
+    return *int_at_addr(which);
+  }
+  // Derived queries:
+  symbolOop method_handle_name_ref_at(int which) {
+    int member = method_handle_index_at(which);
+    return impl_name_ref_at(member, true);
+  }
+  symbolOop method_handle_signature_ref_at(int which) {
+    int member = method_handle_index_at(which);
+    return impl_signature_ref_at(member, true);
+  }
+  int method_handle_klass_index_at(int which) {
+    int member = method_handle_index_at(which);
+    return impl_klass_ref_index_at(member, true);
+  }
+  symbolOop method_type_signature_at(int which) {
+    int sym = method_type_index_at(which);
+    return symbol_at(sym);
+  }
+  int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
+    assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
+    jint ref_index = *int_at_addr(which);
+    return extract_low_short_from_int(ref_index);
+  }
+  int invoke_dynamic_name_and_type_ref_index_at(int which) {
+    assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
+    jint ref_index = *int_at_addr(which);
+    return extract_high_short_from_int(ref_index);
+  }
+
   // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
   // name_and_type_ref_index_at) all expect to be passed indices obtained
   // directly from the bytecode, and extracted according to java byte order.
@@ -388,6 +443,17 @@
     resolve_string_constants_impl(h_this, CHECK);
   }
 
+  // Resolve late bound constants.
+  oop resolve_constant_at(int index, TRAPS) {
+    constantPoolHandle h_this(THREAD, this);
+    return resolve_constant_at_impl(h_this, index, -1, THREAD);
+  }
+
+  oop resolve_cached_constant_at(int cache_index, TRAPS) {
+    constantPoolHandle h_this(THREAD, this);
+    return resolve_constant_at_impl(h_this, -1, cache_index, THREAD);
+  }
+
   // Klass name matches name at offset
   bool klass_name_at_matches(instanceKlassHandle k, int which);
 
@@ -420,6 +486,7 @@
   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
   // future by other Java code. These take constant pool indices rather than possibly-byte-swapped
   // constant pool cache indices as do the peer methods above.
+  symbolOop uncached_klass_ref_at_noresolve(int which);
   symbolOop uncached_name_ref_at(int which)                 { return impl_name_ref_at(which, true); }
   symbolOop uncached_signature_ref_at(int which)            { return impl_signature_ref_at(which, true); }
   int       uncached_klass_ref_index_at(int which)          { return impl_klass_ref_index_at(which, true); }
@@ -436,6 +503,8 @@
 
 #ifdef ASSERT
   enum { CPCACHE_INDEX_TAG = 0x10000 };  // helps keep CP cache indices distinct from CP indices
+#else
+  enum { CPCACHE_INDEX_TAG = 0 };        // in product mode, this zero value is a no-op
 #endif //ASSERT
 
  private:
@@ -469,6 +538,8 @@
   // Resolve string constants (to prevent allocation during compilation)
   static void resolve_string_constants_impl(constantPoolHandle this_oop, TRAPS);
 
+  static oop resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS);
+
  public:
   // Merging constantPoolOop support:
   bool compare_entry_to(int index1, constantPoolHandle cp2, int index2, TRAPS);
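
Note: the new MethodHandle and InvokeDynamic entries above pack two 16-bit values into the pool's single 32-bit slot and recover them with the extract_low/high_short helpers. A self-contained sketch of that encoding (the helper names here are illustrative):

  #include <cstdint>

  static std::int32_t pack_pair(std::uint16_t high, std::uint16_t low) {
    return (std::int32_t)(((std::uint32_t)high << 16) | low);
  }
  static std::uint16_t low_half (std::int32_t v) { return (std::uint16_t)((std::uint32_t)v & 0xFFFF); }
  static std::uint16_t high_half(std::int32_t v) { return (std::uint16_t)((std::uint32_t)v >> 16); }

  // Usage matching method_handle_index_at_put(): slot = pack_pair(ref_index, ref_kind);
  // ref_kind comes back via low_half(slot), ref_index via high_half(slot).
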
--- a/src/share/vm/oops/cpCacheOop.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/oops/cpCacheOop.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -134,7 +134,7 @@
 void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                         methodHandle method,
                                         int vtable_index) {
-
+  assert(!is_secondary_entry(), "");
   assert(method->interpreter_entry() != NULL, "should have been set at this point");
   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
   bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);
@@ -142,7 +142,6 @@
   int byte_no = -1;
   bool needs_vfinal_flag = false;
   switch (invoke_code) {
-    case Bytecodes::_invokedynamic:
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokeinterface: {
         if (method->can_be_statically_bound()) {
@@ -155,6 +154,23 @@
         byte_no = 2;
         break;
     }
+
+    case Bytecodes::_invokedynamic:  // similar to _invokevirtual
+      if (TraceInvokeDynamic) {
+        tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
+                      (is_secondary_entry() ? " secondary" : ""),
+                      (intptr_t)method(), vtable_index);
+        method->print();
+        this->print(tty, 0);
+      }
+      assert(method->can_be_statically_bound(), "must be a MH invoker method");
+      assert(AllowTransitionalJSR292 || _f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
+      set_f1(method());
+      needs_vfinal_flag = false;  // _f2 is not an oop
+      assert(!is_vfinal(), "f2 not an oop");
+      byte_no = 1;  // coordinate this with bytecode_number & is_resolved
+      break;
+
     case Bytecodes::_invokespecial:
       // Preserve the value of the vfinal flag on invokevirtual bytecode
       // which may be shared with this constant pool cache entry.
@@ -209,6 +225,7 @@
 
 
 void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
+  assert(!is_secondary_entry(), "");
   klassOop interf = method->method_holder();
   assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
   set_f1(interf);
@@ -218,8 +235,23 @@
 }
 
 
+void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
+  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
+  assert(_f2 == 0, "initialize once");
+  assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");
+  set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
+}
+
+int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
+  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
+  intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
+  assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");
+  return (int) bsm_cache_index;
+}
+
 void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
                                               methodHandle signature_invoker) {
+  assert(is_secondary_entry(), "");
   int param_size = signature_invoker->size_of_parameters();
   assert(param_size >= 1, "method argument size must include MH.this");
   param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
@@ -227,7 +259,6 @@
     // racing threads might be trying to install their own favorites
     set_f1(call_site());
   }
-  //set_f2(0);
   bool is_final = true;
   assert(signature_invoker->is_final_method(), "is_final");
   set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
@@ -417,14 +448,14 @@
   // print separator
   if (index == 0) tty->print_cr("                 -------------");
   // print entry
-  tty->print_cr("%3d  (%08x)  ", index, this);
+  tty->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
   if (is_secondary_entry())
     tty->print_cr("[%5d|secondary]", main_entry_index());
   else
     tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
-  tty->print_cr("                 [   %08x]", (address)(oop)_f1);
-  tty->print_cr("                 [   %08x]", _f2);
-  tty->print_cr("                 [   %08x]", _flags);
+  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)(oop)_f1);
+  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
+  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
   tty->print_cr("                 -------------");
 }
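
Note: the two bootstrap-method accessors above bias the cache index stored in _f2 by CPCACHE_INDEX_TAG so that a zero _f2 still reads as "not yet initialized" in debug builds (per the constantPoolOop.hpp change, the product-mode tag is 0 and the bias is a no-op). A sketch of the biasing under those assumptions:

  #include <cassert>
  #include <cstdint>

  const std::intptr_t kIndexTag = 0x10000;  // debug-build CPCACHE_INDEX_TAG value

  void set_bsm_index(std::intptr_t& f2, int index) {
    assert(f2 == 0 && index == (int)(std::uint16_t)index);  // set once, fits in u2
    f2 = index + kIndexTag;
  }
  int get_bsm_index(std::intptr_t f2) {
    std::intptr_t index = f2 - kIndexTag;
    assert(index == (std::intptr_t)(std::uint16_t)index);   // still a plausible u2
    return (int)index;
  }
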
 
--- a/src/share/vm/oops/cpCacheOop.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/oops/cpCacheOop.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -110,6 +110,7 @@
 class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
   friend class constantPoolCacheKlass;
+  friend class constantPoolOopDesc;  // resolve_constant_at_impl => set_f1
 
  private:
   volatile intx     _indices;  // constant pool index & rewrite bytecodes
@@ -184,6 +185,10 @@
     methodHandle signature_invoker               // determines signature information
   );
 
+  // For JVM_CONSTANT_InvokeDynamic cache entries:
+  void initialize_bootstrap_method_index_in_cache(int bsm_cache_index);
+  int  bootstrap_method_index_in_cache();
+
   void set_parameter_size(int value) {
     assert(parameter_size() == 0 || parameter_size() == value,
            "size must not change");
@@ -206,6 +211,7 @@
       case Bytecodes::_getfield        :    // fall through
       case Bytecodes::_invokespecial   :    // fall through
       case Bytecodes::_invokestatic    :    // fall through
+      case Bytecodes::_invokedynamic   :    // fall through
       case Bytecodes::_invokeinterface : return 1;
       case Bytecodes::_putstatic       :    // fall through
       case Bytecodes::_putfield        :    // fall through
@@ -233,6 +239,7 @@
   Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((_indices >> 16) & 0xFF); }
   Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((_indices >> 24) & 0xFF); }
   volatile oop  f1() const                       { return _f1; }
+  bool is_f1_null() const                        { return (oop)_f1 == NULL; }  // classifies a CPC entry as unbound
   intx f2() const                                { return _f2; }
   int  field_index() const;
   int  parameter_size() const                    { return _flags & 0xFF; }
--- a/src/share/vm/oops/methodOop.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/oops/methodOop.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -851,9 +851,15 @@
 // MethodHandleCompiler.
 // Must be consistent with MethodHandleCompiler::get_method_oop().
 bool methodOopDesc::is_method_handle_adapter() const {
-  return (is_method_handle_invoke_name(name()) &&
-          is_synthetic() &&
-          MethodHandleCompiler::klass_is_method_handle_adapter_holder(method_holder()));
+  if (is_synthetic() &&
+      !is_native() &&   // has code from MethodHandleCompiler
+      is_method_handle_invoke_name(name()) &&
+      MethodHandleCompiler::klass_is_method_handle_adapter_holder(method_holder())) {
+    assert(!is_method_handle_invoke(), "disjoint");
+    return true;
+  } else {
+    return false;
+  }
 }
 
 methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
--- a/src/share/vm/opto/callnode.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/callnode.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1524,7 +1524,7 @@
     ConnectionGraph *cgr = phase->C->congraph();
     PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
     if (cgr != NULL)
-      es = cgr->escape_state(obj_node(), phase);
+      es = cgr->escape_state(obj_node());
     if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
       // Mark it eliminated to update any counters
       this->set_eliminated();
@@ -1627,7 +1627,7 @@
     ConnectionGraph *cgr = phase->C->congraph();
     PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
     if (cgr != NULL)
-      es = cgr->escape_state(obj_node(), phase);
+      es = cgr->escape_state(obj_node());
     if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
       // Mark it eliminated to update any counters
       this->set_eliminated();
--- a/src/share/vm/opto/cfgnode.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/cfgnode.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -472,9 +472,7 @@
             assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
             // Break dead loop data path.
             // Eagerly replace phis with top to avoid phis copies generation.
-            igvn->add_users_to_worklist(n);
-            igvn->hash_delete(n); // Yank from hash before hacking edges
-            igvn->subsume_node(n, top);
+            igvn->replace_node(n, top);
             if( max != outcnt() ) {
               progress = true;
               j = refresh_out_pos(j);
@@ -518,18 +516,17 @@
         igvn->hash_delete(n); // Remove from worklist before modifying edges
         if( n->is_Phi() ) {   // Collapse all Phis
           // Eagerly replace phis to avoid copies generation.
-          igvn->add_users_to_worklist(n);
-          igvn->hash_delete(n); // Yank from hash before hacking edges
+          Node* in;
           if( cnt == 0 ) {
             assert( n->req() == 1, "No data inputs expected" );
-            igvn->subsume_node(n, parent_ctrl); // replaced by top
+            in = parent_ctrl; // replaced by parent control
           } else {
             assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
-            Node* in1 = n->in(1);               // replaced by unique input
-            if( n->as_Phi()->is_unsafe_data_reference(in1) )
-              in1 = phase->C->top();            // replaced by top
-            igvn->subsume_node(n, in1);
+            in = n->in(1);               // replaced by unique input
+            if( n->as_Phi()->is_unsafe_data_reference(in) )
+              in = phase->C->top();      // replaced by top
           }
+          igvn->replace_node(n, in);
         }
         else if( n->is_Region() ) { // Update all incoming edges
           assert( !igvn->eqv(n, this), "Must be removed from DefUse edges");
@@ -2127,7 +2124,7 @@
     // if it's not there, there's nothing to do.
     Node* fallthru = proj_out(0);
     if (fallthru != NULL) {
-      phase->is_IterGVN()->subsume_node(fallthru, in(0));
+      phase->is_IterGVN()->replace_node(fallthru, in(0));
     }
     return phase->C->top();
   }
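
Note: the edits in this file, and many of the opto changes that follow, are the same mechanical fold — the add_users_to_worklist / hash_delete / subsume_node triple becomes a single replace_node call. A toy sketch of what such a wrapper plausibly bundles (an assumption about its shape, not the PhaseIterGVN implementation):

  #include <set>
  #include <vector>

  struct Node { std::vector<Node*> uses; };

  struct IterGVN {
    std::set<Node*> worklist;   // nodes to revisit
    std::set<Node*> hash;       // value-numbering table (identity-keyed here)

    void replace_node(Node* oldn, Node* newn) {
      for (Node* u : oldn->uses) worklist.insert(u);  // users may now simplify
      hash.erase(oldn);                               // yank before edge surgery
      newn->uses.insert(newn->uses.end(),             // reroute all uses
                        oldn->uses.begin(), oldn->uses.end());
      oldn->uses.clear();                             // oldn is dead
    }
  };
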
--- a/src/share/vm/opto/compile.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/compile.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -637,34 +637,6 @@
   if (failing())  return;
   NOT_PRODUCT( verify_graph_edges(); )
 
-  // Perform escape analysis
-  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
-    TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
-    // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction.
-    PhaseGVN* igvn = initial_gvn();
-    Node* oop_null = igvn->zerocon(T_OBJECT);
-    Node* noop_null = igvn->zerocon(T_NARROWOOP);
-
-    _congraph = new(comp_arena()) ConnectionGraph(this);
-    bool has_non_escaping_obj = _congraph->compute_escape();
-
-#ifndef PRODUCT
-    if (PrintEscapeAnalysis) {
-      _congraph->dump();
-    }
-#endif
-    // Cleanup.
-    if (oop_null->outcnt() == 0)
-      igvn->hash_delete(oop_null);
-    if (noop_null->outcnt() == 0)
-      igvn->hash_delete(noop_null);
-
-    if (!has_non_escaping_obj) {
-      _congraph = NULL;
-    }
-
-    if (failing())  return;
-  }
   // Now optimize
   Optimize();
   if (failing())  return;
@@ -1601,6 +1573,20 @@
 
   if (failing())  return;
 
+  // Perform escape analysis
+  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
+    TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
+    ConnectionGraph::do_analysis(this, &igvn);
+
+    if (failing())  return;
+
+    igvn.optimize();
+    print_method("Iter GVN 3", 2);
+
+    if (failing())  return;
+
+  }
+
   // Loop transforms on the ideal graph.  Range Check Elimination,
   // peeling, unrolling, etc.
 
@@ -2000,6 +1986,17 @@
     }
   }
 
+#ifdef ASSERT
+  if( n->is_Mem() ) {
+    Compile* C = Compile::current();
+    int alias_idx = C->get_alias_index(n->as_Mem()->adr_type());
+    assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
+            // oop will be recorded in oop map if load crosses safepoint
+            n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
+                             LoadNode::is_immutable_value(n->in(MemNode::Address))),
+            "raw memory operations should have control edge");
+  }
+#endif
   // Count FPU ops and common calls, implements item (3)
   switch( nop ) {
   // Count all float operations that may use FPU
--- a/src/share/vm/opto/compile.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/compile.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -362,6 +362,7 @@
   Node*         macro_node(int idx)             { return _macro_nodes->at(idx); }
   Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
   ConnectionGraph* congraph()                   { return _congraph;}
+  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
   void add_macro_node(Node * n) {
     //assert(n->is_macro(), "must be a macro node");
     assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
--- a/src/share/vm/opto/doCall.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/doCall.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -343,7 +343,8 @@
   // being initialized.  Uncommon-trap for not-initialized static or
   // v-calls.  Let interface calls happen.
   ciInstanceKlass* holder_klass = dest_method->holder();
-  if (!holder_klass->is_initialized() &&
+  if (!holder_klass->is_being_initialized() &&
+      !holder_klass->is_initialized() &&
       !holder_klass->is_interface()) {
     uncommon_trap(Deoptimization::Reason_uninitialized,
                   Deoptimization::Action_reinterpret,
--- a/src/share/vm/opto/escape.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/escape.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -81,18 +81,18 @@
 }
 #endif
 
-ConnectionGraph::ConnectionGraph(Compile * C) :
+ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
   _processed(C->comp_arena()),
   _collecting(true),
   _compile(C),
+  _igvn(igvn),
   _node_map(C->comp_arena()) {
 
   _phantom_object = C->top()->_idx,
   add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true);
 
   // Add ConP(#NULL) and ConN(#NULL) nodes.
-  PhaseGVN* igvn = C->initial_gvn();
   Node* oop_null = igvn->zerocon(T_OBJECT);
   _oop_null = oop_null->_idx;
   assert(_oop_null < C->unique(), "should be created already");
@@ -182,7 +182,7 @@
     _processed.set(n->_idx);
 }
 
-PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform *phase) {
+PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) {
   uint idx = n->_idx;
   PointsToNode::EscapeState es;
 
@@ -207,22 +207,26 @@
   if (n->uncast()->_idx >= nodes_size())
     return PointsToNode::UnknownEscape;
 
+  PointsToNode::EscapeState orig_es = es;
+
   // compute max escape state of anything this node could point to
   VectorSet ptset(Thread::current()->resource_area());
-  PointsTo(ptset, n, phase);
+  PointsTo(ptset, n);
   for(VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) {
     uint pt = i.elem;
     PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
     if (pes > es)
       es = pes;
   }
-  // cache the computed escape state
-  assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
-  ptnode_adr(idx)->set_escape_state(es);
+  if (orig_es != es) {
+    // cache the computed escape state
+    assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
+    ptnode_adr(idx)->set_escape_state(es);
+  } // orig_es could be PointsToNode::UnknownEscape
   return es;
 }
 
-void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase) {
+void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n) {
   VectorSet visited(Thread::current()->resource_area());
   GrowableArray<uint>  worklist;
 
@@ -990,7 +994,7 @@
   GrowableArray<Node *>  memnode_worklist;
   GrowableArray<PhiNode *>  orig_phis;
 
-  PhaseGVN  *igvn = _compile->initial_gvn();
+  PhaseGVN  *igvn = _igvn;
   uint new_index_start = (uint) _compile->num_alias_types();
   Arena* arena = Thread::current()->resource_area();
   VectorSet visited(arena);
@@ -1012,7 +1016,7 @@
       CallNode *alloc = n->as_Call();
       // copy escape information to call node
       PointsToNode* ptn = ptnode_adr(alloc->_idx);
-      PointsToNode::EscapeState es = escape_state(alloc, igvn);
+      PointsToNode::EscapeState es = escape_state(alloc);
       // We have an allocation or call which returns a Java object,
       // see if it is unescaped.
       if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable)
@@ -1123,7 +1127,7 @@
       }
     } else if (n->is_AddP()) {
       ptset.Clear();
-      PointsTo(ptset, get_addp_base(n), igvn);
+      PointsTo(ptset, get_addp_base(n));
       assert(ptset.Size() == 1, "AddP address is unique");
       uint elem = ptset.getelem(); // Allocation node's index
       if (elem == _phantom_object) {
@@ -1143,7 +1147,7 @@
         continue;  // already processed
       }
       ptset.Clear();
-      PointsTo(ptset, n, igvn);
+      PointsTo(ptset, n);
       if (ptset.Size() == 1) {
         uint elem = ptset.getelem(); // Allocation node's index
         if (elem == _phantom_object) {
@@ -1478,6 +1482,26 @@
   return false;
 }
 
+void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
+  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
+  // to create space for them in ConnectionGraph::_nodes[].
+  Node* oop_null = igvn->zerocon(T_OBJECT);
+  Node* noop_null = igvn->zerocon(T_NARROWOOP);
+
+  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
+  // Perform escape analysis
+  if (congraph->compute_escape()) {
+    // There are non escaping objects.
+    C->set_congraph(congraph);
+  }
+
+  // Cleanup.
+  if (oop_null->outcnt() == 0)
+    igvn->hash_delete(oop_null);
+  if (noop_null->outcnt() == 0)
+    igvn->hash_delete(noop_null);
+}
+
 bool ConnectionGraph::compute_escape() {
   Compile* C = _compile;
 
@@ -1492,7 +1516,7 @@
   }
 
   GrowableArray<int> cg_worklist;
-  PhaseGVN* igvn = C->initial_gvn();
+  PhaseGVN* igvn = _igvn;
   bool has_allocations = false;
 
   // Push all useful nodes onto CG list and set their type.
@@ -1661,6 +1685,12 @@
   _collecting = false;
   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
 
+#ifndef PRODUCT
+  if (PrintEscapeAnalysis) {
+    dump(); // Dump ConnectionGraph
+  }
+#endif
+
   bool has_scalar_replaceable_candidates = alloc_worklist.length() > 0;
   if ( has_scalar_replaceable_candidates &&
        C->AliasLevel() >= 3 && EliminateAllocations ) {
@@ -1671,10 +1701,6 @@
 
     if (C->failing())  return false;
 
-    // Clean up after split unique types.
-    ResourceMark rm;
-    PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn());
-
     C->print_method("After Escape Analysis", 2);
 
 #ifdef ASSERT
@@ -1711,7 +1737,7 @@
   int offset = ptn->offset();
   Node* base = get_addp_base(n);
   ptset.Clear();
-  PointsTo(ptset, base, phase);
+  PointsTo(ptset, base);
   int ptset_size = ptset.Size();
 
   // Check if an oop field's initializing value is recorded and add
@@ -1889,7 +1915,7 @@
             arg = get_addp_base(arg);
           }
           ptset.Clear();
-          PointsTo(ptset, arg, phase);
+          PointsTo(ptset, arg);
           for( VectorSetI j(&ptset); j.test(); ++j ) {
             uint pt = j.elem;
             set_escape_state(pt, PointsToNode::ArgEscape);
@@ -1934,7 +1960,7 @@
             }
 
             ptset.Clear();
-            PointsTo(ptset, arg, phase);
+            PointsTo(ptset, arg);
             for( VectorSetI j(&ptset); j.test(); ++j ) {
               uint pt = j.elem;
               if (global_escapes) {
@@ -1970,7 +1996,7 @@
           Node *arg = call->in(i)->uncast();
           set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
           ptset.Clear();
-          PointsTo(ptset, arg, phase);
+          PointsTo(ptset, arg);
           for( VectorSetI j(&ptset); j.test(); ++j ) {
             uint pt = j.elem;
             set_escape_state(pt, PointsToNode::GlobalEscape);
@@ -2433,7 +2459,7 @@
       Node *base = get_addp_base(n);
       // Create a field edge to this node from everything base could point to.
       VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, base, phase);
+      PointsTo(ptset, base);
       for( VectorSetI i(&ptset); i.test(); ++i ) {
         uint pt = i.elem;
         add_field_edge(pt, n_idx, address_offset(n, phase));
@@ -2501,7 +2527,7 @@
       // For everything "adr_base" could point to, create a deferred edge from
       // this node to each field with the same offset.
       VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, adr_base, phase);
+      PointsTo(ptset, adr_base);
       int offset = address_offset(adr, phase);
       for( VectorSetI i(&ptset); i.test(); ++i ) {
         uint pt = i.elem;
@@ -2594,7 +2620,7 @@
       // For everything "adr_base" could point to, create a deferred edge
       // to "val" from each field with the same offset.
       VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, adr_base, phase);
+      PointsTo(ptset, adr_base);
       for( VectorSetI i(&ptset); i.test(); ++i ) {
         uint pt = i.elem;
         add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
@@ -2638,7 +2664,6 @@
 
 #ifndef PRODUCT
 void ConnectionGraph::dump() {
-  PhaseGVN  *igvn = _compile->initial_gvn();
   bool first = true;
 
   uint size = nodes_size();
@@ -2648,7 +2673,7 @@
 
     if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
       continue;
-    PointsToNode::EscapeState es = escape_state(ptn->_node, igvn);
+    PointsToNode::EscapeState es = escape_state(ptn->_node);
     if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
       if (first) {
         tty->cr();
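
Note: the escape_state() change above stops unconditionally caching the computed state — the result is written back only when it improves on what was already cached, since the stored value may legitimately remain UnknownEscape. A standalone sketch of that guard over a small escape lattice (illustrative types, not the HotSpot ones):

  #include <algorithm>
  #include <vector>

  enum Escape { kUnknown = 0, kNone, kArg, kGlobal };

  Escape escape_state(std::vector<Escape>& cache, int idx,
                      const std::vector<Escape>& pointees) {
    Escape es = cache[idx];
    if (es == kGlobal) return es;          // already at the top of the lattice
    Escape orig = es;
    for (Escape pes : pointees)            // max over everything idx may point to
      es = std::max(es, pes);
    if (orig != es) cache[idx] = es;       // cache only a real improvement
    return es;
  }
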
--- a/src/share/vm/opto/escape.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/escape.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -227,6 +227,7 @@
   uint                     _noop_null; // ConN(#NULL)
 
   Compile *                  _compile; // Compile object for current compilation
+  PhaseIterGVN *                _igvn; // Value numbering
 
   // Address of an element in _nodes.  Used when the element is to be modified
   PointsToNode *ptnode_adr(uint idx) const {
@@ -257,7 +258,7 @@
   // walk the connection graph starting at the node corresponding to "n" and
   // add the index of everything it could point to, to "ptset".  This may cause
   // Phi's encountered to get (re)processed.
-  void PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase);
+  void PointsTo(VectorSet &ptset, Node * n);
 
   //  Edge manipulation.  The "from_i" and "to_i" arguments are the
   //  node indices of the source and destination of the edge
@@ -310,7 +311,7 @@
   // Note:  This assumes that escape analysis is run before
   //        PhaseIterGVN creation
   void record_for_optimizer(Node *n) {
-    _compile->record_for_igvn(n);
+    _igvn->_worklist.push(n);
   }
 
   // Set the escape state of a node
@@ -320,16 +321,20 @@
   void verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase);
 
 public:
-  ConnectionGraph(Compile *C);
+  ConnectionGraph(Compile *C, PhaseIterGVN *igvn);
 
   // Check for non-escaping candidates
   static bool has_candidates(Compile *C);
 
+  // Perform escape analysis
+  static void do_analysis(Compile *C, PhaseIterGVN *igvn);
+
   // Compute the escape information
   bool compute_escape();
 
   // escape state of a node
-  PointsToNode::EscapeState escape_state(Node *n, PhaseTransform *phase);
+  PointsToNode::EscapeState escape_state(Node *n);
+
   // other information we have collected
   bool is_scalar_replaceable(Node *n) {
     if (_collecting || (n->_idx >= nodes_size()))
--- a/src/share/vm/opto/graphKit.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/graphKit.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1789,9 +1789,10 @@
 
 void GraphKit::increment_counter(Node* counter_addr) {
   int adr_type = Compile::AliasIdxRaw;
-  Node* cnt  = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type);
+  Node* ctrl = control();
+  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type);
   Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1)));
-  store_to_memory( NULL, counter_addr, incr, T_INT, adr_type );
+  store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type );
 }
 
 
@@ -2771,11 +2772,7 @@
     // Update the counter for this lock.  Don't bother using an atomic
     // operation since we don't require absolute accuracy.
     lock->create_lock_counter(map()->jvms());
-    int adr_type = Compile::AliasIdxRaw;
-    Node* counter_addr = makecon(TypeRawPtr::make(lock->counter()->addr()));
-    Node* cnt  = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type);
-    Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1)));
-    store_to_memory(control(), counter_addr, incr, T_INT, adr_type);
+    increment_counter(lock->counter()->addr());
   }
 #endif
 
--- a/src/share/vm/opto/ifnode.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/ifnode.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1081,11 +1081,9 @@
 
   igvn->register_new_node_with_optimizer(new_if_f);
   igvn->register_new_node_with_optimizer(new_if_t);
-  igvn->hash_delete(old_if_f);
-  igvn->hash_delete(old_if_t);
   // Flip test, so flip trailing control
-  igvn->subsume_node(old_if_f, new_if_t);
-  igvn->subsume_node(old_if_t, new_if_f);
+  igvn->replace_node(old_if_f, new_if_t);
+  igvn->replace_node(old_if_t, new_if_f);
 
   // Progress
   return iff;
--- a/src/share/vm/opto/library_call.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/library_call.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -3512,8 +3512,7 @@
 
   // Get the header out of the object, use LoadMarkNode when available
   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
-  Node* header = make_load(NULL, header_addr, TypeRawPtr::BOTTOM, T_ADDRESS);
-  header = _gvn.transform( new (C, 2) CastP2XNode(NULL, header) );
+  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type());
 
   // Test the header to see if it is unlocked.
   Node *lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
@@ -5202,7 +5201,7 @@
   // super_check_offset, for the desired klass.
   int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc);
   Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
-  Node* n3 = new(C, 3) LoadINode(NULL, immutable_memory(), p3, TypeRawPtr::BOTTOM);
+  Node* n3 = new(C, 3) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
   Node* check_offset = _gvn.transform(n3);
   Node* check_value  = dest_elem_klass;
 
--- a/src/share/vm/opto/loopTransform.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/loopTransform.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -194,8 +194,7 @@
     addx = new (phase->C, 3) AddINode(x, inv);
   }
   phase->register_new_node(addx, phase->get_ctrl(x));
-  phase->_igvn.hash_delete(n1);
-  phase->_igvn.subsume_node(n1, addx);
+  phase->_igvn.replace_node(n1, addx);
   return addx;
 }
 
@@ -1586,8 +1585,7 @@
   Node *phi = cl->phi();
   Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() );
   phase->register_new_node(final,cl->in(LoopNode::EntryControl));
-  phase->_igvn.hash_delete(phi);
-  phase->_igvn.subsume_node(phi,final);
+  phase->_igvn.replace_node(phi,final);
   phase->C->set_major_progress();
   return true;
 }
--- a/src/share/vm/opto/loopnode.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/loopnode.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -400,7 +400,7 @@
     nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
     nphi = _igvn.register_new_node_with_optimizer(nphi);
     set_ctrl(nphi, get_ctrl(phi));
-    _igvn.subsume_node(phi, nphi);
+    _igvn.replace_node(phi, nphi);
     phi = nphi->as_Phi();
   }
   cmp = cmp->clone();
@@ -760,7 +760,7 @@
       // which in turn prevents removing an empty loop.
       Node *id_old_phi = old_phi->Identity( &igvn );
       if( id_old_phi != old_phi ) { // Found a simple identity?
-        // Note that I cannot call 'subsume_node' here, because
+        // Note that I cannot call 'replace_node' here, because
         // that will yank the edge from old_phi to the Region and
         // I'm mid-iteration over the Region's uses.
         for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
@@ -1065,11 +1065,9 @@
     l = igvn.register_new_node_with_optimizer(l, _head);
     phase->set_created_loop_node();
     // Go ahead and replace _head
-    phase->_igvn.subsume_node( _head, l );
+    phase->_igvn.replace_node( _head, l );
     _head = l;
     phase->set_loop(_head, this);
-    for (DUIterator_Fast imax, i = l->fast_outs(imax); i < imax; i++)
-      phase->_igvn.add_users_to_worklist(l->fast_out(i));
   }
 
   // Now recursively beautify nested loops
@@ -1329,8 +1327,7 @@
         Node* add  = new (C, 3) AddINode(ratio_idx, diff);
         phase->_igvn.register_new_node_with_optimizer(add);
         phase->set_ctrl(add, cl);
-        phase->_igvn.hash_delete( phi2 );
-        phase->_igvn.subsume_node( phi2, add );
+        phase->_igvn.replace_node( phi2, add );
         // Sometimes an induction variable is unused
         if (add->outcnt() == 0) {
           phase->_igvn.remove_dead_node(add);
--- a/src/share/vm/opto/loopnode.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/loopnode.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -626,8 +626,7 @@
     _nodes.map( old_node->_idx, (Node*)((intptr_t)new_node + 1) );
   }
   void lazy_replace( Node *old_node, Node *new_node ) {
-    _igvn.hash_delete(old_node);
-    _igvn.subsume_node( old_node, new_node );
+    _igvn.replace_node( old_node, new_node );
     lazy_update( old_node, new_node );
   }
   void lazy_replace_proj( Node *old_node, Node *new_node ) {
--- a/src/share/vm/opto/loopopts.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/loopopts.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -354,8 +354,7 @@
     register_new_node( var_scale, n_ctrl );
     Node *var_add = new (C, 3) AddINode( var_scale, inv_scale );
     register_new_node( var_add, n_ctrl );
-    _igvn.hash_delete( n );
-    _igvn.subsume_node( n, var_add );
+    _igvn.replace_node( n, var_add );
     return var_add;
   }
 
@@ -390,8 +389,7 @@
           register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
           Node *add2 = new (C, 4) AddPNode( n->in(1), add1, n->in(2)->in(3) );
           register_new_node( add2, n_ctrl );
-          _igvn.hash_delete( n );
-          _igvn.subsume_node( n, add2 );
+          _igvn.replace_node( n, add2 );
           return add2;
         }
       }
@@ -412,8 +410,7 @@
           register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
           Node *add2 = new (C, 4) AddPNode( n->in(1), add1, V );
           register_new_node( add2, n_ctrl );
-          _igvn.hash_delete( n );
-          _igvn.subsume_node( n, add2 );
+          _igvn.replace_node( n, add2 );
           return add2;
         }
       }
@@ -555,8 +552,7 @@
     }
     Node *cmov = CMoveNode::make( C, cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi) );
     register_new_node( cmov, cmov_ctrl );
-    _igvn.hash_delete(phi);
-    _igvn.subsume_node( phi, cmov );
+    _igvn.replace_node( phi, cmov );
 #ifndef PRODUCT
     if( VerifyLoopOptimizations ) verify();
 #endif
@@ -642,8 +638,7 @@
 
   // Found a Phi to split thru!
   // Replace 'n' with the new phi
-  _igvn.hash_delete(n);
-  _igvn.subsume_node( n, phi );
+  _igvn.replace_node( n, phi );
   // Moved a load around the loop, 'en-registering' something.
   if( n_blk->Opcode() == Op_Loop && n->is_Load() &&
       !phi->in(LoopNode::LoopBackControl)->is_Load() )
@@ -789,13 +784,11 @@
 
     // Found a Phi to split thru!
     // Replace 'n' with the new phi
-    _igvn.hash_delete(n);
-    _igvn.subsume_node( n, phi );
+    _igvn.replace_node( n, phi );
 
     // Now split the bool up thru the phi
     Node *bolphi = split_thru_phi( bol, n_ctrl, -1 );
-    _igvn.hash_delete(bol);
-    _igvn.subsume_node( bol, bolphi );
+    _igvn.replace_node( bol, bolphi );
     assert( iff->in(1) == bolphi, "" );
     if( bolphi->Value(&_igvn)->singleton() )
       return;
@@ -803,8 +796,7 @@
     // Conditional-move?  Must split up now
     if( !iff->is_If() ) {
       Node *cmovphi = split_thru_phi( iff, n_ctrl, -1 );
-      _igvn.hash_delete(iff);
-      _igvn.subsume_node( iff, cmovphi );
+      _igvn.replace_node( iff, cmovphi );
       return;
     }
 
@@ -950,9 +942,7 @@
   if( n_op == Op_Opaque2 &&
       n->in(1) != NULL &&
       get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
-    _igvn.add_users_to_worklist(n);
-    _igvn.hash_delete(n);
-    _igvn.subsume_node( n, n->in(1) );
+    _igvn.replace_node( n, n->in(1) );
   }
 }
 
@@ -1425,7 +1415,7 @@
           // IGVN does CSE).
           Node *hit = _igvn.hash_find_insert(use);
           if( hit )             // Go ahead and re-hash for hits.
-            _igvn.subsume_node( use, hit );
+            _igvn.replace_node( use, hit );
         }
 
         // If 'use' was in the loop-exit block, it now needs to be sunk
--- a/src/share/vm/opto/macro.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/macro.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -135,8 +135,7 @@
   if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
   copy_call_debug_info(oldcall, call);
   call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
-  _igvn.hash_delete(oldcall);
-  _igvn.subsume_node(oldcall, call);
+  _igvn.replace_node(oldcall, call);
   transform_later(call);
 
   return call;
@@ -523,8 +522,7 @@
         // Kill all new Phis
         while(value_phis.is_nonempty()) {
           Node* n = value_phis.node();
-          _igvn.hash_delete(n);
-          _igvn.subsume_node(n, C->top());
+          _igvn.replace_node(n, C->top());
           value_phis.pop();
         }
       }
@@ -1311,8 +1309,7 @@
   if (!always_slow) {
     call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
   }
-  _igvn.hash_delete(alloc);
-  _igvn.subsume_node(alloc, call);
+  _igvn.replace_node(alloc, call);
   transform_later(call);
 
   // Identify the output projections from the allocate node and
@@ -1431,7 +1428,7 @@
   Node* mark_node = NULL;
   // For now only enable fast locking for non-array types
   if (UseBiasedLocking && (length == NULL)) {
-    mark_node = make_load(NULL, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
+    mark_node = make_load(control, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
   } else {
     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
   }
--- a/src/share/vm/opto/memnode.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/memnode.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -815,6 +815,16 @@
 }
 #endif
 
+#ifdef ASSERT
+//----------------------------is_immutable_value-------------------------------
+// Helper function to allow a raw load without control edge for some cases
+bool LoadNode::is_immutable_value(Node* adr) {
+  return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
+          adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
+          (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
+           in_bytes(JavaThread::osthread_offset())));
+}
+#endif
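
This helper backs the control-edge asserts added to LoadNode::make and StoreNode::make below: a raw-memory (AliasIdxRaw) access must be pinned by a control edge unless the result is an oop (which a safepoint oop map would record) or the address is a provably write-once slot such as JavaThread::_osthread. A comment sketch of the rule:

    // LoadNode::make(gvn, NULL, mem, raw_adr, adr_type, rt, bt) now asserts
    //   unless rt->isa_oopptr() or is_immutable_value(raw_adr) holds;
    // StoreNode::make(gvn, NULL, mem, raw_adr, adr_type, val, bt) always asserts.
    // Callers such as the interpreter-state loads in parse1.cpp below therefore
    // pass an explicit control() where they previously passed 0.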
 
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
@@ -828,6 +838,11 @@
   assert(!(adr_type->isa_aryptr() &&
            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
          "use LoadRangeNode instead");
+  // Check control edge of raw loads
+  assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+          // oop will be recorded in oop map if load crosses safepoint
+          rt->isa_oopptr() || is_immutable_value(adr),
+          "raw memory operations should have control edge");
   switch (bt) {
   case T_BOOLEAN: return new (C, 3) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int()    );
   case T_BYTE:    return new (C, 3) LoadBNode (ctl, mem, adr, adr_type, rt->is_int()    );
@@ -2064,6 +2079,8 @@
 // Polymorphic factory method:
 StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
   Compile* C = gvn.C;
+  assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+          ctl != NULL, "raw memory operations should have control edge");
 
   switch (bt) {
   case T_BOOLEAN:
--- a/src/share/vm/opto/memnode.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/memnode.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -189,6 +189,10 @@
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
 #endif
+#ifdef ASSERT
+  // Helper function to allow a raw load without control edge for some cases
+  static bool is_immutable_value(Node* adr);
+#endif
 protected:
   const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                      ciKlass* klass) const;
--- a/src/share/vm/opto/parse.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/parse.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -480,6 +480,7 @@
   bool push_constant(ciConstant con, bool require_constant = false);
 
   // implementation of object creation bytecodes
+  void emit_guard_for_new(ciInstanceKlass* klass);
   void do_new();
   void do_newarray(BasicType elemtype);
   void do_anewarray();
--- a/src/share/vm/opto/parse1.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/parse1.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -88,15 +88,16 @@
                                      Node *local_addrs_base) {
   Node *mem = memory(Compile::AliasIdxRaw);
   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
+  Node *ctl = control();
 
   // Very similar to LoadNode::make, except we handle un-aligned longs and
   // doubles on Sparc.  Intel can handle them just fine directly.
   Node *l;
   switch( bt ) {                // Signature is flattened
-  case T_INT:     l = new (C, 3) LoadINode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_FLOAT:   l = new (C, 3) LoadFNode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_ADDRESS: l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
-  case T_OBJECT:  l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
+  case T_INT:     l = new (C, 3) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
+  case T_FLOAT:   l = new (C, 3) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
+  case T_ADDRESS: l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
+  case T_OBJECT:  l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
   case T_LONG:
   case T_DOUBLE: {
     // Since arguments are in reverse order, the argument address 'adr'
@@ -104,12 +105,12 @@
     adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
     if( Matcher::misaligned_doubles_ok ) {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C, 3) LoadDNode( 0, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C, 3) LoadLNode( 0, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C, 3) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
+        : (Node*)new (C, 3) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
     } else {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C, 3) LoadD_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C, 3) LoadL_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C, 3) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
+        : (Node*)new (C, 3) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
     }
     break;
   }
--- a/src/share/vm/opto/parse2.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/parse2.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1324,33 +1324,21 @@
   case Bytecodes::_ldc_w:
   case Bytecodes::_ldc2_w:
     // If the constant is unresolved, run this BC once in the interpreter.
-    if (iter().is_unresolved_string()) {
-      uncommon_trap(Deoptimization::make_trap_request
-                    (Deoptimization::Reason_unloaded,
-                     Deoptimization::Action_reinterpret,
-                     iter().get_constant_index()),
-                    NULL, "unresolved_string");
-      break;
-    } else {
+    {
       ciConstant constant = iter().get_constant();
-      if (constant.basic_type() == T_OBJECT) {
-        ciObject* c = constant.as_object();
-        if (c->is_klass()) {
-          // The constant returned for a klass is the ciKlass for the
-          // entry.  We want the java_mirror so get it.
-          ciKlass* klass = c->as_klass();
-          if (klass->is_loaded()) {
-            constant = ciConstant(T_OBJECT, klass->java_mirror());
-          } else {
-            uncommon_trap(Deoptimization::make_trap_request
-                          (Deoptimization::Reason_unloaded,
-                           Deoptimization::Action_reinterpret,
-                           iter().get_constant_index()),
-                          NULL, "unresolved_klass");
-            break;
-          }
-        }
+      if (constant.basic_type() == T_OBJECT &&
+          !constant.as_object()->is_loaded()) {
+        int index = iter().get_constant_pool_index();
+        constantTag tag = iter().get_constant_pool_tag(index);
+        uncommon_trap(Deoptimization::make_trap_request
+                      (Deoptimization::Reason_unloaded,
+                       Deoptimization::Action_reinterpret,
+                       index),
+                      NULL, tag.internal_name());
+        break;
       }
+      assert(constant.basic_type() != T_OBJECT || !constant.as_object()->is_klass(),
+             "must be java_mirror of klass");
       bool pushed = push_constant(constant, true);
       guarantee(pushed, "must be possible to push this constant");
     }
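
A note on why the old klass-to-mirror conversion could go away: the parser now relies on the ci layer returning the mirror directly, which the new assert pins down. Sketch of the assumed contract:

    // For an ldc of a loaded CONSTANT_Class entry, iter().get_constant() is
    // now expected to yield ciConstant(T_OBJECT, klass->java_mirror()), so a
    // raw ciKlass can only reach this code for unloaded entries -- and those
    // take the uncommon trap above, tagged with the pool tag's internal_name().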
--- a/src/share/vm/opto/parseHelper.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/parseHelper.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -197,6 +197,43 @@
 }
 
 
+void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
+  // Emit guarded new
+  //   if (klass->_init_thread != current_thread ||
+  //       klass->_init_state != being_initialized)
+  //      uncommon_trap
+  Node* cur_thread = _gvn.transform( new (C, 1) ThreadLocalNode() );
+  Node* merge = new (C, 3) RegionNode(3);
+  _gvn.set_type(merge, Type::CONTROL);
+  Node* kls = makecon(TypeKlassPtr::make(klass));
+
+  Node* init_thread_offset = _gvn.MakeConX(instanceKlass::init_thread_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes());
+  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
+  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS);
+  Node *tst   = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
+  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
+  set_control(IfTrue(iff));
+  merge->set_req(1, IfFalse(iff));
+
+  Node* init_state_offset = _gvn.MakeConX(instanceKlass::init_state_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes());
+  adr_node = basic_plus_adr(kls, kls, init_state_offset);
+  Node* init_state = make_load(NULL, adr_node, TypeInt::INT, T_INT);
+  Node* being_init = _gvn.intcon(instanceKlass::being_initialized);
+  tst   = Bool( CmpI( init_state, being_init), BoolTest::eq);
+  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
+  set_control(IfTrue(iff));
+  merge->set_req(2, IfFalse(iff));
+
+  PreserveJVMState pjvms(this);
+  record_for_igvn(merge);
+  set_control(merge);
+
+  uncommon_trap(Deoptimization::Reason_uninitialized,
+                Deoptimization::Action_reinterpret,
+                klass);
+}
+
+
 //------------------------------do_new-----------------------------------------
 void Parse::do_new() {
   kill_dead_locals();
@@ -206,7 +243,7 @@
   assert(will_link, "_new: typeflow responsibility");
 
   // Should initialize, or throw an InstantiationError?
-  if (!klass->is_initialized() ||
+  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
       klass->is_abstract() || klass->is_interface() ||
       klass->name() == ciSymbol::java_lang_Class() ||
       iter().is_unresolved_klass()) {
@@ -215,6 +252,9 @@
                   klass);
     return;
   }
+  if (klass->is_being_initialized()) {
+    emit_guard_for_new(klass);
+  }
 
   Node* kls = makecon(TypeKlassPtr::make(klass));
   Node* obj = new_instance(kls);
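
With the guard in place, a klass in the being_initialized state no longer forces a compile-time uncommon trap; the allocation is compiled behind the runtime test emitted by emit_guard_for_new(). The compiled shape, as a sketch:

    // 'new K' while K is being_initialized:
    //   if (K._init_thread == current_thread && K._init_state == being_initialized) {
    //     obj = new_instance(K);        // fast path: the initializing thread itself
    //   } else {
    //     uncommon_trap(uninitialized); // racing threads, or initialization has
    //   }                               // since completed -- reinterpret and recompile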
--- a/src/share/vm/opto/phaseX.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/phaseX.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1447,16 +1447,12 @@
           Node* m = n->out(i);
           if( m->is_Phi() ) {
             assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
-            add_users_to_worklist(m);
-            hash_delete(m); // Yank from hash before hacking edges
-            subsume_node(m, nn);
+            replace_node(m, nn);
             --i; // deleted this phi; rescan starting with next position
           }
         }
       }
-      add_users_to_worklist(n); // Users of about-to-be-constant 'n'
-      hash_delete(n);           // Removed 'n' from table before subsuming it
-      subsume_node(n,nn);       // Update DefUse edges for new constant
+      replace_node(n,nn);       // Update DefUse edges for new constant
     }
     return nn;
   }
--- a/src/share/vm/opto/phaseX.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/phaseX.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -393,6 +393,10 @@
 
   // Idealize old Node 'n' with respect to its inputs and its value
   virtual Node *transform_old( Node *a_node );
+
+  // Subsume users of node 'old' into node 'nn'
+  void subsume_node( Node *old, Node *nn );
+
 protected:
 
   // Idealize new Node 'n' with respect to its inputs and its value
@@ -439,10 +443,6 @@
     remove_globally_dead_node(dead);
   }
 
-  // Subsume users of node 'old' into node 'nn'
-  // If no Def-Use info existed for 'nn' it will after call.
-  void subsume_node( Node *old, Node *nn );
-
   // Add users of 'n' to worklist
   void add_users_to_worklist0( Node *n );
   void add_users_to_worklist ( Node *n );
@@ -450,7 +450,7 @@
   // Replace old node with new one.
   void replace_node( Node *old, Node *nn ) {
     add_users_to_worklist(old);
-    hash_delete(old);
+    hash_delete(old); // Yank from hash before hacking edges
     subsume_node(old, nn);
   }
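
Most of this changeset is the mechanical rewrite enabled here: replace_node() performs the whole removal sequence itself, so call sites no longer open-code it (or forget the worklist step). The equivalence, as a sketch:

    // Before, at each call site (the worklist step was present at only some):
    //   _igvn.add_users_to_worklist(old);
    //   _igvn.hash_delete(old);          // yank from hash before hacking edges
    //   _igvn.subsume_node(old, nn);
    // After:
    //   _igvn.replace_node(old, nn);     // worklist + hash_delete + subsume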
 
--- a/src/share/vm/opto/split_if.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/split_if.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -217,8 +217,7 @@
   register_new_node(phi, blk1);
 
   // Remove cloned-up value from optimizer; use phi instead
-  _igvn.hash_delete(n);
-  _igvn.subsume_node( n, phi );
+  _igvn.replace_node( n, phi );
 
   // (There used to be a self-recursive call to split_up() here,
   // but it is not needed.  All necessary forward walking is done
@@ -352,8 +351,7 @@
   }
 
   if (use_blk == NULL) {        // He's dead, Jim
-    _igvn.hash_delete(use);
-    _igvn.subsume_node(use, C->top());
+    _igvn.replace_node(use, C->top());
   }
 
   return use_blk;
--- a/src/share/vm/opto/superword.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/superword.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1172,8 +1172,7 @@
       _phase->set_ctrl(vn, _phase->get_ctrl(p->at(0)));
       for (uint j = 0; j < p->size(); j++) {
         Node* pm = p->at(j);
-        _igvn.hash_delete(pm);
-        _igvn.subsume_node(pm, vn);
+        _igvn.replace_node(pm, vn);
       }
       _igvn._worklist.push(vn);
     }
--- a/src/share/vm/opto/type.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/opto/type.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -182,6 +182,8 @@
   return t->hash();
 }
 
+#define SMALLINT ((juint)3)  // a value too insignificant to consider widening
+
 //--------------------------Initialize_shared----------------------------------
 void Type::Initialize_shared(Compile* current) {
   // This method does not need to be locked because the first system
@@ -240,6 +242,7 @@
   assert( TypeInt::CC_GT == TypeInt::ONE,     "types must match for CmpL to work" );
   assert( TypeInt::CC_EQ == TypeInt::ZERO,    "types must match for CmpL to work" );
   assert( TypeInt::CC_GE == TypeInt::BOOL,    "types must match for CmpL to work" );
+  assert( (juint)(TypeInt::CC->_hi - TypeInt::CC->_lo) <= SMALLINT, "CC is truly small");
 
   TypeLong::MINUS_1 = TypeLong::make(-1);        // -1
   TypeLong::ZERO    = TypeLong::make( 0);        //  0
@@ -1054,16 +1057,21 @@
   return (TypeInt*)(new TypeInt(lo,lo,WidenMin))->hashcons();
 }
 
-#define SMALLINT ((juint)3)  // a value too insignificant to consider widening
-
-const TypeInt *TypeInt::make( jint lo, jint hi, int w ) {
+static int normalize_int_widen( jint lo, jint hi, int w ) {
   // Certain normalizations keep us sane when comparing types.
   // The 'SMALLINT' covers constants and also CC and its relatives.
-  assert(CC == NULL || (juint)(CC->_hi - CC->_lo) <= SMALLINT, "CC is truly small");
   if (lo <= hi) {
-    if ((juint)(hi - lo) <= SMALLINT)   w = Type::WidenMin;
-    if ((juint)(hi - lo) >= max_juint)  w = Type::WidenMax; // plain int
+    if ((juint)(hi - lo) <= SMALLINT)  w = Type::WidenMin;
+    if ((juint)(hi - lo) >= max_juint) w = Type::WidenMax; // TypeInt::INT
+  } else {
+    if ((juint)(lo - hi) <= SMALLINT)  w = Type::WidenMin;
+    if ((juint)(lo - hi) >= max_juint) w = Type::WidenMin; // dual TypeInt::INT
   }
+  return w;
+}
+
+const TypeInt *TypeInt::make( jint lo, jint hi, int w ) {
+  w = normalize_int_widen(lo, hi, w);
   return (TypeInt*)(new TypeInt(lo,hi,w))->hashcons();
 }
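
normalize_int_widen() also canonicalizes inverted (dual) ranges, which the old code never normalized because xdual() constructed TypeInt directly. Worked cases, with SMALLINT == 3 as defined above:

    // normalize_int_widen(0, 3, WidenMax)               -> WidenMin  (hi-lo <= SMALLINT)
    // normalize_int_widen(min_jint, max_jint, WidenMin) -> WidenMax  (TypeInt::INT)
    // normalize_int_widen(3, 0, WidenMax)               -> WidenMin  (dual of a small range)
    // normalize_int_widen(max_jint, min_jint, w)        -> WidenMin  (dual TypeInt::INT)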
 
@@ -1103,14 +1111,14 @@
 
   // Expand covered set
   const TypeInt *r = t->is_int();
-  // (Avoid TypeInt::make, to avoid the argument normalizations it enforces.)
-  return (new TypeInt( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ))->hashcons();
+  return make( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) );
 }
 
 //------------------------------xdual------------------------------------------
 // Dual: reverse hi & lo; flip widen
 const Type *TypeInt::xdual() const {
-  return new TypeInt(_hi,_lo,WidenMax-_widen);
+  int w = normalize_int_widen(_hi,_lo, WidenMax-_widen);
+  return new TypeInt(_hi,_lo,w);
 }
 
 //------------------------------widen------------------------------------------
@@ -1202,7 +1210,7 @@
 //-----------------------------filter------------------------------------------
 const Type *TypeInt::filter( const Type *kills ) const {
   const TypeInt* ft = join(kills)->isa_int();
-  if (ft == NULL || ft->_lo > ft->_hi)
+  if (ft == NULL || ft->empty())
     return Type::TOP;           // Canonical empty value
   if (ft->_widen < this->_widen) {
     // Do not allow the value of kill->_widen to affect the outcome.
@@ -1304,13 +1312,21 @@
   return (TypeLong*)(new TypeLong(lo,lo,WidenMin))->hashcons();
 }
 
-const TypeLong *TypeLong::make( jlong lo, jlong hi, int w ) {
+static int normalize_long_widen( jlong lo, jlong hi, int w ) {
   // Certain normalizations keep us sane when comparing types.
-  // The '1' covers constants.
+  // The 'SMALLINT' covers constants.
   if (lo <= hi) {
-    if ((julong)(hi - lo) <= SMALLINT)    w = Type::WidenMin;
-    if ((julong)(hi - lo) >= max_julong)  w = Type::WidenMax; // plain long
+    if ((julong)(hi - lo) <= SMALLINT)   w = Type::WidenMin;
+    if ((julong)(hi - lo) >= max_julong) w = Type::WidenMax; // TypeLong::LONG
+  } else {
+    if ((julong)(lo - hi) <= SMALLINT)   w = Type::WidenMin;
+    if ((julong)(lo - hi) >= max_julong) w = Type::WidenMin; // dual TypeLong::LONG
   }
+  return w;
+}
+
+const TypeLong *TypeLong::make( jlong lo, jlong hi, int w ) {
+  w = normalize_long_widen(lo, hi, w);
   return (TypeLong*)(new TypeLong(lo,hi,w))->hashcons();
 }
 
@@ -1351,14 +1367,14 @@
 
   // Expand covered set
   const TypeLong *r = t->is_long(); // Turn into a TypeLong
-  // (Avoid TypeLong::make, to avoid the argument normalizations it enforces.)
-  return (new TypeLong( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ))->hashcons();
+  return make( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) );
 }
 
 //------------------------------xdual------------------------------------------
 // Dual: reverse hi & lo; flip widen
 const Type *TypeLong::xdual() const {
-  return new TypeLong(_hi,_lo,WidenMax-_widen);
+  int w = normalize_long_widen(_hi,_lo, WidenMax-_widen);
+  return new TypeLong(_hi,_lo,w);
 }
 
 //------------------------------widen------------------------------------------
@@ -1453,7 +1469,7 @@
 //-----------------------------filter------------------------------------------
 const Type *TypeLong::filter( const Type *kills ) const {
   const TypeLong* ft = join(kills)->isa_long();
-  if (ft == NULL || ft->_lo > ft->_hi)
+  if (ft == NULL || ft->empty())
     return Type::TOP;           // Canonical empty value
   if (ft->_widen < this->_widen) {
     // Do not allow the value of kill->_widen to affect the outcome.
--- a/src/share/vm/prims/jvm.h	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/prims/jvm.h	Sat Jul 31 15:10:59 2010 +0100
@@ -1044,7 +1044,23 @@
     JVM_CONSTANT_Fieldref,
     JVM_CONSTANT_Methodref,
     JVM_CONSTANT_InterfaceMethodref,
-    JVM_CONSTANT_NameAndType
+    JVM_CONSTANT_NameAndType,
+    JVM_CONSTANT_MethodHandle           = 15,  // JSR 292
+    JVM_CONSTANT_MethodType             = 16,  // JSR 292
+    JVM_CONSTANT_InvokeDynamic          = 17  // JSR 292
+};
+
+/* JVM_CONSTANT_MethodHandle subtypes */
+enum {
+    JVM_REF_getField                = 1,
+    JVM_REF_getStatic               = 2,
+    JVM_REF_putField                = 3,
+    JVM_REF_putStatic               = 4,
+    JVM_REF_invokeVirtual           = 5,
+    JVM_REF_invokeStatic            = 6,
+    JVM_REF_invokeSpecial           = 7,
+    JVM_REF_newInvokeSpecial        = 8,
+    JVM_REF_invokeInterface         = 9
 };
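
For context, these tags correspond to the JSR 292 constant-pool entry layouts (as later standardized in the Java 7 class-file format; a sketch, not part of this header):

    /* CONSTANT_MethodHandle_info  { u1 tag;  u1 reference_kind;  u2 reference_index; } */
    /* CONSTANT_MethodType_info    { u1 tag;  u2 descriptor_index; } */
    /* CONSTANT_InvokeDynamic_info { u1 tag;  u2 bootstrap_method_attr_index; u2 name_and_type_index; } */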
 
 /* Used in the newarray instruction. */
--- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -122,30 +122,8 @@
     }
   }
 
-  // we must name the CodeBlob - some CodeBlobs already have names :-
-  // - stubs used by compiled code to call a (static) C++ runtime routine
-  // - non-relocatable machine code such as the interpreter, stubroutines, etc.
-  // - various singleton blobs
-  //
-  // others are unnamed so we create a name :-
-  // - OSR adapter (interpreter frame that has been on-stack replaced)
-  // - I2C and C2I adapters
-  const char* name = NULL;
-  if (cb->is_runtime_stub()) {
-    name = ((RuntimeStub*)cb)->name();
-  }
-  if (cb->is_buffer_blob()) {
-    name = ((BufferBlob*)cb)->name();
-  }
-  if (cb->is_deoptimization_stub() || cb->is_safepoint_stub()) {
-    name = ((SingletonBlob*)cb)->name();
-  }
-  if (cb->is_uncommon_trap_stub() || cb->is_exception_stub()) {
-    name = ((SingletonBlob*)cb)->name();
-  }
-
   // record the CodeBlob details as a JvmtiCodeBlobDesc
-  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(name, cb->instructions_begin(),
+  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(cb->name(), cb->instructions_begin(),
                                                  cb->instructions_end());
   _global_code_blobs->append(scb);
 }
@@ -197,7 +175,10 @@
 jvmtiError JvmtiCodeBlobEvents::generate_dynamic_code_events(JvmtiEnv* env) {
   CodeBlobCollector collector;
 
-  // first collect all the code blobs
+  // First collect all the code blobs.  This has to be done in a
+  // single pass over the code cache with CodeCache_lock held because
+  // there isn't any safe way to iterate over regular CodeBlobs since
+  // they can be freed at any point.
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     collector.collect();
@@ -213,168 +194,28 @@
 }
 
 
-// Support class to describe a nmethod in the CodeCache
-
-class nmethodDesc: public CHeapObj {
- private:
-  methodHandle _method;
-  address _code_begin;
-  address _code_end;
-  jvmtiAddrLocationMap* _map;
-  jint _map_length;
- public:
-  nmethodDesc(methodHandle method, address code_begin, address code_end,
-              jvmtiAddrLocationMap* map, jint map_length) {
-    _method = method;
-    _code_begin = code_begin;
-    _code_end = code_end;
-    _map = map;
-    _map_length = map_length;
-  }
-  methodHandle method() const           { return _method; }
-  address code_begin() const            { return _code_begin; }
-  address code_end() const              { return _code_end; }
-  jvmtiAddrLocationMap* map() const     { return _map; }
-  jint map_length() const               { return _map_length; }
-};
-
-
-// Support class to collect a list of the nmethod CodeBlobs in
-// the CodeCache.
-//
-// Usage :-
-//
-// nmethodCollector collector;
-//
-// collector.collect();
-// JvmtiCodeBlobDesc* blob = collector.first();
-// while (blob != NULL) {
-//   :
-//   blob = collector.next();
-// }
-//
-class nmethodCollector : StackObj {
- private:
-  GrowableArray<nmethodDesc*>* _nmethods;           // collect nmethods
-  int _pos;                                         // iteration support
-
-  // used during a collection
-  static GrowableArray<nmethodDesc*>* _global_nmethods;
-  static void do_nmethod(nmethod* nm);
- public:
-  nmethodCollector() {
-    _nmethods = NULL;
-    _pos = -1;
-  }
-  ~nmethodCollector() {
-    if (_nmethods != NULL) {
-      for (int i=0; i<_nmethods->length(); i++) {
-        nmethodDesc* blob = _nmethods->at(i);
-        if (blob->map()!= NULL) {
-          FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, blob->map());
-        }
-      }
-      delete _nmethods;
-    }
-  }
-
-  // collect list of nmethods in the cache
-  void collect();
-
-  // iteration support - return first code blob
-  nmethodDesc* first() {
-    assert(_nmethods != NULL, "not collected");
-    if (_nmethods->length() == 0) {
-      return NULL;
-    }
-    _pos = 0;
-    return _nmethods->at(0);
-  }
-
-  // iteration support - return next code blob
-  nmethodDesc* next() {
-    assert(_pos >= 0, "iteration not started");
-    if (_pos+1 >= _nmethods->length()) {
-      return NULL;
-    }
-    return _nmethods->at(++_pos);
-  }
-};
-
-// used during collection
-GrowableArray<nmethodDesc*>* nmethodCollector::_global_nmethods;
-
-
-// called for each nmethod in the CodeCache
-//
-// This function simply adds a descriptor for each nmethod to the global list.
-
-void nmethodCollector::do_nmethod(nmethod* nm) {
-  // ignore zombies
-  if (!nm->is_alive()) {
-    return;
-  }
-
-  assert(nm->method() != NULL, "checking");
-
-  // create the location map for the nmethod.
-  jvmtiAddrLocationMap* map;
-  jint map_length;
-  JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);
-
-  // record the nmethod details
-  methodHandle mh(nm->method());
-  nmethodDesc* snm = new nmethodDesc(mh,
-                                     nm->code_begin(),
-                                     nm->code_end(),
-                                     map,
-                                     map_length);
-  _global_nmethods->append(snm);
-}
-
-// collects a list of nmethod in the CodeCache.
-//
-// The created list is growable array of nmethodDesc - each one describes
-// a nmethod and includs its JVMTI address location map.
-
-void nmethodCollector::collect() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(_global_nmethods == NULL, "checking");
-
-  // create the list
-  _global_nmethods = new (ResourceObj::C_HEAP) GrowableArray<nmethodDesc*>(100,true);
-
-  // any a descriptor for each nmethod to the list.
-  CodeCache::nmethods_do(do_nmethod);
-
-  // make the list the instance list
-  _nmethods = _global_nmethods;
-  _global_nmethods = NULL;
-}
-
 // Generate a COMPILED_METHOD_LOAD event for each nmethod
-
 jvmtiError JvmtiCodeBlobEvents::generate_compiled_method_load_events(JvmtiEnv* env) {
   HandleMark hm;
-  nmethodCollector collector;
-
-  // first collect all nmethods
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    collector.collect();
-  }
 
-  // iterate over the  list and post an event for each nmethod
-  nmethodDesc* nm_desc = collector.first();
-  while (nm_desc != NULL) {
-    methodOop method = nm_desc->method()();
-    jmethodID mid = method->jmethod_id();
-    assert(mid != NULL, "checking");
-    JvmtiExport::post_compiled_method_load(env, mid,
-                                           (jint)(nm_desc->code_end() - nm_desc->code_begin()),
-                                           nm_desc->code_begin(), nm_desc->map_length(),
-                                           nm_desc->map());
-    nm_desc = collector.next();
+  // Walk the CodeCache notifying for live nmethods.  The code cache
+  // may be changing while this is happening, which is OK since newly
+  // created nmethods will notify normally and nmethods which are freed
+  // can be safely skipped.
+  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  nmethod* current = CodeCache::first_nmethod();
+  while (current != NULL) {
+    // Only notify for live nmethods
+    if (current->is_alive()) {
+      // Lock the nmethod so it can't be freed
+      nmethodLocker nml(current);
+
+      // Don't hold the lock over the notify or jmethodID creation
+      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      current->get_and_cache_jmethod_id();
+      JvmtiExport::post_compiled_method_load(current);
+    }
+    current = CodeCache::next_nmethod(current);
   }
   return JVMTI_ERROR_NONE;
 }
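
The locking pattern above is what makes the collector class unnecessary: nmethodLocker pins the nmethod so it cannot be flushed, which in turn makes it safe to drop CodeCache_lock across the JVMTI callback. Condensed from the loop above:

    // MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    // for (nmethod* nm = CodeCache::first_nmethod(); nm != NULL;
    //      nm = CodeCache::next_nmethod(nm)) {
    //   if (!nm->is_alive()) continue;
    //   nmethodLocker nml(nm);      // pin: nm cannot be freed while locked
    //   MutexUnlockerEx unlock(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    //   ... post the event without holding the code cache lock ...
    // }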
--- a/src/share/vm/prims/methodComparator.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/prims/methodComparator.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -163,10 +163,10 @@
 
   case Bytecodes::_ldc   : // fall through
   case Bytecodes::_ldc_w : {
-    Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method()(), _s_old->bcp());
-    Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method()(), _s_new->bcp());
-    int cpi_old = ldc_old->index();
-    int cpi_new = ldc_new->index();
+    Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method(), _s_old->bci());
+    Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method(), _s_new->bci());
+    int cpi_old = ldc_old->pool_index();
+    int cpi_new = ldc_new->pool_index();
     constantTag tag_old = _old_cp->tag_at(cpi_old);
     constantTag tag_new = _new_cp->tag_at(cpi_new);
     if (tag_old.is_int() || tag_old.is_float()) {
@@ -187,12 +187,30 @@
       if (strcmp(_old_cp->string_at_noresolve(cpi_old),
                  _new_cp->string_at_noresolve(cpi_new)) != 0)
         return false;
-    } else { // tag_old should be klass - 4881222
+    } else if (tag_old.is_klass() || tag_old.is_unresolved_klass()) {
+      // tag_old should be klass - 4881222
       if (! (tag_new.is_unresolved_klass() || tag_new.is_klass()))
         return false;
       if (_old_cp->klass_at_noresolve(cpi_old) !=
           _new_cp->klass_at_noresolve(cpi_new))
         return false;
+    } else if (tag_old.is_method_type() && tag_new.is_method_type()) {
+      int mti_old = _old_cp->method_type_index_at(cpi_old);
+      int mti_new = _new_cp->method_type_index_at(cpi_new);
+      if ((_old_cp->symbol_at(mti_old) != _new_cp->symbol_at(mti_new)))
+        return false;
+    } else if (tag_old.is_method_handle() && tag_new.is_method_handle()) {
+      if (_old_cp->method_handle_ref_kind_at(cpi_old) !=
+          _new_cp->method_handle_ref_kind_at(cpi_new))
+        return false;
+      int mhi_old = _old_cp->method_handle_index_at(cpi_old);
+      int mhi_new = _new_cp->method_handle_index_at(cpi_new);
+      if ((_old_cp->uncached_klass_ref_at_noresolve(mhi_old) != _new_cp->uncached_klass_ref_at_noresolve(mhi_new)) ||
+          (_old_cp->uncached_name_ref_at(mhi_old) != _new_cp->uncached_name_ref_at(mhi_new)) ||
+          (_old_cp->uncached_signature_ref_at(mhi_old) != _new_cp->uncached_signature_ref_at(mhi_new)))
+        return false;
+    } else {
+      return false;  // unknown tag
     }
     break;
   }
--- a/src/share/vm/prims/methodHandleWalk.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -738,6 +738,12 @@
 
   // bi
   case Bytecodes::_ldc:
+    assert(Bytecodes::format_bits(op, false) == (Bytecodes::_fmt_b|Bytecodes::_fmt_has_k), "wrong bytecode format");
+    assert((char) index == index, "index does not fit in 8-bit");
+    _bytecode.push(op);
+    _bytecode.push(index);
+    break;
+
   case Bytecodes::_iload:
   case Bytecodes::_lload:
   case Bytecodes::_fload:
@@ -754,7 +760,8 @@
     _bytecode.push(index);
     break;
 
-  // bii
+  // bkk
+  case Bytecodes::_ldc_w:
   case Bytecodes::_ldc2_w:
   case Bytecodes::_checkcast:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format");
--- a/src/share/vm/prims/methodHandles.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/prims/methodHandles.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -2475,6 +2475,10 @@
 
 JVM_ENTRY(void, MHI_registerBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh, jobject bsm_jh)) {
   instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD);
+  if (!AllowTransitionalJSR292) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "registerBootstrapMethod is only supported in JSR 292 EDR");
+  }
   ik->link_class(CHECK);
   if (!java_dyn_MethodHandle::is_instance(JNIHandles::resolve(bsm_jh))) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "method handle");
--- a/src/share/vm/runtime/arguments.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1376,11 +1376,6 @@
   }
   no_shared_spaces();
 
-  // Set the maximum pause time goal to be a reasonable default.
-  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
-    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
-  }
-
   if (FLAG_IS_DEFAULT(MarkStackSize)) {
     FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
   }
@@ -1513,6 +1508,9 @@
   if (AggressiveOpts && FLAG_IS_DEFAULT(BiasedLockingStartupDelay)) {
     FLAG_SET_DEFAULT(BiasedLockingStartupDelay, 500);
   }
+  if (AggressiveOpts && FLAG_IS_DEFAULT(OptimizeStringConcat)) {
+    FLAG_SET_DEFAULT(OptimizeStringConcat, true);
+  }
 #endif
 
   if (AggressiveOpts) {
@@ -1697,20 +1695,21 @@
 
   status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
 
-  // Check user specified sharing option conflict with Parallel GC
-  bool cannot_share = ((UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC || UseParNewGC ||
-                       UseParallelGC || UseParallelOldGC ||
-                       SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages));
-
+  // Check whether user-specified sharing option conflicts with GC or page size.
+  // Both sharing and large pages are enabled by default on some platforms;
+  // large pages override sharing only if explicitly set on the command line.
+  const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode ||
+          UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC ||
+          (UseLargePages && FLAG_IS_CMDLINE(UseLargePages));
   if (cannot_share) {
     // Either force sharing on by forcing the other options off, or
     // force sharing off.
     if (DumpSharedSpaces || ForceSharedSpaces) {
       jio_fprintf(defaultStream::error_stream(),
-                  "Reverting to Serial GC because of %s\n",
-                  ForceSharedSpaces ? " -Xshare:on" : "-Xshare:dump");
+                  "Using Serial GC and default page size because of %s\n",
+                  ForceSharedSpaces ? "-Xshare:on" : "-Xshare:dump");
       force_serial_gc();
-      FLAG_SET_DEFAULT(SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages), false);
+      FLAG_SET_DEFAULT(UseLargePages, false);
     } else {
       if (UseSharedSpaces && Verbose) {
         jio_fprintf(defaultStream::error_stream(),
@@ -1719,6 +1718,8 @@
       }
       no_shared_spaces();
     }
+  } else if (UseLargePages && (UseSharedSpaces || DumpSharedSpaces)) {
+    FLAG_SET_DEFAULT(UseLargePages, false);
   }
 
   status = status && check_gc_consistency();
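
The resulting policy, as a sketch:

    // GC/large-page conflict + -Xshare:on or -Xshare:dump
    //     -> force Serial GC and turn UseLargePages off  (sharing wins)
    // GC/large-page conflict, sharing not forced
    //     -> no_shared_spaces()                          (sharing loses)
    // no conflict, but large pages and sharing both enabled
    //     -> turn UseLargePages off                      (sharing wins quietly)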
--- a/src/share/vm/runtime/globals.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/globals.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1975,7 +1975,7 @@
           "Adaptive size policy maximum GC pause time goal in msec, "       \
           "or (G1 Only) the max. GC time per MMU time slice")               \
                                                                             \
-  product(intx, GCPauseIntervalMillis, 500,                                 \
+  product(uintx, GCPauseIntervalMillis, 0,                                  \
           "Time slice for MMU specification")                               \
                                                                             \
   product(uintx, MaxGCMinorPauseMillis, max_uintx,                          \
@@ -2541,9 +2541,6 @@
           "Enable String cache capabilities on String.java")                \
                                                                             \
   /* statistics */                                                          \
-  develop(bool, UseVTune, false,                                            \
-          "enable support for Intel's VTune profiler")                      \
-                                                                            \
   develop(bool, CountCompiledCalls, false,                                  \
           "counts method invocations")                                      \
                                                                             \
@@ -3520,6 +3517,9 @@
   experimental(bool, EnableInvokeDynamic, false,                            \
           "recognize the invokedynamic instruction")                        \
                                                                             \
+  experimental(bool, AllowTransitionalJSR292, true,                         \
+          "recognize pre-PFD formats of invokedynamic")                     \
+                                                                            \
   develop(bool, TraceInvokeDynamic, false,                                  \
           "trace internal invoke dynamic operations")                       \
                                                                             \
--- a/src/share/vm/runtime/init.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/init.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -34,7 +34,6 @@
 
 // Initialization done by Java thread in init_globals()
 void management_init();
-void vtune_init();
 void bytecodes_init();
 void classLoader_init();
 void codeCache_init();
@@ -82,7 +81,6 @@
 jint init_globals() {
   HandleMark hm;
   management_init();
-  vtune_init();
   bytecodes_init();
   classLoader_init();
   codeCache_init();
--- a/src/share/vm/runtime/java.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/java.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -432,8 +432,6 @@
   print_statistics();
   Universe::heap()->print_tracing_info();
 
-  VTune::exit();
-
   { MutexLocker ml(BeforeExit_lock);
     _before_exit_status = BEFORE_EXIT_DONE;
     BeforeExit_lock->notify_all();
--- a/src/share/vm/runtime/jniHandles.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/jniHandles.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -66,6 +66,7 @@
 
 
 jobject JNIHandles::make_global(Handle obj) {
+  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
@@ -81,6 +82,7 @@
 
 
 jobject JNIHandles::make_weak_global(Handle obj) {
+  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
--- a/src/share/vm/runtime/mutexLocker.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/mutexLocker.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -159,6 +159,8 @@
   def(STS_init_lock              , Mutex,   leaf,        true );
   if (UseConcMarkSweepGC) {
     def(iCMS_lock                  , Monitor, special,     true ); // CMS incremental mode start/stop notification
+  }
+  if (UseConcMarkSweepGC || UseG1GC) {
     def(FullGCCount_lock           , Monitor, leaf,        true ); // in support of ExplicitGCInvokesConcurrent
   }
   if (UseG1GC) {
--- a/src/share/vm/runtime/os.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/os.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -886,6 +886,11 @@
         "%/lib/jsse.jar:"
         "%/lib/jce.jar:"
         "%/lib/charsets.jar:"
+
+        // ## TEMPORARY hack to keep the legacy launcher working when
+        // ## only the boot module is installed (cf. j.l.ClassLoader)
+        "%/lib/modules/jdk.boot.jar:"
+
         "%/classes";
     char* sysclasspath = format_boot_path(classpath_format, home, home_len, fileSep, pathSep);
     if (sysclasspath == NULL) return false;
--- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -779,7 +779,7 @@
 
   // Find bytecode
   Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
-  bc = bytecode->adjusted_invoke_code();
+  bc = bytecode->java_code();
   int bytecode_index = bytecode->index();
 
   // Find receiver for non-static call
@@ -2251,7 +2251,6 @@
                  B->name(),
                  fingerprint->as_string(),
                  B->instructions_begin());
-    VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
     Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
--- a/src/share/vm/runtime/stubCodeGenerator.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/stubCodeGenerator.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -132,7 +132,6 @@
   _cdesc->set_end(_cgen->assembler()->pc());
   assert(StubCodeDesc::_list == _cdesc, "expected order on list");
   _cgen->stub_epilog(_cdesc);
-  VTune::register_stub(_cdesc->name(), _cdesc->begin(), _cdesc->end());
   Forte::register_stub(_cdesc->name(), _cdesc->begin(), _cdesc->end());
 
   if (JvmtiExport::should_post_dynamic_code_generated()) {
--- a/src/share/vm/runtime/stubRoutines.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/stubRoutines.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -135,28 +135,32 @@
 static void test_arraycopy_func(address func, int alignment) {
   int v = 0xcc;
   int v2 = 0x11;
-  jlong lbuffer[2];
-  jlong lbuffer2[2];
-  address buffer  = (address) lbuffer;
-  address buffer2 = (address) lbuffer2;
+  jlong lbuffer[8];
+  jlong lbuffer2[8];
+  address fbuffer  = (address) lbuffer;
+  address fbuffer2 = (address) lbuffer2;
   unsigned int i;
   for (i = 0; i < sizeof(lbuffer); i++) {
-    buffer[i] = v; buffer2[i] = v2;
+    fbuffer[i] = v; fbuffer2[i] = v2;
   }
+  // C++ does not guarantee jlong[] array alignment to 8 bytes.
+  // Use middle of array to check that memory before it is not modified.
+  address buffer  = (address) round_to((intptr_t)&lbuffer[4], BytesPerLong);
+  address buffer2 = (address) round_to((intptr_t)&lbuffer2[4], BytesPerLong);
   // do an aligned copy
   ((arraycopy_fn)func)(buffer, buffer2, 0);
   for (i = 0; i < sizeof(lbuffer); i++) {
-    assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
+    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
   }
   // adjust destination alignment
   ((arraycopy_fn)func)(buffer, buffer2 + alignment, 0);
   for (i = 0; i < sizeof(lbuffer); i++) {
-    assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
+    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
   }
   // adjust source alignment
   ((arraycopy_fn)func)(buffer + alignment, buffer2, 0);
   for (i = 0; i < sizeof(lbuffer); i++) {
-    assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
+    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
   }
 }
 #endif
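
The enlarged buffers serve two purposes, sketched below: round_to() picks an 8-byte-aligned slot even when the stack jlong[] itself is only 4-byte aligned (possible on 32-bit ABIs), and copying into the middle of the array lets the fbuffer[i] checks catch stray writes on either side of the copy target:

    // lbuffer (8 jlongs): [ 0 1 2 3 | 4 5 6 7 ]
    //                               ^ buffer = round_to(&lbuffer[4], BytesPerLong)
    // A zero-count copy must leave all 64 bytes untouched, so the asserts scan
    // the whole array, not just the aligned window.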
@@ -183,7 +187,7 @@
   test_arraycopy_func(arrayof_##type##_arraycopy(),          sizeof(HeapWord)); \
   test_arraycopy_func(arrayof_##type##_disjoint_arraycopy(), sizeof(HeapWord))
 
-  // Make sure all the arraycopy stubs properly handle zeros
+  // Make sure all the arraycopy stubs properly handle zero count
   TEST_ARRAYCOPY(jbyte);
   TEST_ARRAYCOPY(jshort);
   TEST_ARRAYCOPY(jint);
@@ -191,6 +195,25 @@
 
 #undef TEST_ARRAYCOPY
 
+#define TEST_COPYRTN(type) \
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_##type##s_atomic),  sizeof(type)); \
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::arrayof_conjoint_##type##s), (int)MAX2(sizeof(HeapWord), sizeof(type)))
+
+  // Make sure all the copy runtime routines properly handle zero count
+  TEST_COPYRTN(jbyte);
+  TEST_COPYRTN(jshort);
+  TEST_COPYRTN(jint);
+  TEST_COPYRTN(jlong);
+
+#undef TEST_COPYRTN
+
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_words), sizeof(HeapWord));
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words), sizeof(HeapWord));
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words_atomic), sizeof(HeapWord));
+  // Aligned to BytesPerLong
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_conjoint_words), sizeof(jlong));
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_disjoint_words), sizeof(jlong));
+
 #endif
 }
 
@@ -221,15 +244,13 @@
 #ifndef PRODUCT
   SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
-  Copy::conjoint_bytes_atomic(src, dest, count);
+  Copy::conjoint_jbytes_atomic(src, dest, count);
 JRT_END
 
 JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
 #ifndef PRODUCT
   SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::conjoint_jshorts_atomic(src, dest, count);
 JRT_END
 
@@ -237,7 +258,6 @@
 #ifndef PRODUCT
   SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::conjoint_jints_atomic(src, dest, count);
 JRT_END
 
@@ -245,7 +265,6 @@
 #ifndef PRODUCT
   SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::conjoint_jlongs_atomic(src, dest, count);
 JRT_END
 
@@ -263,15 +282,13 @@
 #ifndef PRODUCT
   SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
-  Copy::arrayof_conjoint_bytes(src, dest, count);
+  Copy::arrayof_conjoint_jbytes(src, dest, count);
 JRT_END
 
 JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
 #ifndef PRODUCT
   SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::arrayof_conjoint_jshorts(src, dest, count);
 JRT_END
 
@@ -279,7 +296,6 @@
 #ifndef PRODUCT
   SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::arrayof_conjoint_jints(src, dest, count);
 JRT_END
 
@@ -287,7 +303,6 @@
 #ifndef PRODUCT
   SharedRuntime::_jlong_array_copy_ctr++;       // Slow-path long/double array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::arrayof_conjoint_jlongs(src, dest, count);
 JRT_END
 
--- a/src/share/vm/runtime/sweeper.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/sweeper.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,15 +26,16 @@
 # include "incls/_sweeper.cpp.incl"
 
 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
-CodeBlob* NMethodSweeper::_current = NULL;   // Current nmethod
-int       NMethodSweeper::_seen = 0 ;        // No. of blobs we have currently processed in current pass of CodeCache
-int       NMethodSweeper::_invocations = 0;  // No. of invocations left until we are completed with this pass
+nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
+int       NMethodSweeper::_seen = 0;        // No. of nmethods we have processed in the current pass of the CodeCache
+
+volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until this pass is complete
+volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
 
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
 bool      NMethodSweeper::_do_sweep = false;
-jint      NMethodSweeper::_sweep_started = 0;
 bool      NMethodSweeper::_was_full = false;
 jint      NMethodSweeper::_advise_to_sweep = 0;
 jlong     NMethodSweeper::_last_was_full = 0;
@@ -108,23 +109,14 @@
         // code cache is filling up
         _last_was_full = os::javaTimeMillis();
 
-        if (PrintMethodFlushing) {
-          tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
-            CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
-        }
-        if (LogCompilation && (xtty != NULL)) {
-          ttyLocker ttyl;
-          xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
-                           CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
-          xtty->stamp();
-          xtty->end_elem();
-        }
+        log_sweep("restart_compiler");
       }
     }
   }
 }
 
 void NMethodSweeper::possibly_sweep() {
+  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
   if ((!MethodFlushing) || (!_do_sweep)) return;
 
   if (_invocations > 0) {
@@ -133,32 +125,31 @@
     if (old != 0) {
       return;
     }
-    sweep_code_cache();
+    if (_invocations > 0) {
+      sweep_code_cache();
+      _invocations--;
+    }
+    _sweep_started = 0;
   }
-  _sweep_started = 0;
 }
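
_sweep_started is now cleared inside the guarded region and _invocations is re-checked after the claim is taken, so two compiler threads racing into possibly_sweep() cannot both sweep or clear each other's claim. Sketch of the protocol, assuming the claim (outside this hunk's context) is an atomic compare-and-swap:

    //   jint old = Atomic::cmpxchg(1, &_sweep_started, 0);  // assumed: one thread wins
    //   if (old != 0) return;                               // lost the race
    //   if (_invocations > 0) { sweep_code_cache(); _invocations--; }
    //   _sweep_started = 0;                                 // release the claim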
 
 void NMethodSweeper::sweep_code_cache() {
 #ifdef ASSERT
   jlong sweep_start;
-  if(PrintMethodFlushing) {
+  if (PrintMethodFlushing) {
     sweep_start = os::javaTimeMillis();
   }
 #endif
   if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
   }
 
-  // We want to visit all nmethods after NmethodSweepFraction invocations.
-  // If invocation is 1 we do the rest
-  int todo = CodeCache::nof_blobs();
-  if (_invocations > 1) {
-    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
-  }
-
-  // Compilers may check to sweep more often than stack scans happen,
-  // don't keep trying once it is all scanned
-  _invocations--;
+  // We want to visit all nmethods after NmethodSweepFraction
+  // invocations so divide the remaining number of nmethods by the
+  // remaining number of invocations.  This is only an estimate since
+  // the number of nmethods changes during the sweep so the final
+  // stage must iterate until there are no more nmethods.
+  int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
 
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   assert(!CodeCache_lock->owned_by_self(), "just checking");
@@ -166,26 +157,25 @@
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
-    for(int i = 0; i < todo && _current != NULL; i++) {
+    // The last invocation iterates until there are no more nmethods
+    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
 
-      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
-      // Other blobs can be deleted by other threads
-      // Read next before we potentially delete current
-      CodeBlob* next = CodeCache::next_nmethod(_current);
+      // Since we will give up the CodeCache_lock, always skip ahead
+      // to the next nmethod.  Other blobs can be deleted by other
+      // threads but nmethods are only reclaimed by the sweeper.
+      nmethod* next = CodeCache::next_nmethod(_current);
 
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-        process_nmethod((nmethod *)_current);
+        process_nmethod(_current);
       }
       _seen++;
       _current = next;
     }
+  }
 
-    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
-    // can be freed async to us and make _current invalid while we sleep.
-    _current = CodeCache::next_nmethod(_current);
-  }
+  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 
   if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
     // we've completed a scan without making progress but there were
@@ -205,6 +195,10 @@
     tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
   }
 #endif
+
+  if (_invocations == 1) {
+    log_sweep("finished");
+  }
 }
 
 
@@ -227,7 +221,7 @@
   if (nm->is_zombie()) {
     // The first time we see a zombie nmethod we mark it. Otherwise,
     // we reclaim it. When we have seen a zombie method twice, we know that
-    // there are no inline caches that referes to it.
+    // there are no inline caches that refer to it.
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
       if (PrintMethodFlushing && Verbose) {
@@ -324,16 +318,8 @@
     jlong curr_interval = now - _last_was_full;
     if (curr_interval < max_interval) {
       _rescan = true;
-      if (PrintMethodFlushing) {
-        tty->print_cr("### handle full too often, turning off compiler");
-      }
-      if (LogCompilation && (xtty != NULL)) {
-        ttyLocker ttyl;
-        xtty->begin_elem("disable_compiler flushing_interval='" UINT64_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
-                         curr_interval/1000, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
-        xtty->stamp();
-        xtty->end_elem();
-      }
+      log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
+                           curr_interval/1000);
       return;
     }
   }
@@ -353,17 +339,7 @@
 
   if ((!was_full()) && (is_full)) {
     if (!CodeCache::needs_flushing()) {
-      if (PrintMethodFlushing) {
-        tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
-          CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
-      }
-      if (LogCompilation && (xtty != NULL)) {
-        ttyLocker ttyl;
-        xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
-                         CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
-        xtty->stamp();
-        xtty->end_elem();
-      }
+      log_sweep("restart_compiler");
       CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
       return;
     }
@@ -372,17 +348,7 @@
   // Traverse the code cache trying to dump the oldest nmethods
   uint curr_max_comp_id = CompileBroker::get_compilation_id();
   uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
-  if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Cleaning code cache: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes",
-        CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
-  }
-  if (LogCompilation && (xtty != NULL)) {
-    ttyLocker ttyl;
-    xtty->begin_elem("start_cleaning_code_cache live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
-                      CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
-    xtty->stamp();
-    xtty->end_elem();
-  }
+  log_sweep("start_cleaning");
 
   nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
   jint disconnected = 0;
@@ -415,13 +381,9 @@
     nm = CodeCache::alive_nmethod(CodeCache::next(nm));
   }
 
-  if (LogCompilation && (xtty != NULL)) {
-    ttyLocker ttyl;
-    xtty->begin_elem("stop_cleaning_code_cache disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
-                      disconnected, made_not_entrant, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
-    xtty->stamp();
-    xtty->end_elem();
-  }
+  log_sweep("stop_cleaning",
+                       "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
+                       disconnected, made_not_entrant);
 
   // Shut off compiler. Sweeper will start over with a new stack scan and
   // traversal cycle and turn it back on if it clears enough space.
@@ -439,3 +401,38 @@
   }
 #endif
 }
+
+
+// Print out some state information about the current sweep and the
+// state of the code cache if it's requested.
+void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
+  if (PrintMethodFlushing) {
+    ttyLocker ttyl;
+    tty->print("### sweeper: %s ", msg);
+    if (format != NULL) {
+      va_list ap;
+      va_start(ap, format);
+      tty->vprint(format, ap);
+      va_end(ap);
+    }
+    tty->print_cr(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
+                  " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                  CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
+  }
+
+  if (LogCompilation && (xtty != NULL)) {
+    ttyLocker ttyl;
+    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
+    if (format != NULL) {
+      va_list ap;
+      va_start(ap, format);
+      xtty->vprint(format, ap);
+      va_end(ap);
+    }
+    xtty->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
+                " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
+    xtty->stamp();
+    xtty->end_elem();
+  }
+}
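
log_sweep() treats its format argument as optional (NULL means "message only") and forwards the trailing arguments via va_list once per output stream. A standalone sketch of the same pattern against stdio rather than HotSpot's tty/xtty streams (log_event and its call sites are hypothetical):

    #include <stdarg.h>
    #include <stdio.h>

    // Hypothetical logger: msg is mandatory, format and its varargs optional.
    static void log_event(const char* msg, const char* format = NULL, ...) {
      printf("### sweeper: %s ", msg);
      if (format != NULL) {
        va_list ap;
        va_start(ap, format);
        vprintf(format, ap);
        va_end(ap);
      }
      printf("\n");
    }

    // log_event("finished");
    // log_event("stop_cleaning", "disconnected='%d'", 42);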
--- a/src/share/vm/runtime/sweeper.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/sweeper.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,14 +29,15 @@
 
 class NMethodSweeper : public AllStatic {
   static long      _traversals;   // Stack traversal count
-  static CodeBlob* _current;      // Current nmethod
+  static nmethod*  _current;      // Current nmethod
   static int       _seen;         // Number of nmethods we have processed so far in the current pass of the CodeCache
-  static int       _invocations;  // No. of invocations left until we are completed with this pass
+
+  static volatile int      _invocations;   // No. of invocations left until this pass is completed
+  static volatile int      _sweep_started; // Flag to control conc sweeper
 
   static bool      _rescan;          // Indicates that we should do a full rescan
                                      // of the code cache looking for work to do.
   static bool      _do_sweep;        // Flag to skip the conc sweep if no stack scan happened
-  static jint      _sweep_started;   // Flag to control conc sweeper
   static int       _locked_seen;     // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on the stack
 
@@ -47,6 +48,9 @@
   static long      _was_full_traversal;   // trav number at last emergency unloading
 
   static void process_nmethod(nmethod *nm);
+
+  static void log_sweep(const char* msg, const char* format = NULL, ...);
+
  public:
   static long traversal_count() { return _traversals; }
 
--- a/src/share/vm/runtime/synchronizer.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/synchronizer.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -747,6 +747,8 @@
 
 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
+ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
+int ObjectSynchronizer::gOmInUseCount = 0;
 static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
 static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
 static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
@@ -826,6 +828,22 @@
     }
   }
 }
+/* Too slow for general assert or debug
+void ObjectSynchronizer::verifyInUse (Thread *Self) {
+   ObjectMonitor* mid;
+   int inusetally = 0;
+   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
+     inusetally ++;
+   }
+   assert(inusetally == Self->omInUseCount, "inuse count off");
+
+   int freetally = 0;
+   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
+     freetally ++;
+   }
+   assert(freetally == Self->omFreeCount, "free count off");
+}
+*/
 
 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
     // A large MAXPRIVATE value reduces both list lock contention
@@ -853,6 +871,9 @@
              m->FreeNext = Self->omInUseList;
              Self->omInUseList = m;
              Self->omInUseCount ++;
+             // verifyInUse(Self);
+           } else {
+             m->FreeNext = NULL;
            }
            return m ;
         }
@@ -874,13 +895,12 @@
                 guarantee (take->object() == NULL, "invariant") ;
                 guarantee (!take->is_busy(), "invariant") ;
                 take->Recycle() ;
-                omRelease (Self, take) ;
+                omRelease (Self, take, false) ;
             }
             Thread::muxRelease (&ListLock) ;
             Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
             if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
             TEVENT (omFirst - reprovision) ;
-            continue ;
 
             const int mx = MonitorBound ;
             if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
@@ -961,11 +981,34 @@
 // That is, *not* one-at-a-time.
 
 
-void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) {
+void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
     guarantee (m->object() == NULL, "invariant") ;
-    m->FreeNext = Self->omFreeList ;
-    Self->omFreeList = m ;
-    Self->omFreeCount ++ ;
+
+    // Remove from omInUseList
+    if (MonitorInUseLists && fromPerThreadAlloc) {
+      ObjectMonitor* curmidinuse = NULL;
+      for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
+        if (m == mid) {
+          // extract from per-thread in-use list
+          if (mid == Self->omInUseList) {
+            Self->omInUseList = mid->FreeNext;
+          } else if (curmidinuse != NULL) {
+            curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread's in-use list
+          }
+          Self->omInUseCount--;
+          // verifyInUse(Self);
+          break;
+        } else {
+          curmidinuse = mid;
+          mid = mid->FreeNext;
+        }
+      }
+    }
+
+    // FreeNext is used for both omInUseList and omFreeList, so clear the old link before setting the new one
+    m->FreeNext = Self->omFreeList ;
+    Self->omFreeList = m ;
+    Self->omFreeCount ++ ;
 }
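
The removal loop above is the classic trailing-pointer unlink on a singly linked list. A generic, self-contained sketch with an illustrative Node type (not ObjectMonitor):

    struct Node { Node* next; };

    // Unlink m from the list rooted at *headp, if present.
    static void unlink(Node** headp, Node* m) {
      Node* prev = NULL;
      for (Node* cur = *headp; cur != NULL; prev = cur, cur = cur->next) {
        if (cur == m) {
          if (prev == NULL) {
            *headp = cur->next;      // m was the head
          } else {
            prev->next = cur->next;  // splice around m
          }
          m->next = NULL;            // m is about to be rethreaded elsewhere
          break;
        }
      }
    }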
 
 // Return the monitors of a moribund thread's local free list to
@@ -975,6 +1018,10 @@
 // consecutive STW safepoints.  Relatedly, we might decay
 // omFreeProvision at STW safepoints.
 //
+// Also return the monitors of a moribund thread's omInUseList to
+// a global gOmInUseList under the global list lock so these
+// will continue to be scanned.
+//
 // We currently call omFlush() from the Thread:: dtor _after the thread
 // has been excised from the thread list and is no longer a mutator.
 // That means that omFlush() can run concurrently with a safepoint and
@@ -987,24 +1034,50 @@
 void ObjectSynchronizer::omFlush (Thread * Self) {
     ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
     Self->omFreeList = NULL ;
-    if (List == NULL) return ;
     ObjectMonitor * Tail = NULL ;
-    ObjectMonitor * s ;
     int Tally = 0;
-    for (s = List ; s != NULL ; s = s->FreeNext) {
-        Tally ++ ;
-        Tail = s ;
-        guarantee (s->object() == NULL, "invariant") ;
-        guarantee (!s->is_busy(), "invariant") ;
-        s->set_owner (NULL) ;   // redundant but good hygiene
-        TEVENT (omFlush - Move one) ;
+    if (List != NULL) {
+      ObjectMonitor * s ;
+      for (s = List ; s != NULL ; s = s->FreeNext) {
+          Tally ++ ;
+          Tail = s ;
+          guarantee (s->object() == NULL, "invariant") ;
+          guarantee (!s->is_busy(), "invariant") ;
+          s->set_owner (NULL) ;   // redundant but good hygiene
+          TEVENT (omFlush - Move one) ;
+      }
+      guarantee (Tail != NULL && List != NULL, "invariant") ;
     }
 
-    guarantee (Tail != NULL && List != NULL, "invariant") ;
+    ObjectMonitor * InUseList = Self->omInUseList;
+    ObjectMonitor * InUseTail = NULL ;
+    int InUseTally = 0;
+    if (InUseList != NULL) {
+      Self->omInUseList = NULL;
+      ObjectMonitor *curom;
+      for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
+        InUseTail = curom;
+        InUseTally++;
+      }
+      // TODO debug
+      assert(Self->omInUseCount == InUseTally, "inuse count off");
+      Self->omInUseCount = 0;
+      guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
+    }
+
     Thread::muxAcquire (&ListLock, "omFlush") ;
-    Tail->FreeNext = gFreeList ;
-    gFreeList = List ;
-    MonitorFreeCount += Tally;
+    if (Tail != NULL) {
+      Tail->FreeNext = gFreeList ;
+      gFreeList = List ;
+      MonitorFreeCount += Tally;
+    }
+
+    if (InUseTail != NULL) {
+      InUseTail->FreeNext = gOmInUseList;
+      gOmInUseList = InUseList;
+      gOmInUseCount += InUseTally;
+    }
+
     Thread::muxRelease (&ListLock) ;
     TEVENT (omFlush) ;
 }
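
Because the tally passes above also find each list's tail, the splices under ListLock are constant time. A minimal sketch with illustrative names:

    struct Node { Node* next; };
    static Node* g_head  = NULL;
    static int   g_count = 0;

    // Prepend a counted, null-terminated local list [head..tail] of length
    // tally onto the global list; caller holds the global list lock.
    static void splice(Node* head, Node* tail, int tally) {
      if (tail == NULL) return;  // local list was empty
      tail->next = g_head;       // old global head follows the local tail
      g_head  = head;
      g_count += tally;
    }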
@@ -1166,7 +1239,6 @@
           // We do this before the CAS in order to minimize the length of time
           // in which INFLATING appears in the mark.
           m->Recycle();
-          m->FreeNext      = NULL ;
           m->_Responsible  = NULL ;
           m->OwnerIsThread = 0 ;
           m->_recursions   = 0 ;
@@ -1174,7 +1246,7 @@
 
           markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
           if (cmp != mark) {
-             omRelease (Self, m) ;
+             omRelease (Self, m, true) ;
              continue ;       // Interference -- just retry
           }
 
@@ -1262,7 +1334,6 @@
       m->set_object(object);
       m->OwnerIsThread = 1 ;
       m->_recursions   = 0 ;
-      m->FreeNext      = NULL ;
       m->_Responsible  = NULL ;
       m->_SpinDuration = Knob_SpinLimit ;       // consider: keep metastats by type/class
 
@@ -1271,7 +1342,7 @@
           m->set_owner  (NULL) ;
           m->OwnerIsThread = 0 ;
           m->Recycle() ;
-          omRelease (Self, m) ;
+          omRelease (Self, m, true) ;
           m = NULL ;
           continue ;
           // interference - the markword changed - just retry.
@@ -1852,6 +1923,10 @@
 // only scans the per-thread inuse lists. omAlloc() puts all
 // assigned monitors on the per-thread list. deflate_idle_monitors()
 // returns the non-busy monitors to the global free list.
+// When a thread dies, omFlush() adds that thread's list of active
+// monitors to a global gOmInUseList while holding the global
+// list lock. deflate_idle_monitors() then acquires the same lock to
+// scan that list and return non-busy monitors to the global free list.
 // An alternative could have used a single global inuse list. The
 // downside would have been the additional cost of acquiring the global list lock
 // for every omAlloc().
@@ -1904,6 +1979,7 @@
      if (*FreeHeadp == NULL) *FreeHeadp = mid;
      if (*FreeTailp != NULL) {
        ObjectMonitor * prevtail = *FreeTailp;
+       assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
        prevtail->FreeNext = mid;
       }
      *FreeTailp = mid;
@@ -1912,6 +1988,39 @@
   return deflated;
 }
 
+// Caller acquires ListLock
+int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
+                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
+  ObjectMonitor* mid;
+  ObjectMonitor* next;
+  ObjectMonitor* curmidinuse = NULL;
+  int deflatedcount = 0;
+
+  for (mid = *listheadp; mid != NULL; ) {
+    oop obj = (oop) mid->object();
+    bool deflated = false;
+    if (obj != NULL) {
+      deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
+    }
+    if (deflated) {
+      // extract from per-thread in-use-list
+      if (mid == *listheadp) {
+        *listheadp = mid->FreeNext;
+      } else if (curmidinuse != NULL) {
+        curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
+      }
+      next = mid->FreeNext;
+      mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
+      mid = next;
+      deflatedcount++;
+    } else {
+      curmidinuse = mid;
+      mid = mid->FreeNext;
+    }
+  }
+  return deflatedcount;
+}
+
 void ObjectSynchronizer::deflate_idle_monitors() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   int nInuse = 0 ;              // currently associated with objects
@@ -1929,36 +2038,25 @@
   Thread::muxAcquire (&ListLock, "scavenge - return") ;
 
   if (MonitorInUseLists) {
-    ObjectMonitor* mid;
-    ObjectMonitor* next;
-    ObjectMonitor* curmidinuse;
+    int inUse = 0;
     for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
-      curmidinuse = NULL;
-      for (mid = cur->omInUseList; mid != NULL; ) {
-        oop obj = (oop) mid->object();
-        deflated = false;
-        if (obj != NULL) {
-          deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
-        }
-        if (deflated) {
-          // extract from per-thread in-use-list
-          if (mid == cur->omInUseList) {
-            cur->omInUseList = mid->FreeNext;
-          } else if (curmidinuse != NULL) {
-            curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
-          }
-          next = mid->FreeNext;
-          mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
-          mid = next;
-          cur->omInUseCount--;
-          nScavenged ++ ;
-        } else {
-          curmidinuse = mid;
-          mid = mid->FreeNext;
-          nInuse ++;
-        }
+      nInCirculation += cur->omInUseCount;
+      int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
+      cur->omInUseCount -= deflatedcount;
+      // verifyInUse(cur);
+      nScavenged += deflatedcount;
+      nInuse += cur->omInUseCount;
      }
-   }
+
+    // For moribund threads, scan gOmInUseList
+    if (gOmInUseList) {
+      nInCirculation += gOmInUseCount;
+      int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
+      gOmInUseCount -= deflatedcount;
+      nScavenged += deflatedcount;
+      nInuse += gOmInUseCount;
+    }
+
   } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
   // Iterate over all extant monitors - Scavenge all idle monitors.
     assert(block->object() == CHAINMARKER, "must be a block header");
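
On the MonitorInUseLists path above, each scanned list contributes its pre-walk length to nInCirculation, its deflated count to nScavenged, and its post-walk length to nInuse, so after the scan the counters satisfy

    nInCirculation == nInuse + nScavenged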
--- a/src/share/vm/runtime/synchronizer.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/synchronizer.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -122,8 +122,9 @@
   static void reenter            (Handle obj, intptr_t recursion, TRAPS);
 
   // thread-specific and global objectMonitor free list accessors
+//  static void verifyInUse (Thread * Self) ; too slow for general assert/debug
   static ObjectMonitor * omAlloc (Thread * Self) ;
-  static void omRelease (Thread * Self, ObjectMonitor * m) ;
+  static void omRelease (Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc) ;
   static void omFlush   (Thread * Self) ;
 
   // Inflate light weight monitor to heavy weight monitor
@@ -150,6 +151,9 @@
   // Basically we deflate all monitors that are not busy.
   // An adaptive profile-based deflation policy could be used if needed
   static void deflate_idle_monitors();
+  static int walk_monitor_list(ObjectMonitor** listheadp,
+                               ObjectMonitor** FreeHeadp,
+                               ObjectMonitor** FreeTailp);
   static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** FreeHeadp,
                               ObjectMonitor** FreeTailp);
   static void oops_do(OopClosure* f);
@@ -163,6 +167,8 @@
   enum { _BLOCKSIZE = 128 };
   static ObjectMonitor* gBlockList;
   static ObjectMonitor * volatile gFreeList;
+  static ObjectMonitor * volatile gOmInUseList; // for moribund thread, so monitors they inflated still get scanned
+  static int gOmInUseCount;
 
  public:
   static void Initialize () ;
--- a/src/share/vm/runtime/thread.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/thread.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -2700,7 +2700,7 @@
   if (in_bytes(size_in_bytes) != 0) {
     _popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes));
     _popframe_preserved_args_size = in_bytes(size_in_bytes);
-    Copy::conjoint_bytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
+    Copy::conjoint_jbytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
   }
 }
 
--- a/src/share/vm/runtime/thread.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/thread.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -270,6 +270,7 @@
   static void interrupt(Thread* thr);
   static bool is_interrupted(Thread* thr, bool clear_interrupted);
 
+  ObjectMonitor** omInUseList_addr()             { return (ObjectMonitor **)&omInUseList; }
   Monitor* SR_lock() const                       { return _SR_lock; }
 
   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
--- a/src/share/vm/runtime/vframeArray.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/vframeArray.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -355,9 +355,9 @@
       } else {
         base = iframe()->interpreter_frame_expression_stack();
       }
-      Copy::conjoint_bytes(saved_args,
-                           base,
-                           popframe_preserved_args_size_in_bytes);
+      Copy::conjoint_jbytes(saved_args,
+                            base,
+                            popframe_preserved_args_size_in_bytes);
       thread->popframe_free_preserved_args();
     }
   }
--- a/src/share/vm/runtime/virtualspace.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/virtualspace.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -111,6 +111,35 @@
   return result;
 }
 
+// Helper method.
+static bool failed_to_reserve_as_requested(char* base, char* requested_address,
+                                           const size_t size, bool special)
+{
+  if (base == requested_address || requested_address == NULL)
+    return false; // did not fail
+
+  if (base != NULL) {
+    // Different reserve address may be acceptable in other cases
+    // but for compressed oops heap should be at requested address.
+    assert(UseCompressedOops, "currently requested address used only for compressed oops");
+    if (PrintCompressedOopsMode) {
+      tty->cr();
+      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
+    }
+    // OS ignored requested address. Try different address.
+    if (special) {
+      if (!os::release_memory_special(base, size)) {
+        fatal("os::release_memory_special failed");
+      }
+    } else {
+      if (!os::release_memory(base, size)) {
+        fatal("os::release_memory failed");
+      }
+    }
+  }
+  return true;
+}
+
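
The helper is used in an attempt-then-retry pattern, condensed here from the constructors below (alignment as in the non-prefix path):

    char* base = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
      base = NULL;  // the helper already released the stray mapping
    }
    if (base == NULL) {
      base = os::reserve_memory(size, NULL, alignment);  // let the OS choose
    }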
 ReservedSpace::ReservedSpace(const size_t prefix_size,
                              const size_t prefix_align,
                              const size_t suffix_size,
@@ -129,6 +158,10 @@
   assert((suffix_align & prefix_align - 1) == 0,
     "suffix_align not divisible by prefix_align");
 
+  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == prefix_align, "noaccess prefix wrong");
+
   // Add noaccess_prefix to prefix_size.
   const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
   const size_t size = adjusted_prefix_size + suffix_size;
@@ -150,15 +183,16 @@
   _noaccess_prefix = 0;
   _executable = false;
 
-  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
-  assert(noaccess_prefix == 0 ||
-         noaccess_prefix == prefix_align, "noaccess prefix wrong");
-
   // Optimistically try to reserve the exact size needed.
   char* addr;
   if (requested_address != 0) {
-    addr = os::attempt_reserve_memory_at(size,
-                                         requested_address-noaccess_prefix);
+    requested_address -= noaccess_prefix; // adjust address
+    assert(requested_address != NULL, "huge noaccess prefix?");
+    addr = os::attempt_reserve_memory_at(size, requested_address);
+    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
+      // OS ignored requested address. Try different address.
+      addr = NULL;
+    }
   } else {
     addr = os::reserve_memory(size, NULL, prefix_align);
   }
@@ -222,11 +256,20 @@
   bool special = large && !os::can_commit_large_page_memory();
   char* base = NULL;
 
+  if (requested_address != 0) {
+    requested_address -= noaccess_prefix; // adjust requested address
+    assert(requested_address != NULL, "huge noaccess prefix?");
+  }
+
   if (special) {
 
     base = os::reserve_memory_special(size, requested_address, executable);
 
     if (base != NULL) {
+      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
+        // OS ignored requested address. Try different address.
+        return;
+      }
       // Check alignment constraints
       if (alignment > 0) {
         assert((uintptr_t) base % alignment == 0,
@@ -235,6 +278,13 @@
       _special = true;
     } else {
       // failed; try to reserve regular memory below
+      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+        if (PrintCompressedOopsMode) {
+          tty->cr();
+          tty->print_cr("Reserving regular memory without large pages.");
+        }
+      }
     }
   }
 
@@ -248,8 +298,11 @@
     // important.  If available space is not detected, return NULL.
 
     if (requested_address != 0) {
-      base = os::attempt_reserve_memory_at(size,
-                                           requested_address-noaccess_prefix);
+      base = os::attempt_reserve_memory_at(size, requested_address);
+      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
+        // OS ignored requested address. Try different address.
+        base = NULL;
+      }
     } else {
       base = os::reserve_memory(size, NULL, alignment);
     }
@@ -365,7 +418,12 @@
 }
 
 void ReservedSpace::protect_noaccess_prefix(const size_t size) {
-  // If there is noaccess prefix, return.
+  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
+                                      (size_t(_base + _size) > OopEncodingHeapMax) &&
+                                      Universe::narrow_oop_use_implicit_null_checks()),
+         "noaccess_prefix should be used only with non zero based compressed oops");
+
+  // If there is no noaccess prefix, return.
   if (_noaccess_prefix == 0) return;
 
   assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
@@ -377,6 +435,10 @@
                           _special)) {
     fatal("cannot protect protection page");
   }
+  if (PrintCompressedOopsMode) {
+    tty->cr();
+    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
+  }
 
   _base += _noaccess_prefix;
   _size -= _noaccess_prefix;
--- a/src/share/vm/runtime/vmStructs.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/runtime/vmStructs.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -614,7 +614,6 @@
   /* NMethods (NOTE: incomplete, but only a little) */                                                                               \
   /**************************************************/                                                                               \
                                                                                                                                      \
-     static_field(nmethod,             _zombie_instruction_size,                      int)                                   \
   nonstatic_field(nmethod,             _method,                                       methodOop)                             \
   nonstatic_field(nmethod,             _entry_bci,                                    int)                                   \
   nonstatic_field(nmethod,             _osr_link,                                     nmethod*)                              \
--- a/src/share/vm/runtime/vtune.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-// Interface to Intel's VTune profiler.
-
-class VTune : AllStatic {
- public:
-   static void create_nmethod(nmethod* nm);      // register newly created nmethod
-   static void delete_nmethod(nmethod* nm);      // unregister nmethod before discarding it
-
-   static void register_stub(const char* name, address start, address end);
-                                                 // register internal VM stub
-   static void start_GC();                       // start/end of GC or scavenge
-   static void end_GC();
-
-   static void start_class_load();               // start/end of class loading
-   static void end_class_load();
-
-   static void exit();                           // VM exit
-};
-
-
-// helper objects
-class VTuneGCMarker : StackObj {
- public:
-   VTuneGCMarker() { VTune::start_GC(); }
-  ~VTuneGCMarker() { VTune::end_GC(); }
-};
-
-class VTuneClassLoadMarker : StackObj {
- public:
-   VTuneClassLoadMarker() { VTune::start_class_load(); }
-  ~VTuneClassLoadMarker() { VTune::end_class_load(); }
-};
--- a/src/share/vm/utilities/constantTag.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/utilities/constantTag.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -28,56 +28,87 @@
 #ifndef PRODUCT
 
 void constantTag::print_on(outputStream* st) const {
+  st->print(internal_name());
+}
+
+#endif // PRODUCT
+
+BasicType constantTag::basic_type() const {
   switch (_tag) {
-    case JVM_CONSTANT_Class :
-      st->print("Class");
-      break;
-    case JVM_CONSTANT_Fieldref :
-      st->print("Field");
-      break;
-    case JVM_CONSTANT_Methodref :
-      st->print("Method");
-      break;
-    case JVM_CONSTANT_InterfaceMethodref :
-      st->print("InterfaceMethod");
-      break;
-    case JVM_CONSTANT_String :
-      st->print("String");
-      break;
     case JVM_CONSTANT_Integer :
-      st->print("Integer");
-      break;
+      return T_INT;
     case JVM_CONSTANT_Float :
-      st->print("Float");
-      break;
+      return T_FLOAT;
     case JVM_CONSTANT_Long :
-      st->print("Long");
-      break;
+      return T_LONG;
     case JVM_CONSTANT_Double :
-      st->print("Double");
-      break;
-    case JVM_CONSTANT_NameAndType :
-      st->print("NameAndType");
-      break;
-    case JVM_CONSTANT_Utf8 :
-      st->print("Utf8");
-      break;
+      return T_DOUBLE;
+
+    case JVM_CONSTANT_Class :
+    case JVM_CONSTANT_String :
     case JVM_CONSTANT_UnresolvedClass :
-      st->print("Unresolved class");
-      break;
+    case JVM_CONSTANT_UnresolvedClassInError :
     case JVM_CONSTANT_ClassIndex :
-      st->print("Unresolved class index");
-      break;
     case JVM_CONSTANT_UnresolvedString :
-      st->print("Unresolved string");
-      break;
     case JVM_CONSTANT_StringIndex :
-      st->print("Unresolved string index");
-      break;
+    case JVM_CONSTANT_MethodHandle :
+    case JVM_CONSTANT_MethodType :
+    case JVM_CONSTANT_Object :
+      return T_OBJECT;
     default:
       ShouldNotReachHere();
-      break;
+      return T_ILLEGAL;
   }
 }
 
-#endif // PRODUCT
+
+
+const char* constantTag::internal_name() const {
+  switch (_tag) {
+    case JVM_CONSTANT_Invalid :
+      return "Invalid index";
+    case JVM_CONSTANT_Class :
+      return "Class";
+    case JVM_CONSTANT_Fieldref :
+      return "Field";
+    case JVM_CONSTANT_Methodref :
+      return "Method";
+    case JVM_CONSTANT_InterfaceMethodref :
+      return "InterfaceMethod";
+    case JVM_CONSTANT_String :
+      return "String";
+    case JVM_CONSTANT_Integer :
+      return "Integer";
+    case JVM_CONSTANT_Float :
+      return "Float";
+    case JVM_CONSTANT_Long :
+      return "Long";
+    case JVM_CONSTANT_Double :
+      return "Double";
+    case JVM_CONSTANT_NameAndType :
+      return "NameAndType";
+    case JVM_CONSTANT_MethodHandle :
+      return "MethodHandle";
+    case JVM_CONSTANT_MethodType :
+      return "MethodType";
+    case JVM_CONSTANT_InvokeDynamic :
+      return "InvokeDynamic";
+    case JVM_CONSTANT_Object :
+      return "Object";
+    case JVM_CONSTANT_Utf8 :
+      return "Utf8";
+    case JVM_CONSTANT_UnresolvedClass :
+      return "Unresolved Class";
+    case JVM_CONSTANT_UnresolvedClassInError :
+      return "Unresolved Class Error";
+    case JVM_CONSTANT_ClassIndex :
+      return "Unresolved Class Index";
+    case JVM_CONSTANT_UnresolvedString :
+      return "Unresolved String";
+    case JVM_CONSTANT_StringIndex :
+      return "Unresolved String Index";
+    default:
+      ShouldNotReachHere();
+      return "Illegal";
+  }
+}
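
A brief usage sketch of the two new queries (tag value chosen for illustration):

    constantTag tag(JVM_CONSTANT_Float);
    BasicType bt = tag.basic_type();         // T_FLOAT: what ldc would push
    const char* name = tag.internal_name();  // "Float", for error reporting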
--- a/src/share/vm/utilities/constantTag.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/utilities/constantTag.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -78,13 +78,25 @@
   bool is_field_or_method() const   { return is_field() || is_method() || is_interface_method(); }
   bool is_symbol() const            { return is_utf8(); }
 
+  bool is_method_type() const              { return _tag == JVM_CONSTANT_MethodType; }
+  bool is_method_handle() const            { return _tag == JVM_CONSTANT_MethodHandle; }
+  bool is_invoke_dynamic() const           { return _tag == JVM_CONSTANT_InvokeDynamic; }
+
+  constantTag() {
+    _tag = JVM_CONSTANT_Invalid;
+  }
   constantTag(jbyte tag) {
     assert((tag >= 0 && tag <= JVM_CONSTANT_NameAndType) ||
+           (tag >= JVM_CONSTANT_MethodHandle && tag <= JVM_CONSTANT_InvokeDynamic) ||
            (tag >= JVM_CONSTANT_InternalMin && tag <= JVM_CONSTANT_InternalMax), "Invalid constant tag");
     _tag = tag;
   }
 
   jbyte value()                      { return _tag; }
 
+  BasicType basic_type() const;        // if used with ldc, what kind of value gets pushed?
+
+  const char* internal_name() const;  // for error reporting
+
   void print_on(outputStream* st) const PRODUCT_RETURN;
 };
--- a/src/share/vm/utilities/copy.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/utilities/copy.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -48,7 +48,7 @@
     Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
   } else {
     // Not aligned, so no need to be atomic.
-    Copy::conjoint_bytes((void*) src, (void*) dst, size);
+    Copy::conjoint_jbytes((void*) src, (void*) dst, size);
   }
 }
 
--- a/src/share/vm/utilities/copy.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/utilities/copy.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -73,6 +73,9 @@
   // whole alignment units.  E.g., if BytesPerLong is 2x word alignment, an odd
   // count may copy an extra word.  In the arrayof case, we are allowed to copy
   // only the number of copy units specified.
+  //
+  // All callees check count for 0.
+  //
 
   // HeapWords
 
@@ -99,7 +102,6 @@
   // Object-aligned words,  conjoint, not atomic on each word
   static void aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_aligned(from, to);
-    assert_non_zero(count);
     pd_aligned_conjoint_words(from, to, count);
   }
 
@@ -107,49 +109,42 @@
   static void aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_aligned(from, to);
     assert_disjoint(from, to, count);
-    assert_non_zero(count);
     pd_aligned_disjoint_words(from, to, count);
   }
 
   // bytes, jshorts, jints, jlongs, oops
 
   // bytes,                 conjoint, not atomic on each byte (not that it matters)
-  static void conjoint_bytes(void* from, void* to, size_t count) {
-    assert_non_zero(count);
+  static void conjoint_jbytes(void* from, void* to, size_t count) {
     pd_conjoint_bytes(from, to, count);
   }
 
   // bytes,                 conjoint, atomic on each byte (not that it matters)
-  static void conjoint_bytes_atomic(void* from, void* to, size_t count) {
-    assert_non_zero(count);
+  static void conjoint_jbytes_atomic(void* from, void* to, size_t count) {
     pd_conjoint_bytes(from, to, count);
   }
 
   // jshorts,               conjoint, atomic on each jshort
   static void conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerShort);
-    assert_non_zero(count);
     pd_conjoint_jshorts_atomic(from, to, count);
   }
 
   // jints,                 conjoint, atomic on each jint
   static void conjoint_jints_atomic(jint* from, jint* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerInt);
-    assert_non_zero(count);
     pd_conjoint_jints_atomic(from, to, count);
   }
 
   // jlongs,                conjoint, atomic on each jlong
   static void conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerLong);
-    assert_non_zero(count);
     pd_conjoint_jlongs_atomic(from, to, count);
   }
 
   // oops,                  conjoint, atomic on each oop
   static void conjoint_oops_atomic(oop* from, oop* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerHeapOop);
-    assert_non_zero(count);
     pd_conjoint_oops_atomic(from, to, count);
   }
 
@@ -157,7 +152,6 @@
   static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) {
     assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong");
     assert_params_ok(from, to, LogBytesPerInt);
-    assert_non_zero(count);
     pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
   }
 
@@ -168,36 +162,31 @@
   static void conjoint_memory_atomic(void* from, void* to, size_t size);
 
   // bytes,                 conjoint array, atomic on each byte (not that it matters)
-  static void arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
-    assert_non_zero(count);
+  static void arrayof_conjoint_jbytes(HeapWord* from, HeapWord* to, size_t count) {
     pd_arrayof_conjoint_bytes(from, to, count);
   }
 
   // jshorts,               conjoint array, atomic on each jshort
   static void arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerShort);
-    assert_non_zero(count);
     pd_arrayof_conjoint_jshorts(from, to, count);
   }
 
   // jints,                 conjoint array, atomic on each jint
   static void arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerInt);
-    assert_non_zero(count);
     pd_arrayof_conjoint_jints(from, to, count);
   }
 
   // jlongs,                conjoint array, atomic on each jlong
   static void arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerLong);
-    assert_non_zero(count);
     pd_arrayof_conjoint_jlongs(from, to, count);
   }
 
   // oops,                  conjoint array, atomic on each oop
   static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerHeapOop);
-    assert_non_zero(count);
     pd_arrayof_conjoint_oops(from, to, count);
   }
 
@@ -319,14 +308,6 @@
 #endif
   }
 
-  static void assert_non_zero(size_t count) {
-#ifdef ASSERT
-    if (count == 0) {
-      basic_fatal("count must be non-zero");
-    }
-#endif
-  }
-
   static void assert_byte_count_ok(size_t byte_count, size_t unit_size) {
 #ifdef ASSERT
     if ((size_t)round_to(byte_count, unit_size) != byte_count) {
--- a/src/share/vm/utilities/globalDefinitions.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -345,6 +345,35 @@
   return align_size_up(offset, HeapWordsPerLong);
 }
 
+// The expected size in bytes of a cache line, used to pad data structures.
+#define DEFAULT_CACHE_LINE_SIZE 64
+
+// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
+// expected cache line size (a power of two).  The first addend avoids sharing
+// when the start address is not a multiple of alignment; the second maintains
+// alignment of starting addresses that happen to be a multiple.
+#define PADDING_SIZE(type, alignment)                           \
+  ((alignment) + align_size_up_(sizeof(type), alignment))
+
+// Templates to create a subclass padded to avoid cache line sharing.  These are
+// effective only when applied to derived-most (leaf) classes.
+
+// When no args are passed to the base ctor.
+template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded: public T {
+private:
+  char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
+
+// When either 0 or 1 args may be passed to the base ctor.
+template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded01: public T {
+public:
+  Padded01(): T() { }
+  Padded01(Arg1T arg1): T(arg1) { }
+private:
+  char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
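
For example, with the default 64-byte line and a small payload the pad works out as follows (align_size_up_ rounds its first argument up to a multiple of the second; Counter is illustrative):

    struct Counter { volatile int value; };  // sizeof == 4 on typical builds
    typedef Padded<Counter> PaddedCounter;
    // PADDING_SIZE(Counter, 64) == 64 + align_size_up_(4, 64)
    //                           == 64 + 64 == 128 pad bytes,
    // so two consecutive PaddedCounter objects can never share a cache
    // line, whatever the start address's offset within a line.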
 
 //----------------------------------------------------------------------------------------------------
 // Utility macros for compilers
--- a/src/share/vm/utilities/taskqueue.cpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/utilities/taskqueue.cpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,48 @@
 uint ParallelTaskTerminator::_total_peeks = 0;
 #endif
 
+#if TASKQUEUE_STATS
+const char * const TaskQueueStats::_names[last_stat_id] = {
+  "qpush", "qpop", "qpop-s", "qattempt", "qsteal", "opush", "omax"
+};
+
+void TaskQueueStats::print_header(unsigned int line, outputStream* const stream,
+                                  unsigned int width)
+{
+  // Use a width w: 1 <= w <= max_width
+  const unsigned int max_width = 40;
+  const unsigned int w = MAX2(MIN2(width, max_width), 1U);
+
+  if (line == 0) { // spaces equal in width to the header
+    const unsigned int hdr_width = w * last_stat_id + last_stat_id - 1;
+    stream->print("%*s", hdr_width, " ");
+  } else if (line == 1) { // labels
+    stream->print("%*s", w, _names[0]);
+    for (unsigned int i = 1; i < last_stat_id; ++i) {
+      stream->print(" %*s", w, _names[i]);
+    }
+  } else if (line == 2) { // dashed lines
+    char dashes[max_width + 1];
+    memset(dashes, '-', w);
+    dashes[w] = '\0';
+    stream->print("%s", dashes);
+    for (unsigned int i = 1; i < last_stat_id; ++i) {
+      stream->print(" %s", dashes);
+    }
+  }
+}
+
+void TaskQueueStats::print(outputStream* stream, unsigned int width) const
+{
+  #define FMT SIZE_FORMAT_W(*)
+  stream->print(FMT, width, _stats[0]);
+  for (unsigned int i = 1; i < last_stat_id; ++i) {
+    stream->print(" " FMT, width, _stats[i]);
+  }
+  #undef FMT
+}
+#endif // TASKQUEUE_STATS
+
 int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
   const int a =      16807;
   const int m = 2147483647;
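
These are the Park-Miller "minimal standard" constants: a = 7^5 and the Mersenne prime m = 2^31 - 1. A self-contained sketch of the recurrence, using a 64-bit intermediate to avoid overflow (the elided HotSpot body may compute it differently):

    // seed' = (16807 * seed) mod (2^31 - 1)
    static int park_miller_next(int seed) {
      const long long a = 16807;
      const long long m = 2147483647;
      return (int)(((long long)seed * a) % m);
    }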
@@ -182,73 +224,3 @@
     _index < objArrayOop(_obj)->length();
 }
 #endif // ASSERT
-
-bool RegionTaskQueueWithOverflow::is_empty() {
-  return (_region_queue.size() == 0) &&
-         (_overflow_stack->length() == 0);
-}
-
-bool RegionTaskQueueWithOverflow::stealable_is_empty() {
-  return _region_queue.size() == 0;
-}
-
-bool RegionTaskQueueWithOverflow::overflow_is_empty() {
-  return _overflow_stack->length() == 0;
-}
-
-void RegionTaskQueueWithOverflow::initialize() {
-  _region_queue.initialize();
-  assert(_overflow_stack == 0, "Creating memory leak");
-  _overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
-}
-
-void RegionTaskQueueWithOverflow::save(RegionTask t) {
-  if (TraceRegionTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
-  }
-  if(!_region_queue.push(t)) {
-    _overflow_stack->push(t);
-  }
-}
-
-// Note that using this method will retrieve all regions
-// that have been saved but that it will always check
-// the overflow stack.  It may be more efficient to
-// check the stealable queue and the overflow stack
-// separately.
-bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
-  bool result = retrieve_from_overflow(region_task);
-  if (!result) {
-    result = retrieve_from_stealable_queue(region_task);
-  }
-  if (TraceRegionTasksQueuing && Verbose && result) {
-    gclog_or_tty->print_cr("  CTQ: retrieve " PTR_FORMAT, result);
-  }
-  return result;
-}
-
-bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
-                                   RegionTask& region_task) {
-  bool result = _region_queue.pop_local(region_task);
-  if (TraceRegionTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
-  }
-  return result;
-}
-
-bool
-RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
-  bool result;
-  if (!_overflow_stack->is_empty()) {
-    region_task = _overflow_stack->pop();
-    result = true;
-  } else {
-    region_task = (RegionTask) NULL;
-    result = false;
-  }
-  if (TraceRegionTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
-  }
-  return result;
-}
--- a/src/share/vm/utilities/taskqueue.hpp	Fri Jul 30 22:43:50 2010 +0100
+++ b/src/share/vm/utilities/taskqueue.hpp	Sat Jul 31 15:10:59 2010 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,72 @@
  *
  */
 
+// Simple TaskQueue stats that are collected by default in debug builds.
+
+#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
+#define TASKQUEUE_STATS 1
+#elif !defined(TASKQUEUE_STATS)
+#define TASKQUEUE_STATS 0
+#endif
+
+#if TASKQUEUE_STATS
+#define TASKQUEUE_STATS_ONLY(code) code
+#else
+#define TASKQUEUE_STATS_ONLY(code)
+#endif // TASKQUEUE_STATS
+
+#if TASKQUEUE_STATS
+class TaskQueueStats {
+public:
+  enum StatId {
+    push,             // number of taskqueue pushes
+    pop,              // number of taskqueue pops
+    pop_slow,         // subset of taskqueue pops that were done slow-path
+    steal_attempt,    // number of taskqueue steal attempts
+    steal,            // number of taskqueue steals
+    overflow,         // number of overflow pushes
+    overflow_max_len, // max length of overflow stack
+    last_stat_id
+  };
+
+public:
+  inline TaskQueueStats()       { reset(); }
+
+  inline void record_push()     { ++_stats[push]; }
+  inline void record_pop()      { ++_stats[pop]; }
+  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
+  inline void record_steal(bool success);
+  inline void record_overflow(size_t new_length);
+
+  inline size_t get(StatId id) const { return _stats[id]; }
+  inline const size_t* get() const   { return _stats; }
+
+  inline void reset();
+
+  static void print_header(unsigned int line, outputStream* const stream = tty,
+                           unsigned int width = 10);
+  void print(outputStream* const stream = tty, unsigned int width = 10) const;
+
+private:
+  size_t                    _stats[last_stat_id];
+  static const char * const _names[last_stat_id];
+};
+
+void TaskQueueStats::record_steal(bool success) {
+  ++_stats[steal_attempt];
+  if (success) ++_stats[steal];
+}
+
+void TaskQueueStats::record_overflow(size_t new_len) {
+  ++_stats[overflow];
+  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
+}
+
+void TaskQueueStats::reset() {
+  memset(_stats, 0, sizeof(_stats));
+}
+#endif // TASKQUEUE_STATS
+
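
A small sketch of how the counters accumulate when used standalone (in the patch the queue owns the stats object and the TASKQUEUE_STATS_ONLY sites update it):

    TaskQueueStats stats;
    stats.record_push();        // qpush: 1
    stats.record_pop_slow();    // qpop: 1, qpop-s: 1
    stats.record_steal(false);  // qattempt: 1, qsteal: 0
    stats.record_overflow(3);   // opush: 1, omax: 3
    // stats.get(TaskQueueStats::steal) == 0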
 template <unsigned int N>
 class TaskQueueSuper: public CHeapObj {
 protected:
@@ -109,8 +175,9 @@
 public:
   TaskQueueSuper() : _bottom(0), _age() {}
 
-  // Return true if the TaskQueue contains any tasks.
-  bool peek() { return _bottom != _age.top(); }
+  // peek(): return true if the TaskQueue contains any tasks;
+  // is_empty(): return true if it contains none.
+  bool peek()     const { return _bottom != _age.top(); }
+  bool is_empty() const { return size() == 0; }
 
   // Return an estimate of the number of elements in the queue.
   // The "careful" version admits the possibility of pop_local/pop_global
@@ -134,6 +201,8 @@
 
   // Total size of queue.
   static const uint total_size() { return N; }
+
+  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
 };
 
 template<class E, unsigned int N = TASKQUEUE_SIZE>
@@ -151,6 +220,7 @@
 public:
   using TaskQueueSuper<N>::max_elems;
   using TaskQueueSuper<N>::size;
+  TASKQUEUE_STATS_ONLY(using TaskQueueSuper<N>::stats;)
 
 private:
   // Slow paths for push, pop_local.  (pop_global has no fast path.)
@@ -165,18 +235,16 @@
 
   void initialize();
 
-  // Push the task "t" on the queue.  Returns "false" iff the queue is
-  // full.
+  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
   inline bool push(E t);
 
-  // If succeeds in claiming a task (from the 'local' end, that is, the
-  // most recently pushed task), returns "true" and sets "t" to that task.
-  // Otherwise, the queue is empty and returns false.
+  // Attempts to claim a task from the "local" end of the queue (the most
+  // recently pushed).  If successful, returns true and sets t to the task;
+  // otherwise, returns false (the queue is empty).
   inline bool pop_local(E& t);
 
-  // If succeeds in claiming a task (from the 'global' end, that is, the
-  // least recently pushed task), returns "true" and sets "t" to that task.
-  // Otherwise, the queue is empty and returns false.
+  // Like pop_local(), but uses the "global" end of the queue (the least
+  // recently pushed).
   bool pop_global(E& t);
 
   // Delete any resource associated with the queue.
@@ -198,7 +266,6 @@
 template<class E, unsigned int N>
 void GenericTaskQueue<E, N>::initialize() {
   _elems = NEW_C_HEAP_ARRAY(E, N);
-  guarantee(_elems != NULL, "Allocation failed.");
 }
 
 template<class E, unsigned int N>
@@ -226,14 +293,14 @@
     // g++ complains if the volatile result of the assignment is unused.
     const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
+    TASKQUEUE_STATS_ONLY(stats.record_push());
     return true;
   }
   return false;
 }
 
 template<class E, unsigned int N>
-bool GenericTaskQueue<E, N>::
-pop_local_slow(uint localBot, Age oldAge) {
+bool GenericTaskQueue<E, N>::pop_local_slow(uint localBot, Age oldAge) {
   // This queue was observed to contain exactly one element; either this
   // thread will claim it, or a competing "pop_global".  In either case,
   // the queue will be logically empty afterwards.  Create a new Age value
@@ -253,6 +320,7 @@
     if (tempAge == oldAge) {
       // We win.
       assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
+      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
       return true;
     }
   }
@@ -289,7 +357,90 @@
   FREE_C_HEAP_ARRAY(E, _elems);
 }
 
-// Inherits the typedef of "Task" from above.
+// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
+// elements that do not fit in the TaskQueue.
+//
+// Three methods from super classes are overridden:
+//
+// initialize() - initialize the super classes and create the overflow stack
+// push() - push onto the task queue or, if that fails, onto the overflow stack
+// is_empty() - return true if both the TaskQueue and overflow stack are empty
+//
+// Note that size() is not overridden--it returns the number of elements in the
+// TaskQueue, and does not include the size of the overflow stack.  This
+// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
+template<class E, unsigned int N = TASKQUEUE_SIZE>
+class OverflowTaskQueue: public GenericTaskQueue<E, N>
+{
+public:
+  typedef GrowableArray<E>       overflow_t;
+  typedef GenericTaskQueue<E, N> taskqueue_t;
+
+  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)
+
+  OverflowTaskQueue();
+  ~OverflowTaskQueue();
+  void initialize();
+
+  inline overflow_t* overflow_stack() const { return _overflow_stack; }
+
+  // Push task t onto the queue or onto the overflow stack.  Return true.
+  inline bool push(E t);
+
+  // Attempt to pop from the overflow stack; return true if anything was popped.
+  inline bool pop_overflow(E& t);
+
+  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
+  inline bool overflow_empty()  const { return overflow_stack()->is_empty(); }
+  inline bool is_empty()        const {
+    return taskqueue_empty() && overflow_empty();
+  }
+
+private:
+  overflow_t* _overflow_stack;
+};
+
+template <class E, unsigned int N>
+OverflowTaskQueue<E, N>::OverflowTaskQueue()
+{
+  _overflow_stack = NULL;
+}
+
+template <class E, unsigned int N>
+OverflowTaskQueue<E, N>::~OverflowTaskQueue()
+{
+  if (_overflow_stack != NULL) {
+    delete _overflow_stack;
+    _overflow_stack = NULL;
+  }
+}
+
+template <class E, unsigned int N>
+void OverflowTaskQueue<E, N>::initialize()
+{
+  taskqueue_t::initialize();
+  assert(_overflow_stack == NULL, "memory leak");
+  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<E>(10, true);
+}
+
+template <class E, unsigned int N>
+bool OverflowTaskQueue<E, N>::push(E t)
+{
+  if (!taskqueue_t::push(t)) {
+    overflow_stack()->push(t);
+    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->length()));
+  }
+  return true;
+}
+
+template <class E, unsigned int N>
+bool OverflowTaskQueue<E, N>::pop_overflow(E& t)
+{
+  if (overflow_empty()) return false;
+  t = overflow_stack()->pop();
+  return true;
+}
+
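
A sketch of the intended drain pattern (element type and process() are illustrative): push() never fails, so a worker empties its own queue first and then the overflow stack:

    OverflowTaskQueue<oop> q;
    q.initialize();
    // producer: q.push(task); always returns true
    // consumer:
    //   oop t;
    //   while (q.pop_local(t) || q.pop_overflow(t)) {
    //     process(t);
    //   }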
 class TaskQueueSetSuper: public CHeapObj {
 protected:
   static int randomParkAndMiller(int* seed0);
@@ -323,11 +474,11 @@
 
   T* queue(uint n);
 
-  // The thread with queue number "queue_num" (and whose random number seed
-  // is at "seed") is trying to steal a task from some other queue.  (It
-  // may try several queues, according to some configuration parameter.)
-  // If some steal succeeds, returns "true" and sets "t" the stolen task,
-  // otherwise returns false.
+  // The thread with queue number "queue_num" (and whose random number seed is
+  // at "seed") is trying to steal a task from some other queue.  (It may try
+  // several queues, according to some configuration parameter.)  If some steal
+  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
+  // false.
   bool steal(uint queue_num, int* seed, E& t);
 
   bool peek();
@@ -346,9 +497,13 @@
 
 template<class T> bool
 GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
-  for (uint i = 0; i < 2 * _n; i++)
-    if (steal_best_of_2(queue_num, seed, t))
+  for (uint i = 0; i < 2 * _n; i++) {
+    if (steal_best_of_2(queue_num, seed, t)) {
+      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
       return true;
+    }
+  }
+  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
   return false;
 }
 
@@ -496,6 +651,7 @@
     // g++ complains if the volatile result of the assignment is unused.
     const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
+    TASKQUEUE_STATS_ONLY(stats.record_push());
     return true;
   } else {
     return push_slow(t, dirty_n_elems);
@@ -507,7 +663,7 @@
   uint localBot = _bottom;
   // This value cannot be N-1.  That can only occur as a result of
   // the assignment to bottom in this method.  If it does, this method
-  // resets the size( to 0 before the next call (which is sequential,
+  // resets the size to 0 before the next call (which is sequential,
   // since this is pop_local.)
   uint dirty_n_elems = dirty_size(localBot, _age.top());
   assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
@@ -525,6 +681,7 @@
   idx_t tp = _age.top();    // XXX
   if (size(localBot, tp) > 0) {
     assert(dirty_size(localBot, tp) != N - 1, "sanity");
+    TASKQUEUE_STATS_ONLY(stats.record_pop());
     return true;
   } else {
     // Otherwise, the queue contained exactly one element; we take the slow
@@ -533,8 +690,7 @@
   }
 }
 
-typedef oop Task;
-typedef GenericTaskQueue<Task>            OopTaskQueue;
+typedef GenericTaskQueue<oop>             OopTaskQueue;
 typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
 
 #ifdef _MSC_VER
@@ -615,35 +771,8 @@
 #pragma warning(pop)
 #endif
 
-typedef GenericTaskQueue<StarTask>            OopStarTaskQueue;
+typedef OverflowTaskQueue<StarTask>           OopStarTaskQueue;
 typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;
 
-typedef size_t RegionTask;  // index for region
-typedef GenericTaskQueue<RegionTask>         RegionTaskQueue;
-typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;
-
-class RegionTaskQueueWithOverflow: public CHeapObj {
- protected:
-  RegionTaskQueue              _region_queue;
-  GrowableArray<RegionTask>*   _overflow_stack;
-
- public:
-  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
-  // Initialize both stealable queue and overflow
-  void initialize();
-  // Save first to stealable queue and then to overflow
-  void save(RegionTask t);
-  // Retrieve first from overflow and then from stealable queue
-  bool retrieve(RegionTask& region_index);
-  // Retrieve from stealable queue
-  bool retrieve_from_stealable_queue(RegionTask& region_index);
-  // Retrieve from overflow
-  bool retrieve_from_overflow(RegionTask& region_index);
-  bool is_empty();
-  bool stealable_is_empty();
-  bool overflow_is_empty();
-  uint stealable_size() { return _region_queue.size(); }
-  RegionTaskQueue* task_queue() { return &_region_queue; }
-};
-
-#define USE_RegionTaskQueueWithOverflow
+typedef OverflowTaskQueue<size_t>             RegionTaskQueue;
+typedef GenericTaskQueueSet<RegionTaskQueue>  RegionTaskQueueSet;