changeset 989:148e5441d916

6863023: need non-perm oops in code cache for JSR 292
Summary: Make a special root-list for those few nmethods which might contain non-perm oops.
Reviewed-by: twisti, kvn, never, jmasa, ysr
author jrose
date Tue, 15 Sep 2009 21:53:47 -0700
parents 00977607da34
children be094e0c089a
files agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp src/cpu/x86/vm/c1_LIRAssembler_x86.cpp src/cpu/x86/vm/c1_LIRGenerator_x86.cpp src/cpu/x86/vm/x86_32.ad src/cpu/x86/vm/x86_64.ad src/share/vm/c1/c1_GraphBuilder.cpp src/share/vm/c1/c1_InstructionPrinter.cpp src/share/vm/c1/c1_LIRGenerator.cpp src/share/vm/c1/c1_ValueType.cpp src/share/vm/ci/ciEnv.cpp src/share/vm/ci/ciEnv.hpp src/share/vm/ci/ciObject.cpp src/share/vm/ci/ciObject.hpp src/share/vm/ci/ciObjectFactory.cpp src/share/vm/classfile/systemDictionary.cpp src/share/vm/code/codeBlob.hpp src/share/vm/code/codeCache.cpp src/share/vm/code/codeCache.hpp src/share/vm/code/debugInfoRec.cpp src/share/vm/code/dependencies.cpp src/share/vm/code/nmethod.cpp src/share/vm/code/nmethod.hpp src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp src/share/vm/gc_implementation/g1/concurrentMark.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/gc_implementation/g1/g1MarkSweep.cpp src/share/vm/gc_implementation/includeDB_gc_parallelScavenge src/share/vm/gc_implementation/parNew/parNewGeneration.cpp src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp src/share/vm/gc_implementation/shared/markSweep.cpp src/share/vm/gc_implementation/shared/markSweep.hpp src/share/vm/gc_interface/collectedHeap.hpp src/share/vm/memory/defNewGeneration.cpp src/share/vm/memory/genCollectedHeap.cpp src/share/vm/memory/genCollectedHeap.hpp src/share/vm/memory/genMarkSweep.cpp src/share/vm/memory/iterator.cpp src/share/vm/memory/iterator.hpp src/share/vm/memory/sharedHeap.cpp src/share/vm/memory/sharedHeap.hpp src/share/vm/oops/instanceKlass.cpp src/share/vm/oops/oop.hpp src/share/vm/oops/oop.inline2.hpp src/share/vm/opto/output.cpp src/share/vm/opto/parse.hpp src/share/vm/opto/parse2.cpp src/share/vm/opto/parse3.cpp src/share/vm/opto/type.cpp src/share/vm/opto/type.hpp src/share/vm/prims/jvmtiTagMap.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/frame.cpp src/share/vm/runtime/frame.hpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/sweeper.cpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/vmStructs.cpp src/share/vm/runtime/vmThread.cpp src/share/vm/runtime/vmThread.hpp src/share/vm/utilities/debug.cpp
diffstat 74 files changed, 979 insertions(+), 279 deletions(-)
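The core idea of the changeset: until now, compiled code could embed only permanent-generation oops, so a young-generation scavenge never had to look inside the code cache. JSR 292 needs MethodHandle and CallSite oops, which live in the ordinary heap and can move, embedded as compile-time constants. Rather than scanning every nmethod on each scavenge, the patch threads the few nmethods that actually contain scavengable oops onto a side list and treats only that list as strong roots. A minimal C++ sketch of the shape, using only names visible in the diffs below (all other detail of nmethod and CodeCache is elided):

    // Side list of just those nmethods that embed scavengable oops.
    class nmethod_sketch {
      friend class code_cache_sketch;
      nmethod_sketch* _scavenge_root_link  = nullptr;  // next on the side list
      unsigned char   _scavenge_root_state = 0;        // on-list / marked bits
    };

    class code_cache_sketch {
      static nmethod_sketch* _scavenge_root_nmethods;  // list head
     public:
      // A young-gen scavenge walks only this list, not every code blob.
      template<typename F>
      static void scavenge_root_nmethods_do(F visit) {
        for (nmethod_sketch* cur = _scavenge_root_nmethods;
             cur != nullptr; cur = cur->_scavenge_root_link) {
          visit(cur);
        }
      }
    };

    nmethod_sketch* code_cache_sketch::_scavenge_root_nmethods = nullptr;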
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Tue Sep 15 11:09:34 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Tue Sep 15 21:53:47 2009 -0700
@@ -33,6 +33,7 @@
 
 public class CodeCache {
   private static AddressField       heapField;
+  private static AddressField       scavengeRootNMethodsField;
   private static VirtualConstructor virtualConstructor;
 
   private CodeHeap heap;
@@ -49,6 +50,7 @@
     Type type = db.lookupType("CodeCache");
 
     heapField = type.getAddressField("_heap");
+    scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods");
 
     virtualConstructor = new VirtualConstructor(db);
     // Add mappings for all possible CodeBlob subclasses
@@ -67,6 +69,10 @@
     heap = (CodeHeap) VMObjectFactory.newObject(CodeHeap.class, heapField.getValue());
   }
 
+  public NMethod scavengeRootMethods() {
+    return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue());
+  }
+
   public boolean contains(Address p) {
     return getHeap().contains(p);
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Tue Sep 15 11:09:34 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Tue Sep 15 21:53:47 2009 -0700
@@ -40,7 +40,10 @@
   /** != InvocationEntryBci if this nmethod is an on-stack replacement method */
   private static CIntegerField entryBCIField;
   /** To support simple linked-list chaining of nmethods */
-  private static AddressField  linkField;
+  private static AddressField  osrLinkField;
+  private static AddressField  scavengeRootLinkField;
+  private static CIntegerField scavengeRootStateField;
+
   /** Offsets for different nmethod parts */
   private static CIntegerField exceptionOffsetField;
   private static CIntegerField deoptOffsetField;
@@ -87,7 +90,10 @@
     zombieInstructionSizeField  = type.getCIntegerField("_zombie_instruction_size");
     methodField                 = type.getOopField("_method");
     entryBCIField               = type.getCIntegerField("_entry_bci");
-    linkField                   = type.getAddressField("_link");
+    osrLinkField                = type.getAddressField("_osr_link");
+    scavengeRootLinkField       = type.getAddressField("_scavenge_root_link");
+    scavengeRootStateField      = type.getCIntegerField("_scavenge_root_state");
+
     exceptionOffsetField        = type.getCIntegerField("_exception_offset");
     deoptOffsetField            = type.getCIntegerField("_deoptimize_offset");
     origPCOffsetField           = type.getCIntegerField("_orig_pc_offset");
@@ -219,10 +225,19 @@
     return getEntryBCI();
   }
 
-  public NMethod getLink() {
-    return (NMethod) VMObjectFactory.newObject(NMethod.class, linkField.getValue(addr));
+  public NMethod getOSRLink() {
+    return (NMethod) VMObjectFactory.newObject(NMethod.class, osrLinkField.getValue(addr));
   }
 
+  public NMethod getScavengeRootLink() {
+    return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootLinkField.getValue(addr));
+  }
+
+  public int getScavengeRootState() {
+    return (int) scavengeRootStateField.getValue(addr);
+  }
+
+
   /** Tells whether frames described by this nmethod can be
       deoptimized. Note: native wrappers cannot be deoptimized. */
   public boolean canBeDeoptimized() { return isJavaMethod(); }
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -2171,7 +2171,7 @@
     // subtype which we can't check or src is the same array as dst
     // but not necessarily exactly of type default_type.
     Label known_ok, halt;
-    jobject2reg(op->expected_type()->encoding(), tmp);
+    jobject2reg(op->expected_type()->constant_encoding(), tmp);
     __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
     if (basic_type != T_OBJECT) {
       __ cmp(tmp, tmp2);
@@ -2429,7 +2429,7 @@
       assert(data->is_BitData(), "need BitData for checkcast");
       Register mdo      = k_RInfo;
       Register data_val = Rtmp1;
-      jobject2reg(md->encoding(), mdo);
+      jobject2reg(md->constant_encoding(), mdo);
 
       int mdo_offset_bias = 0;
       if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
@@ -2452,7 +2452,7 @@
     // patching may screw with our temporaries on sparc,
     // so let's do it before loading the class
     if (k->is_loaded()) {
-      jobject2reg(k->encoding(), k_RInfo);
+      jobject2reg(k->constant_encoding(), k_RInfo);
     } else {
       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
     }
@@ -2513,7 +2513,7 @@
     // patching may screw with our temporaries on sparc,
     // so let's do it before loading the class
     if (k->is_loaded()) {
-      jobject2reg(k->encoding(), k_RInfo);
+      jobject2reg(k->constant_encoding(), k_RInfo);
     } else {
       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
     }
@@ -2717,7 +2717,7 @@
   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
   Register mdo  = op->mdo()->as_register();
   Register tmp1 = op->tmp1()->as_register();
-  jobject2reg(md->encoding(), mdo);
+  jobject2reg(md->constant_encoding(), mdo);
   int mdo_offset_bias = 0;
   if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                             data->size_in_bytes())) {
@@ -2774,7 +2774,7 @@
         if (receiver == NULL) {
           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                             mdo_offset_bias);
-          jobject2reg(known_klass->encoding(), tmp1);
+          jobject2reg(known_klass->constant_encoding(), tmp1);
           __ st_ptr(tmp1, recv_addr);
           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                             mdo_offset_bias);
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -896,7 +896,7 @@
   LIR_Opr len = length.result();
   BasicType elem_type = x->elt_type();
 
-  __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
+  __ oop2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
 
   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -1638,7 +1638,7 @@
       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
     } else {
 #ifdef _LP64
-      __ movoop(k_RInfo, k->encoding());
+      __ movoop(k_RInfo, k->constant_encoding());
 #else
       k_RInfo = noreg;
 #endif // _LP64
@@ -1661,7 +1661,7 @@
       assert(data != NULL,       "need data for checkcast");
       assert(data->is_BitData(), "need BitData for checkcast");
       Register mdo  = klass_RInfo;
-      __ movoop(mdo, md->encoding());
+      __ movoop(mdo, md->constant_encoding());
       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
       int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
       __ orl(data_addr, header_bits);
@@ -1679,7 +1679,7 @@
 #ifdef _LP64
         __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
 #else
-        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding());
+        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
 #endif // _LP64
       } else {
         __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
@@ -1696,7 +1696,7 @@
 #ifdef _LP64
         __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
 #else
-        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
+        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
 #endif // _LP64
         if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
           __ jcc(Assembler::notEqual, *stub->entry());
@@ -1707,7 +1707,7 @@
 #ifdef _LP64
           __ cmpptr(klass_RInfo, k_RInfo);
 #else
-          __ cmpoop(klass_RInfo, k->encoding());
+          __ cmpoop(klass_RInfo, k->constant_encoding());
 #endif // _LP64
           __ jcc(Assembler::equal, done);
 
@@ -1715,7 +1715,7 @@
 #ifdef _LP64
           __ push(k_RInfo);
 #else
-          __ pushoop(k->encoding());
+          __ pushoop(k->constant_encoding());
 #endif // _LP64
           __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
           __ pop(klass_RInfo);
@@ -1763,7 +1763,7 @@
     if (!k->is_loaded()) {
       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
     } else {
-      LP64_ONLY(__ movoop(k_RInfo, k->encoding()));
+      LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding()));
     }
     assert(obj != k_RInfo, "must be different");
 
@@ -1774,7 +1774,7 @@
       // get object class
       // not a safepoint as obj null check happens earlier
       if (LP64_ONLY(false &&) k->is_loaded()) {
-        NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding()));
+        NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()));
         k_RInfo = noreg;
       } else {
         __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
@@ -1791,14 +1791,14 @@
 #ifndef _LP64
       if (k->is_loaded()) {
         // See if we get an immediate positive hit
-        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
+        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
         __ jcc(Assembler::equal, one);
         if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) {
           // check for self
-          __ cmpoop(klass_RInfo, k->encoding());
+          __ cmpoop(klass_RInfo, k->constant_encoding());
           __ jcc(Assembler::equal, one);
           __ push(klass_RInfo);
-          __ pushoop(k->encoding());
+          __ pushoop(k->constant_encoding());
           __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
           __ pop(klass_RInfo);
           __ pop(dst);
@@ -3112,7 +3112,7 @@
     // subtype which we can't check or src is the same array as dst
     // but not necessarily exactly of type default_type.
     Label known_ok, halt;
-    __ movoop(tmp, default_type->encoding());
+    __ movoop(tmp, default_type->constant_encoding());
     if (basic_type != T_OBJECT) {
       __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::notEqual, halt);
@@ -3200,7 +3200,7 @@
   assert(data->is_CounterData(), "need CounterData for calls");
   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
   Register mdo  = op->mdo()->as_register();
-  __ movoop(mdo, md->encoding());
+  __ movoop(mdo, md->constant_encoding());
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
   __ addl(counter_addr, DataLayout::counter_increment);
   Bytecodes::Code bc = method->java_code_at_bci(bci);
@@ -3240,7 +3240,7 @@
         ciKlass* receiver = vc_data->receiver(i);
         if (receiver == NULL) {
           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
-          __ movoop(recv_addr, known_klass->encoding());
+          __ movoop(recv_addr, known_klass->constant_encoding());
           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
           __ addl(data_addr, DataLayout::counter_increment);
           return;
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -994,7 +994,7 @@
   LIR_Opr len = length.result();
   BasicType elem_type = x->elt_type();
 
-  __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
+  __ oop2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
 
   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
--- a/src/cpu/x86/vm/x86_32.ad	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/cpu/x86/vm/x86_32.ad	Tue Sep 15 21:53:47 2009 -0700
@@ -379,7 +379,7 @@
         int format) {
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
-    assert(oop(d32)->is_oop() && oop(d32)->is_perm(), "cannot embed non-perm oops in code");
+    assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.inst_mark(), rspec, format);
--- a/src/cpu/x86/vm/x86_64.ad	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/cpu/x86/vm/x86_64.ad	Tue Sep 15 21:53:47 2009 -0700
@@ -683,7 +683,7 @@
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
-    assert(oop((intptr_t)d32)->is_oop() && oop((intptr_t)d32)->is_perm(), "cannot embed non-perm oops in code");
+    assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.inst_mark(), rspec, format);
@@ -721,8 +721,8 @@
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
-    assert(oop(d64)->is_oop() && oop(d64)->is_perm(),
-           "cannot embed non-perm oops in code");
+    assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
+           "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.inst_mark(), rspec, format);
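Both AD files relax the same debug assert: an oop may be baked into the instruction stream if it can never move during a scavenge, or if the new ScavengeRootsInCode flag is on, in which case the scavenge-root list keeps the containing nmethod visible to the GC. A sketch of the predicate; the helper name is illustrative, not from the patch:

    static int ScavengeRootsInCode = 0;  // 0 = forbid scavengable oops in code

    struct oop_sketch { bool is_oop; bool is_scavengable; };

    inline bool may_embed_in_code(const oop_sketch& o) {
      // Legal when the oop cannot move during a scavenge, or when the
      // new root list will report such oops to the GC anyway.
      return o.is_oop && (ScavengeRootsInCode || !o.is_scavengable);
    }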
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -1442,7 +1442,7 @@
         switch (field_type) {
         case T_ARRAY:
         case T_OBJECT:
-          if (field_val.as_object()->has_encoding()) {
+          if (field_val.as_object()->should_be_constant()) {
             constant =  new Constant(as_ValueType(field_val));
           }
           break;
--- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -133,12 +133,12 @@
       ciMethod* m = (ciMethod*)value;
       output()->print("<method %s.%s>", m->holder()->name()->as_utf8(), m->name()->as_utf8());
     } else {
-      output()->print("<object 0x%x>", value->encoding());
+      output()->print("<object 0x%x>", value->constant_encoding());
     }
   } else if (type->as_InstanceConstant() != NULL) {
-    output()->print("<instance 0x%x>", type->as_InstanceConstant()->value()->encoding());
+    output()->print("<instance 0x%x>", type->as_InstanceConstant()->value()->constant_encoding());
   } else if (type->as_ArrayConstant() != NULL) {
-    output()->print("<array 0x%x>", type->as_ArrayConstant()->value()->encoding());
+    output()->print("<array 0x%x>", type->as_ArrayConstant()->value()->constant_encoding());
   } else if (type->as_ClassConstant() != NULL) {
     ciInstanceKlass* klass = type->as_ClassConstant()->value();
     if (!klass->is_loaded()) {
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -440,7 +440,7 @@
     __ oop2reg_patch(NULL, r, info);
   } else {
     // no patching needed
-    __ oop2reg(obj->encoding(), r);
+    __ oop2reg(obj->constant_encoding(), r);
   }
 }
 
@@ -831,7 +831,7 @@
     int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
     int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
     LIR_Opr md_reg = new_register(T_OBJECT);
-    __ move(LIR_OprFact::oopConst(md->encoding()), md_reg);
+    __ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
     LIR_Opr data_offset_reg = new_register(T_INT);
     __ cmove(lir_cond(cond),
              LIR_OprFact::intConst(taken_count_offset),
@@ -1071,7 +1071,7 @@
     LIR_OprList* args = new LIR_OprList();
     args->append(getThreadPointer());
     LIR_Opr meth = new_register(T_OBJECT);
-    __ oop2reg(method()->encoding(), meth);
+    __ oop2reg(method()->constant_encoding(), meth);
     args->append(meth);
     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
   }
@@ -1784,7 +1784,7 @@
     LIR_OprList* args = new LIR_OprList();
     args->append(getThreadPointer());
     LIR_Opr meth = new_register(T_OBJECT);
-    __ oop2reg(method()->encoding(), meth);
+    __ oop2reg(method()->constant_encoding(), meth);
     args->append(meth);
     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
   }
@@ -2207,7 +2207,7 @@
     LIR_OprList* args = new LIR_OprList();
     args->append(getThreadPointer());
     LIR_Opr meth = new_register(T_OBJECT);
-    __ oop2reg(method()->encoding(), meth);
+    __ oop2reg(method()->constant_encoding(), meth);
     args->append(meth);
     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
   }
@@ -2216,7 +2216,7 @@
     LIR_Opr obj;
     if (method()->is_static()) {
       obj = new_register(T_OBJECT);
-      __ oop2reg(method()->holder()->java_mirror()->encoding(), obj);
+      __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
     } else {
       Local* receiver = x->state()->local_at(0)->as_Local();
       assert(receiver != NULL, "must already exist");
@@ -2660,7 +2660,7 @@
     }
 
     LIR_Opr meth = new_register(T_OBJECT);
-    __ oop2reg(method()->encoding(), meth);
+    __ oop2reg(method()->constant_encoding(), meth);
     LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
     __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
     CodeStub* overflow = new CounterOverflowStub(info, info->bci());
--- a/src/share/vm/c1/c1_ValueType.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/c1/c1_ValueType.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -86,7 +86,7 @@
 
 jobject ObjectType::encoding() const {
   assert(is_constant(), "must be");
-  return constant_value()->encoding();
+  return constant_value()->constant_encoding();
 }
 
 bool ObjectType::is_loaded() const {
--- a/src/share/vm/ci/ciEnv.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -257,7 +257,7 @@
 
 // ------------------------------------------------------------------
 // ciEnv::make_array
-ciArray* ciEnv::make_array(GrowableArray<ciObject*>* objects) {
+ciArray* ciEnv::make_system_array(GrowableArray<ciObject*>* objects) {
   VM_ENTRY_MARK;
   int length = objects->length();
   objArrayOop a = oopFactory::new_system_objArray(length, THREAD);
--- a/src/share/vm/ci/ciEnv.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/ci/ciEnv.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -339,8 +339,8 @@
   // but consider adding to vmSymbols.hpp instead.
 
   // Use this to make a holder for non-perm compile time constants.
-  // The resulting array is guaranteed to satisfy "has_encoding".
-  ciArray*  make_array(GrowableArray<ciObject*>* objects);
+  // The resulting array is guaranteed to satisfy "can_be_constant".
+  ciArray*  make_system_array(GrowableArray<ciObject*>* objects);
 
   // converts the ciKlass* representing the holder of a method into a
   // ciInstanceKlass*.  This is needed since the holder of a method in
--- a/src/share/vm/ci/ciObject.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/ci/ciObject.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -55,6 +55,7 @@
   }
   _klass = NULL;
   _ident = 0;
+  init_flags_from(o);
 }
 
 // ------------------------------------------------------------------
@@ -69,6 +70,7 @@
   }
   _klass = NULL;
   _ident = 0;
+  init_flags_from(h());
 }
 
 // ------------------------------------------------------------------
@@ -158,7 +160,7 @@
 }
 
 // ------------------------------------------------------------------
-// ciObject::encoding
+// ciObject::constant_encoding
 //
 // The address which the compiler should embed into the
 // generated code to represent this oop.  This address
@@ -172,16 +174,24 @@
 //
 // This method should be changed to return a generified address
 // to discourage use of the JNI handle.
-jobject ciObject::encoding() {
+jobject ciObject::constant_encoding() {
   assert(is_null_object() || handle() != NULL, "cannot embed null pointer");
-  assert(has_encoding(), "oop must be NULL or perm");
+  assert(can_be_constant(), "oop must be NULL or perm");
   return handle();
 }
 
 // ------------------------------------------------------------------
-// ciObject::has_encoding
-bool ciObject::has_encoding() {
-  return handle() == NULL || is_perm();
+// ciObject::can_be_constant
+bool ciObject::can_be_constant() {
+  if (ScavengeRootsInCode >= 1)  return true;  // now everybody can encode as a constant
+  return handle() == NULL || !is_scavengable();
+}
+
+// ------------------------------------------------------------------
+// ciObject::should_be_constant()
+bool ciObject::should_be_constant() {
+  if (ScavengeRootsInCode >= 2)  return true;  // force everybody to be a constant
+  return handle() == NULL || !is_scavengable();
 }
 
 
@@ -195,8 +205,9 @@
 void ciObject::print(outputStream* st) {
   st->print("<%s", type_string());
   GUARDED_VM_ENTRY(print_impl(st);)
-  st->print(" ident=%d %s address=0x%x>", ident(),
+  st->print(" ident=%d %s%s address=0x%x>", ident(),
         is_perm() ? "PERM" : "",
+        is_scavengable() ? "SCAVENGABLE" : "",
         (address)this);
 }
 
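The rename from has_encoding() splits the old test into two policy levels: can_be_constant() says an oop may legally be embedded (always true once ScavengeRootsInCode >= 1), while should_be_constant() says the compiler should prefer embedding it (forced for every oop at ScavengeRootsInCode >= 2, which maximizes test coverage of the new root list). The c1_GraphBuilder hunk above switches constant-field folding to should_be_constant(). A condensed sketch of both predicates, with the real ciObject methods reduced to free functions over two booleans:

    static int ScavengeRootsInCode = 0;  // product flag from this changeset

    bool can_be_constant(bool is_null, bool is_scavengable) {
      if (ScavengeRootsInCode >= 1) return true;  // everybody may encode
      return is_null || !is_scavengable;          // old rule otherwise
    }

    bool should_be_constant(bool is_null, bool is_scavengable) {
      if (ScavengeRootsInCode >= 2) return true;  // force everybody
      return is_null || !is_scavengable;
    }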
--- a/src/share/vm/ci/ciObject.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/ci/ciObject.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -51,9 +51,10 @@
   ciKlass* _klass;
   uint     _ident;
 
-  enum { FLAG_BITS   = 1};
+  enum { FLAG_BITS   = 2 };
   enum {
-         PERM_FLAG    = 1
+         PERM_FLAG        = 1,
+         SCAVENGABLE_FLAG = 2
        };
 protected:
   ciObject();
@@ -68,8 +69,15 @@
     return JNIHandles::resolve_non_null(_handle);
   }
 
-  void set_perm() {
-    _ident |=  PERM_FLAG;
+  void init_flags_from(oop x) {
+    int flags = 0;
+    if (x != NULL) {
+      if (x->is_perm())
+        flags |= PERM_FLAG;
+      if (x->is_scavengable())
+        flags |= SCAVENGABLE_FLAG;
+    }
+    _ident |= flags;
   }
 
   // Virtual behavior of the print() method.
@@ -91,17 +99,27 @@
   // A hash value for the convenience of compilers.
   int hash();
 
-  // Tells if this oop has an encoding.  (I.e., is it null or perm?)
+  // Tells if this oop has an encoding as a constant.
+  // True if is_scavengable is false.
+  // Also true if ScavengeRootsInCode is non-zero.
   // If it does not have an encoding, the compiler is responsible for
   // making other arrangements for dealing with the oop.
-  // See ciEnv::make_perm_array
-  bool has_encoding();
+  // See ciEnv::make_system_array
+  bool can_be_constant();
+
+  // Tells if this oop should be made a constant.
+  // True if is_scavengable is false or ScavengeRootsInCode > 1.
+  bool should_be_constant();
 
   // Is this object guaranteed to be in the permanent part of the heap?
   // If so, CollectedHeap::can_elide_permanent_oop_store_barriers is relevant.
   // If the answer is false, no guarantees are made.
   bool is_perm() { return (_ident & PERM_FLAG) != 0; }
 
+  // Might this object possibly move during a scavenge operation?
+  // If the answer is true and ScavengeRootsInCode==0, the oop cannot be embedded in code.
+  bool is_scavengable() { return (_ident & SCAVENGABLE_FLAG) != 0; }
+
   // The address which the compiler should embed into the
   // generated code to represent this oop.  This address
   // is not the true address of the oop -- it will get patched
@@ -109,7 +127,7 @@
   //
   // Usage note: no address arithmetic allowed.  Oop must
   // be registered with the oopRecorder.
-  jobject encoding();
+  jobject constant_encoding();
 
   // What kind of ciObject is this?
   virtual bool is_null_object() const       { return false; }
--- a/src/share/vm/ci/ciObjectFactory.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -261,12 +261,11 @@
     ciObject* new_object = create_new_object(keyHandle());
     assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
     init_ident_of(new_object);
-    if (!keyHandle->is_perm()) {
+    if (!new_object->is_perm()) {
       // Not a perm-space object.
       insert_non_perm(bucket, keyHandle(), new_object);
       return new_object;
     }
-    new_object->set_perm();
     if (len != _ci_objects->length()) {
       // creating the new object has recursively entered new objects
       // into the table.  We need to recompute our index.
--- a/src/share/vm/classfile/systemDictionary.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -2414,6 +2414,8 @@
                          vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
                          &args, CHECK_(empty));
   oop call_site_oop = (oop) result.get_jobject();
+  assert(call_site_oop->is_oop()
+         /*&& sun_dyn_CallSiteImpl::is_instance(call_site_oop)*/, "must be sane");
   sun_dyn_CallSiteImpl::set_vmmethod(call_site_oop, mh_invdyn());
   if (TraceMethodHandles) {
     tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
@@ -2450,6 +2452,8 @@
   oop boot_method_oop = (oop) result.get_jobject();
 
   if (boot_method_oop != NULL) {
+    assert(boot_method_oop->is_oop()
+           && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
     // probably no race conditions, but let's be careful:
     if (Atomic::cmpxchg_ptr(boot_method_oop, ik->adr_bootstrap_method(), NULL) == NULL)
       ik->set_bootstrap_method(boot_method_oop);
--- a/src/share/vm/code/codeBlob.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/code/codeBlob.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -175,6 +175,8 @@
                             OopClosure* keep_alive,
                             bool unloading_occurred);
   virtual void oops_do(OopClosure* f) = 0;
+  // (All CodeBlob subtypes other than NMethod currently have
+  // an empty oops_do() method.)
 
   // OopMap for frame
   OopMapSet* oop_maps() const                    { return _oop_maps; }
--- a/src/share/vm/code/codeCache.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/code/codeCache.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -95,6 +95,7 @@
 int CodeCache::_number_of_blobs = 0;
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 bool CodeCache::_needs_cache_clean = false;
+nmethod* CodeCache::_scavenge_root_nmethods = NULL;
 
 
 CodeBlob* CodeCache::first() {
@@ -148,10 +149,7 @@
     }
   }
   verify_if_often();
-  if (PrintCodeCache2) {        // Need to add a new flag
-      ResourceMark rm;
-      tty->print_cr("CodeCache allocation:  addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, size);
-  }
+  print_trace("allocation", cb, size);
   return cb;
 }
 
@@ -159,10 +157,7 @@
   assert_locked_or_safepoint(CodeCache_lock);
   verify_if_often();
 
-  if (PrintCodeCache2) {        // Need to add a new flag
-      ResourceMark rm;
-      tty->print_cr("CodeCache free:  addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, cb->size());
-  }
+  print_trace("free", cb);
   if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
     _number_of_nmethods_with_dependencies--;
   }
@@ -260,14 +255,148 @@
   }
 }
 
-void CodeCache::oops_do(OopClosure* f) {
+void CodeCache::blobs_do(CodeBlobClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
   FOR_ALL_ALIVE_BLOBS(cb) {
-    cb->oops_do(f);
+    f->do_code_blob(cb);
+
+#ifdef ASSERT
+    if (cb->is_nmethod())
+      ((nmethod*)cb)->verify_scavenge_root_oops();
+#endif //ASSERT
   }
 }
 
+// Walk the list of methods which might contain non-perm oops.
+void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  debug_only(mark_scavenge_root_nmethods());
+
+  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
+    debug_only(cur->clear_scavenge_root_marked());
+    assert(cur->scavenge_root_not_marked(), "");
+    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
+
+    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
+#ifndef PRODUCT
+    if (TraceScavenge) {
+      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
+    }
+#endif //PRODUCT
+    if (is_live)
+      // Perform cur->oops_do(f), maybe just once per nmethod.
+      f->do_code_blob(cur);
+  }
+
+  // Check for stray marks.
+  debug_only(verify_perm_nmethods(NULL));
+}
+
+void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  nm->set_on_scavenge_root_list();
+  nm->set_scavenge_root_link(_scavenge_root_nmethods);
+  set_scavenge_root_nmethods(nm);
+  print_trace("add_scavenge_root", nm);
+}
+
+void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  print_trace("drop_scavenge_root", nm);
+  nmethod* last = NULL;
+  nmethod* cur = scavenge_root_nmethods();
+  while (cur != NULL) {
+    nmethod* next = cur->scavenge_root_link();
+    if (cur == nm) {
+      if (last != NULL)
+            last->set_scavenge_root_link(next);
+      else  set_scavenge_root_nmethods(next);
+      nm->set_scavenge_root_link(NULL);
+      nm->clear_on_scavenge_root_list();
+      return;
+    }
+    last = cur;
+    cur = next;
+  }
+  assert(false, "should have been on list");
+}
+
+void CodeCache::prune_scavenge_root_nmethods() {
+  assert_locked_or_safepoint(CodeCache_lock);
+  debug_only(mark_scavenge_root_nmethods());
+
+  nmethod* last = NULL;
+  nmethod* cur = scavenge_root_nmethods();
+  while (cur != NULL) {
+    nmethod* next = cur->scavenge_root_link();
+    debug_only(cur->clear_scavenge_root_marked());
+    assert(cur->scavenge_root_not_marked(), "");
+    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
+
+    if (!cur->is_zombie() && !cur->is_unloaded()
+        && cur->detect_scavenge_root_oops()) {
+      // Keep it.  Advance 'last' to prevent deletion.
+      last = cur;
+    } else {
+      // Prune it from the list, so we don't have to look at it any more.
+      print_trace("prune_scavenge_root", cur);
+      cur->set_scavenge_root_link(NULL);
+      cur->clear_on_scavenge_root_list();
+      if (last != NULL)
+            last->set_scavenge_root_link(next);
+      else  set_scavenge_root_nmethods(next);
+    }
+    cur = next;
+  }
+
+  // Check for stray marks.
+  debug_only(verify_perm_nmethods(NULL));
+}
+
+#ifndef PRODUCT
+void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
+  // While we are here, verify the integrity of the list.
+  mark_scavenge_root_nmethods();
+  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
+    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
+    cur->clear_scavenge_root_marked();
+  }
+  verify_perm_nmethods(f);
+}
+
+// Temporarily mark nmethods that are claimed to be on the non-perm list.
+void CodeCache::mark_scavenge_root_nmethods() {
+  FOR_ALL_ALIVE_BLOBS(cb) {
+    if (cb->is_nmethod()) {
+      nmethod *nm = (nmethod*)cb;
+      assert(nm->scavenge_root_not_marked(), "clean state");
+      if (nm->on_scavenge_root_list())
+        nm->set_scavenge_root_marked();
+    }
+  }
+}
+
+// If the closure is given, run it on the unlisted nmethods.
+// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
+void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
+  FOR_ALL_ALIVE_BLOBS(cb) {
+    bool call_f = (f_or_null != NULL);
+    if (cb->is_nmethod()) {
+      nmethod *nm = (nmethod*)cb;
+      assert(nm->scavenge_root_not_marked(), "must be already processed");
+      if (nm->on_scavenge_root_list())
+        call_f = false;  // don't show this one to the client
+      nm->verify_scavenge_root_oops();
+    } else {
+      call_f = false;   // not an nmethod
+    }
+    if (call_f)  f_or_null->do_code_blob(cb);
+  }
+}
+#endif //PRODUCT
+
 void CodeCache::gc_prologue() {
+  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
 }
 
 
@@ -285,6 +414,8 @@
     cb->fix_oop_relocations();
   }
   set_needs_cache_clean(false);
+  prune_scavenge_root_nmethods();
+  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
 }
 
 
@@ -508,6 +639,14 @@
   }
 }
 
+void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
+  if (PrintCodeCache2) {  // Need to add a new flag
+    ResourceMark rm;
+    if (size == 0)  size = cb->size();
+    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
+  }
+}
+
 void CodeCache::print_internals() {
   int nmethodCount = 0;
   int runtimeStubCount = 0;
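prune_scavenge_root_nmethods() above is the classic unlink-while-walking pattern over a singly-linked list: a trailing pointer advances only past survivors, so entries that are zombie, unloaded, or no longer contain scavengable oops can be spliced out in a single pass. A self-contained sketch of just that pattern, with the nmethod liveness tests abstracted into a keep flag:

    struct Node { Node* next; bool keep; };

    Node* prune(Node* head) {
      Node* last = nullptr;                // last surviving node seen
      for (Node* cur = head; cur != nullptr; ) {
        Node* next = cur->next;            // save before unlinking
        if (cur->keep) {
          last = cur;                      // survivor: advance trailing ptr
        } else {
          cur->next = nullptr;             // detach the pruned node
          if (last != nullptr) last->next = next;
          else                 head       = next;
        }
        cur = next;
      }
      return head;
    }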
--- a/src/share/vm/code/codeCache.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/code/codeCache.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -45,8 +45,13 @@
   static int _number_of_blobs;
   static int _number_of_nmethods_with_dependencies;
   static bool _needs_cache_clean;
+  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
 
   static void verify_if_often() PRODUCT_RETURN;
+
+  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
+  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
+
  public:
 
   // Initialization
@@ -61,6 +66,7 @@
   static void flush();                              // flushes all CodeBlobs
   static bool contains(void *p);                    // returns whether p is included
   static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
+  static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
   static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods
 
   // Lookup
@@ -106,12 +112,24 @@
   static void do_unloading(BoolObjectClosure* is_alive,
                            OopClosure* keep_alive,
                            bool unloading_occurred);
-  static void oops_do(OopClosure* f);
+  static void oops_do(OopClosure* f) {
+    CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
+    blobs_do(&oopc);
+  }
+  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
+  static void scavenge_root_nmethods_do(CodeBlobClosure* f);
+
+  static nmethod* scavenge_root_nmethods()          { return _scavenge_root_nmethods; }
+  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
+  static void add_scavenge_root_nmethod(nmethod* nm);
+  static void drop_scavenge_root_nmethod(nmethod* nm);
+  static void prune_scavenge_root_nmethods();
 
   // Printing/debugging
   static void print()   PRODUCT_RETURN;          // prints summary
   static void print_internals();
   static void verify();                          // verifies the code cache
+  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
 
   // The full limits of the codeCache
   static address  low_bound()                    { return (address) _heap->low_boundary(); }
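The header also shows how the old OopClosure-based walk survives: CodeCache::oops_do() is now a thin wrapper that adapts the given OopClosure into a CodeBlobClosure (a CodeBlobToOopClosure with do_marking=false) and runs blobs_do(). A reduced sketch of that adapter shape; interfaces are trimmed to one method each, and the claim-once marking of the real class (via nmethod::test_set_oops_do_mark) is omitted:

    struct OopClosureS      { virtual void do_oop(void** p) = 0; };
    struct CodeBlobS        { virtual void oops_do(OopClosureS* f) = 0; };
    struct CodeBlobClosureS { virtual void do_code_blob(CodeBlobS* cb) = 0; };

    // Adapter: visit a blob by forwarding its embedded oops to _f.
    struct CodeBlobToOopClosureS : CodeBlobClosureS {
      OopClosureS* _f;
      explicit CodeBlobToOopClosureS(OopClosureS* f) : _f(f) {}
      void do_code_blob(CodeBlobS* cb) { cb->oops_do(_f); }
    };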
--- a/src/share/vm/code/debugInfoRec.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/code/debugInfoRec.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -299,7 +299,7 @@
   stream()->write_int(sender_stream_offset);
 
   // serialize scope
-  jobject method_enc = (method == NULL)? NULL: method->encoding();
+  jobject method_enc = (method == NULL)? NULL: method->constant_encoding();
   stream()->write_int(oop_recorder()->find_index(method_enc));
   stream()->write_bci(bci);
   assert(method == NULL ||
--- a/src/share/vm/code/dependencies.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/code/dependencies.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -302,7 +302,7 @@
       bytes.write_byte(code_byte);
       for (int j = 0; j < stride; j++) {
         if (j == skipj)  continue;
-        bytes.write_int(_oop_recorder->find_index(deps->at(i+j)->encoding()));
+        bytes.write_int(_oop_recorder->find_index(deps->at(i+j)->constant_encoding()));
       }
     }
   }
--- a/src/share/vm/code/nmethod.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/code/nmethod.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -581,10 +581,13 @@
     debug_only(No_Safepoint_Verifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
-    NOT_PRODUCT(_has_debug_info = false; )
+    NOT_PRODUCT(_has_debug_info = false);
+    _oops_do_mark_link       = NULL;
     _method                  = method;
     _entry_bci               = InvocationEntryBci;
-    _link                    = NULL;
+    _osr_link                = NULL;
+    _scavenge_root_link      = NULL;
+    _scavenge_root_state     = 0;
     _compiler                = NULL;
     // We have no exception handler or deopt handler; make the
     // values something that will never match a pc like the nmethod vtable entry
@@ -618,7 +621,7 @@
     _stack_traversal_mark    = 0;
 
     code_buffer->copy_oops_to(this);
-    debug_only(check_store();)
+    debug_only(verify_scavenge_root_oops());
     CodeCache::commit(this);
     VTune::create_nmethod(this);
   }
@@ -668,10 +671,13 @@
     debug_only(No_Safepoint_Verifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
-    NOT_PRODUCT(_has_debug_info = false; )
+    NOT_PRODUCT(_has_debug_info = false);
+    _oops_do_mark_link       = NULL;
     _method                  = method;
     _entry_bci               = InvocationEntryBci;
-    _link                    = NULL;
+    _osr_link                = NULL;
+    _scavenge_root_link      = NULL;
+    _scavenge_root_state     = 0;
     _compiler                = NULL;
     // We have no exception handler or deopt handler; make the
     // values something that will never match a pc like the nmethod vtable entry
@@ -703,7 +709,7 @@
     _stack_traversal_mark    = 0;
 
     code_buffer->copy_oops_to(this);
-    debug_only(check_store();)
+    debug_only(verify_scavenge_root_oops());
     CodeCache::commit(this);
     VTune::create_nmethod(this);
   }
@@ -770,12 +776,15 @@
     debug_only(No_Safepoint_Verifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
-    NOT_PRODUCT(_has_debug_info = false; )
+    NOT_PRODUCT(_has_debug_info = false);
+    _oops_do_mark_link       = NULL;
     _method                  = method;
     _compile_id              = compile_id;
     _comp_level              = comp_level;
     _entry_bci               = entry_bci;
-    _link                    = NULL;
+    _osr_link                = NULL;
+    _scavenge_root_link      = NULL;
+    _scavenge_root_state     = 0;
     _compiler                = compiler;
     _orig_pc_offset          = orig_pc_offset;
 #ifdef HAVE_DTRACE_H
@@ -813,7 +822,10 @@
     code_buffer->copy_oops_to(this);
     debug_info->copy_to(this);
     dependencies->copy_to(this);
-    debug_only(check_store();)
+    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
+      CodeCache::add_scavenge_root_nmethod(this);
+    }
+    debug_only(verify_scavenge_root_oops());
 
     CodeCache::commit(this);
 
@@ -902,23 +914,30 @@
   if (st != NULL) {
     ttyLocker ttyl;
     // Print a little tag line that looks like +PrintCompilation output:
-    st->print("%3d%c  %s",
+    int tlen = (int) strlen(title);
+    bool do_nl = false;
+    if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
+    st->print("%3d%c  %.*s",
               compile_id(),
               is_osr_method() ? '%' :
               method() != NULL &&
               is_native_method() ? 'n' : ' ',
-              title);
+              tlen, title);
 #ifdef TIERED
     st->print(" (%d) ", comp_level());
 #endif // TIERED
     if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
-    if (method() != NULL) {
-      method()->print_short_name(st);
+    if (Universe::heap()->is_gc_active() && method() != NULL) {
+      st->print("(method)");
+    } else if (method() != NULL) {
+        method()->print_short_name(st);
       if (is_osr_method())
         st->print(" @ %d", osr_entry_bci());
       if (method()->code_size() > 0)
         st->print(" (%d bytes)", method()->code_size());
     }
+
+    if (do_nl)  st->cr();
   }
 }
 
@@ -1033,6 +1052,7 @@
   }
 }
 
+// This is a private interface with the sweeper.
 void nmethod::mark_as_seen_on_stack() {
   assert(is_not_entrant(), "must be a non-entrant method");
   set_stack_traversal_mark(NMethodSweeper::traversal_count());
@@ -1077,7 +1097,8 @@
                   " unloadable], methodOop(" INTPTR_FORMAT
                   "), cause(" INTPTR_FORMAT ")",
                   this, (address)_method, (address)cause);
-    cause->klass()->print();
+    if (!Universe::heap()->is_gc_active())
+      cause->klass()->print();
   }
   // If _method is already NULL the methodOop is about to be unloaded,
   // so we don't have to break the cycle. Note that it is possible to
@@ -1105,7 +1126,8 @@
   // The methodOop is gone at this point
   assert(_method == NULL, "Tautology");
 
-  set_link(NULL);
+  set_osr_link(NULL);
+  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
   NMethodSweeper::notify(this);
 }
 
@@ -1291,6 +1313,10 @@
     ec = next;
   }
 
+  if (on_scavenge_root_list()) {
+    CodeCache::drop_scavenge_root_nmethod(this);
+  }
+
   ((CodeBlob*)(this))->flush();
 
   CodeCache::free(this);
@@ -1350,7 +1376,10 @@
       return false;
     }
   }
-  assert(unloading_occurred, "Inconsistency in unloading");
+  // If ScavengeRootsInCode is true, an nmethod might be unloaded
+  // simply because one of its constant oops has gone dead.
+  // No actual classes need to be unloaded in order for this to occur.
+  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
   make_unloaded(is_alive, obj);
   return true;
 }
@@ -1558,12 +1587,108 @@
   }
 
   // Scopes
+  // This includes oop constants not inlined in the code stream.
   for (oop* p = oops_begin(); p < oops_end(); p++) {
     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
     f->do_oop(p);
   }
 }
 
+#define NMETHOD_SENTINEL ((nmethod*)badAddress)
+
+nmethod* volatile nmethod::_oops_do_mark_nmethods;
+
+// An nmethod is "marked" if its _mark_link is set non-null.
+// Even if it is the end of the linked list, it will have a non-null link value,
+// as long as it is on the list.
+// This code must be MP safe, because it is used from parallel GC passes.
+bool nmethod::test_set_oops_do_mark() {
+  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
+  nmethod* observed_mark_link = _oops_do_mark_link;
+  if (observed_mark_link == NULL) {
+    // Claim this nmethod for this thread to mark.
+    observed_mark_link = (nmethod*)
+      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
+    if (observed_mark_link == NULL) {
+
+      // Atomically append this nmethod (now claimed) to the head of the list:
+      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
+      for (;;) {
+        nmethod* required_mark_nmethods = observed_mark_nmethods;
+        _oops_do_mark_link = required_mark_nmethods;
+        observed_mark_nmethods = (nmethod*)
+          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
+        if (observed_mark_nmethods == required_mark_nmethods)
+          break;
+      }
+      // Mark was clear when we first saw this guy.
+      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark\n"));
+      return false;
+    }
+  }
+  // On fall through, another racing thread marked this nmethod before we did.
+  return true;
+}
+
+void nmethod::oops_do_marking_prologue() {
+  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
+  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
+  // We use cmpxchg_ptr instead of regular assignment here because the user
+  // may fork a bunch of threads, and we need them all to see the same state.
+  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
+  guarantee(observed == NULL, "no races in this sequential code");
+}
+
+void nmethod::oops_do_marking_epilogue() {
+  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
+  nmethod* cur = _oops_do_mark_nmethods;
+  while (cur != NMETHOD_SENTINEL) {
+    assert(cur != NULL, "not NULL-terminated");
+    nmethod* next = cur->_oops_do_mark_link;
+    cur->_oops_do_mark_link = NULL;
+    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark\n"));
+    cur = next;
+  }
+  void* required = _oops_do_mark_nmethods;
+  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
+  guarantee(observed == required, "no races in this sequential code");
+  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
+}
+
+class DetectScavengeRoot: public OopClosure {
+  bool     _detected_scavenge_root;
+public:
+  DetectScavengeRoot() : _detected_scavenge_root(false)
+  { NOT_PRODUCT(_print_nm = NULL); }
+  bool detected_scavenge_root() { return _detected_scavenge_root; }
+  virtual void do_oop(oop* p) {
+    if ((*p) != NULL && (*p)->is_scavengable()) {
+      NOT_PRODUCT(maybe_print(p));
+      _detected_scavenge_root = true;
+    }
+  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+#ifndef PRODUCT
+  nmethod* _print_nm;
+  void maybe_print(oop* p) {
+    if (_print_nm == NULL)  return;
+    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
+    tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
+                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
+                  (intptr_t)(*p), (intptr_t)p);
+    (*p)->print();
+  }
+#endif //PRODUCT
+};
+
+bool nmethod::detect_scavenge_root_oops() {
+  DetectScavengeRoot detect_scavenge_root;
+  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
+  oops_do(&detect_scavenge_root);
+  return detect_scavenge_root.detected_scavenge_root();
+}
+
 // Method that knows how to preserve outgoing arguments at call. This method must be
 // called with a frame corresponding to a Java invoke
 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
@@ -1878,6 +2003,24 @@
 // -----------------------------------------------------------------------------
 // Verification
 
+class VerifyOopsClosure: public OopClosure {
+  nmethod* _nm;
+  bool     _ok;
+public:
+  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
+  bool ok() { return _ok; }
+  virtual void do_oop(oop* p) {
+    if ((*p) == NULL || (*p)->is_oop())  return;
+    if (_ok) {
+      _nm->print_nmethod(true);
+      _ok = false;
+    }
+    tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+
 void nmethod::verify() {
 
   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
@@ -1911,6 +2054,11 @@
     }
   }
 
+  VerifyOopsClosure voc(this);
+  oops_do(&voc);
+  assert(voc.ok(), "embedded oops must be OK");
+  verify_scavenge_root_oops();
+
   verify_scopes();
 }
 
@@ -1974,19 +2122,34 @@
 // Non-product code
 #ifndef PRODUCT
 
-void nmethod::check_store() {
-  // Make sure all oops in the compiled code are tenured
+class DebugScavengeRoot: public OopClosure {
+  nmethod* _nm;
+  bool     _ok;
+public:
+  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
+  bool ok() { return _ok; }
+  virtual void do_oop(oop* p) {
+    if ((*p) == NULL || !(*p)->is_scavengable())  return;
+    if (_ok) {
+      _nm->print_nmethod(true);
+      _ok = false;
+    }
+    tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+    (*p)->print();
+  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
 
-  RelocIterator iter(this);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* reloc = iter.oop_reloc();
-      oop obj = reloc->oop_value();
-      if (obj != NULL && !obj->is_perm()) {
-        fatal("must be permanent oop in compiled code");
-      }
-    }
+void nmethod::verify_scavenge_root_oops() {
+  if (!on_scavenge_root_list()) {
+    // Actually look inside, to verify the claim that it's clean.
+    DebugScavengeRoot debug_scavenge_root(this);
+    oops_do(&debug_scavenge_root);
+    if (!debug_scavenge_root.ok())
+      fatal("found an unadvertised bad non-perm oop in the code cache");
   }
+  assert(scavenge_root_not_marked(), "");
 }
 
 #endif // PRODUCT
@@ -2019,6 +2182,7 @@
     if (is_not_entrant()) tty->print("not_entrant ");
     if (is_zombie())      tty->print("zombie ");
     if (is_unloaded())    tty->print("unloaded ");
+    if (on_scavenge_root_list())  tty->print("scavenge_root ");
     tty->print_cr("}:");
   }
   if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
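test_set_oops_do_mark() above carries the parallel-GC support: each worker claims an nmethod at most once by CAS-ing a sentinel into _oops_do_mark_link, then CAS-pushes the claimed nmethod onto a global list that oops_do_marking_epilogue() later unmarks. The prologue seeds the list head with the sentinel so that every node on the list, including the last one, has a non-null link (non-null meaning "marked"). A portable sketch of the protocol, assuming std::atomic in place of HotSpot's Atomic::cmpxchg_ptr:

    #include <atomic>

    struct NM { std::atomic<NM*> mark_link{nullptr}; };  // nullptr = unmarked

    static NM sentinel_storage;
    static NM* const SENTINEL = &sentinel_storage;       // list terminator
    // The real prologue seeds the head with SENTINEL; doing it at
    // initialization here has the same effect for the sketch.
    static std::atomic<NM*> mark_list{SENTINEL};

    // Returns true if another thread already marked nm.
    bool test_set_mark(NM* nm) {
      NM* expected = nullptr;
      // Step 1: claim the node (nullptr -> SENTINEL); losers bail out.
      if (!nm->mark_link.compare_exchange_strong(expected, SENTINEL))
        return true;
      // Step 2: CAS-push the claimed node onto the global mark list.
      NM* head = mark_list.load();
      do {
        nm->mark_link.store(head);                       // link, then publish
      } while (!mark_list.compare_exchange_weak(head, nm));
      return false;                                      // we marked it first
    }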
--- a/src/share/vm/code/nmethod.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/code/nmethod.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -125,6 +125,7 @@
 class nmethod : public CodeBlob {
   friend class VMStructs;
   friend class NMethodSweeper;
+  friend class CodeCache;  // non-perm oops
  private:
   // Shared fields for all nmethod's
   static int _zombie_instruction_size;
@@ -132,7 +133,12 @@
   methodOop _method;
   int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
 
-  nmethod*  _link;             // To support simple linked-list chaining of nmethods
+  // To support simple linked-list chaining of nmethods:
+  nmethod*  _osr_link;         // from instanceKlass::osr_nmethods_head
+  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+
+  static nmethod* volatile _oops_do_mark_nmethods;
+  nmethod*        volatile _oops_do_mark_link;
 
   AbstractCompiler* _compiler; // The compiler which compiled this nmethod
 
@@ -174,6 +180,8 @@
   // used by jvmti to track if an unload event has been posted for this nmethod.
   bool _unload_reported;
 
+  jbyte _scavenge_root_state;
+
   NOT_PRODUCT(bool _has_debug_info; )
 
   // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
@@ -242,7 +250,6 @@
 
   // helper methods
   void* operator new(size_t size, int nmethod_size);
-  void check_store();
 
   const char* reloc_string_for(u_char* begin, u_char* end);
   void make_not_entrant_or_zombie(int state);
@@ -407,6 +414,24 @@
   int   version() const                           { return flags.version; }
   void  set_version(int v);
 
+  // Non-perm oop support
+  bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
+ protected:
+  enum { npl_on_list = 0x01, npl_marked = 0x10 };
+  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
+  void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
+  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
+#ifndef PRODUCT
+  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
+  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
+  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ npl_on_list) == 0; }
+  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
+#endif //PRODUCT
+  nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
+  void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }
+
+ public:
+
   // Sweeper support
   long  stack_traversal_mark()                    { return _stack_traversal_mark; }
   void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }
@@ -425,8 +450,8 @@
   int   osr_entry_bci() const                     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
   address  osr_entry() const                      { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
   void  invalidate_osr_method();
-  nmethod* link() const                           { return _link; }
-  void     set_link(nmethod *n)                   { _link = n; }
+  nmethod* osr_link() const                       { return _osr_link; }
+  void     set_osr_link(nmethod *n)               { _osr_link = n; }
 
   // tells whether frames described by this nmethod can be deoptimized
   // note: native wrappers cannot be deoptimized.
@@ -467,6 +492,14 @@
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                      OopClosure* f);
   void oops_do(OopClosure* f);
+  bool detect_scavenge_root_oops();
+  void verify_scavenge_root_oops() PRODUCT_RETURN;
+
+  bool test_set_oops_do_mark();
+  static void oops_do_marking_prologue();
+  static void oops_do_marking_epilogue();
+  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
+  DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })
 
   // ScopeDesc for an instruction
   ScopeDesc* scope_desc_at(address pc);
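
A note on the state encoding introduced above: npl_on_list (0x01) and npl_marked (0x10) occupy disjoint bits of the one-byte _scavenge_root_state, so the debug-only mark can be set and cleared without disturbing list membership, and scavenge_root_not_marked() holds exactly when no bit other than npl_on_list is set. Below is a minimal, self-contained sketch of that invariant; FakeNMethod and its members are hypothetical stand-ins, not the HotSpot types.

    #include <cassert>
    #include <cstdint>

    struct FakeNMethod {                      // stand-in, not the real nmethod
      enum { npl_on_list = 0x01, npl_marked = 0x10 };
      uint8_t state = 0;                      // models _scavenge_root_state

      bool on_list() const    { return (state & npl_on_list) != 0; }
      void set_on_list()      { state = npl_on_list; }
      void set_marked()       { state |= npl_marked; }   // debug-only in HotSpot
      void clear_marked()     { state &= ~npl_marked; }
      bool not_marked() const { return (state & ~npl_on_list) == 0; }
    };

    int main() {
      FakeNMethod nm;
      nm.set_on_list();
      assert(nm.on_list() && nm.not_marked());
      nm.set_marked();                        // visited during a marking pass
      assert(nm.on_list() && !nm.not_marked());
      nm.clear_marked();                      // pruning restores the invariant
      assert(nm.on_list() && nm.not_marked());
      return 0;
    }
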
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -53,14 +53,12 @@
  public:
   MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
                       bool should_do_nmethods);
+  bool should_do_nmethods() { return _should_do_nmethods; }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
   bool do_header() { return true; }
-  virtual const bool do_nmethods() const {
-    return _should_do_nmethods;
-  }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
@@ -79,14 +77,12 @@
  public:
   MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm, bool should_do_nmethods);
+  bool should_do_nmethods() { return _should_do_nmethods; }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
   bool do_header() { return true; }
-  virtual const bool do_nmethods() const {
-    return _should_do_nmethods;
-  }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
@@ -194,7 +190,6 @@
   inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
-  virtual const bool do_nmethods() const { return true; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -2857,9 +2857,12 @@
 
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,   // younger gens are roots
+                                true,   // activate StrongRootsScope
                                 true,   // collecting perm gen
                                 SharedHeap::ScanningOption(roots_scanning_options()),
-                                NULL, &notOlder);
+                                &notOlder,
+                                true,   // walk code active on stacks
+                                NULL);
 
   // Now mark from the roots
   assert(_revisitStack.isEmpty(), "Should be empty");
@@ -2905,9 +2908,12 @@
   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,   // younger gens are roots
+                                true,   // activate StrongRootsScope
                                 true,   // collecting perm gen
                                 SharedHeap::ScanningOption(roots_scanning_options()),
-                                NULL, &notOlder);
+                                &notOlder,
+                                true,   // walk code active on stacks
+                                NULL);
 
   // Now mark from the roots
   assert(_revisitStack.isEmpty(), "Should be empty");
@@ -3503,9 +3509,12 @@
     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     gch->gen_process_strong_roots(_cmsGen->level(),
                                   true,   // younger gens are roots
+                                  true,   // activate StrongRootsScope
                                   true,   // collecting perm gen
                                   SharedHeap::ScanningOption(roots_scanning_options()),
-                                  NULL, &notOlder);
+                                  &notOlder,
+                                  true,   // walk all of code cache if (so & SO_CodeCache)
+                                  NULL);
   }
 
   // Clear mod-union table; it will be dirtied in the prologue of
@@ -5015,9 +5024,15 @@
   _timer.start();
   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                 false,     // yg was scanned above
+                                false,     // this is parallel code
                                 true,      // collecting perm gen
                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                                NULL, &par_mrias_cl);
+                                &par_mrias_cl,
+                                true,   // walk all of code cache if (so & SO_CodeCache)
+                                NULL);
+  assert(_collector->should_unload_classes()
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr(
@@ -5398,7 +5413,6 @@
 
   // Set up for parallel process_strong_roots work.
   gch->set_par_threads(n_workers);
-  gch->change_strong_roots_parity();
   // We won't be iterating over the cards in the card table updating
   // the younger_gen cards, so we shouldn't call the following else
   // the verification code as well as subsequent younger_refs_iterate
@@ -5429,8 +5443,10 @@
   if (n_workers > 1) {
     // Make refs discovery MT-safe
     ReferenceProcessorMTMutator mt(ref_processor(), true);
+    GenCollectedHeap::StrongRootsScope srs(gch);
     workers->run_task(&tsk);
   } else {
+    GenCollectedHeap::StrongRootsScope srs(gch);
     tsk.work(0);
   }
   gch->set_par_threads(0);  // 0 ==> non-parallel.
@@ -5514,11 +5530,18 @@
     verify_work_stacks_empty();
 
     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+    GenCollectedHeap::StrongRootsScope srs(gch);
     gch->gen_process_strong_roots(_cmsGen->level(),
                                   true,  // younger gens as roots
+                                  false, // use the local StrongRootsScope
                                   true,  // collecting perm gen
                                   SharedHeap::ScanningOption(roots_scanning_options()),
-                                  NULL, &mrias_cl);
+                                  &mrias_cl,
+                                  true,   // walk code active on stacks
+                                  NULL);
+    assert(should_unload_classes()
+           || (roots_scanning_options() & SharedHeap::SO_CodeCache),
+           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   }
   verify_work_stacks_empty();
   // Restore evacuated mark words, if any, used for overflow list links
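
The pattern repeated throughout this file, constructing a StrongRootsScope immediately before dispatching to either workers->run_task() or the serial tsk.work(0), replaces the bare change_strong_roots_parity() calls deleted above. A reduced RAII model is sketched below; Heap and dispatch() are hypothetical names, and the parity cycling (1, 2, 1, ... never 0) follows the "never claimed" convention documented in sharedHeap.hpp further down.

    #include <cassert>

    struct Heap {
      int strong_roots_parity = 0;            // 0 is reserved for "never claimed"
      void change_strong_roots_parity() {
        strong_roots_parity = (strong_roots_parity == 1) ? 2 : 1;
      }
    };

    class StrongRootsScope {                  // reduced model of the real scope
      Heap* _heap;
     public:
      explicit StrongRootsScope(Heap* h) : _heap(h) {
        _heap->change_strong_roots_parity();  // threads claimed under the old
      }                                       // parity become claimable again
      ~StrongRootsScope() {}                  // nothing to undo for parity
    };

    void dispatch(Heap* heap, int n_workers) {
      if (n_workers > 1) {
        StrongRootsScope srs(heap);           // brackets the parallel task
        // workers->run_task(&tsk);
      } else {
        StrongRootsScope srs(heap);           // same bracketing, serial path
        // tsk.work(0);
      }
    }

    int main() {
      Heap h;
      dispatch(&h, 1);
      assert(h.strong_roots_parity != 0);
      return 0;
    }
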
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -747,10 +747,11 @@
   // clear the mark bitmap (no grey objects to start with)
   _nextMarkBitMap->clearAll();
   PrintReachableClosure prcl(_nextMarkBitMap);
-  g1h->process_strong_roots(
+  g1h->process_strong_roots(true,    // activate StrongRootsScope
                             false,   // fake perm gen collection
                             SharedHeap::SO_AllClasses,
                             &prcl, // Regular roots
+                            NULL,  // do not visit active blobs
                             &prcl    // Perm Gen Roots
                             );
   // The root iteration above "consumed" dirty cards in the perm gen.
@@ -866,9 +867,11 @@
   g1h->set_marking_started();
   g1h->rem_set()->prepare_for_younger_refs_iterate(false);
 
-  g1h->process_strong_roots(false,   // fake perm gen collection
+  g1h->process_strong_roots(true,    // activate StrongRootsScope
+                            false,   // fake perm gen collection
                             SharedHeap::SO_AllClasses,
                             &notOlder, // Regular roots
+                            NULL,     // do not visit active blobs
                             &older    // Perm Gen Roots
                             );
   checkpointRootsInitialPost();
@@ -1963,7 +1966,7 @@
   g1h->ensure_parsability(false);
 
   if (ParallelGCThreads > 0) {
-    g1h->change_strong_roots_parity();
+    G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
     int active_workers = ParallelGCThreads;
     set_phase(active_workers, false);
@@ -1980,7 +1983,7 @@
     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
     guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" );
   } else {
-    g1h->change_strong_roots_parity();
+    G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
     int active_workers = 1;
     set_phase(active_workers, false);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -2299,9 +2299,12 @@
   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
     if (!silent) { gclog_or_tty->print("roots "); }
     VerifyRootsClosure rootsCl(use_prev_marking);
-    process_strong_roots(false,
+    CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
+    process_strong_roots(true,  // activate StrongRootsScope
+                         false,
                          SharedHeap::SO_AllClasses,
                          &rootsCl,
+                         &blobsCl,
                          &rootsCl);
     rem_set()->invalidate(perm_gen()->used_region(), false);
     if (!silent) { gclog_or_tty->print("heapRegions "); }
@@ -3992,8 +3995,14 @@
   BufferingOopsInGenClosure buf_scan_perm(scan_perm);
   buf_scan_perm.set_generation(perm_gen());
 
-  process_strong_roots(collecting_perm_gen, so,
+  // Walk the code cache w/o buffering, because StarTask cannot handle
+  // unaligned oop locations.
+  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true);
+
+  process_strong_roots(false, // no scoping; this is parallel code
+                       collecting_perm_gen, so,
                        &buf_scan_non_heap_roots,
+                       &eager_scan_code_roots,
                        &buf_scan_perm);
   // Finish up any enqueued closure apps.
   buf_scan_non_heap_roots.done();
@@ -4083,7 +4092,8 @@
 void
 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
                                        OopClosure* non_root_closure) {
-  SharedHeap::process_weak_roots(root_closure, non_root_closure);
+  CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
+  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
 }
 
 
@@ -4117,15 +4127,16 @@
 
   init_for_evac_failure(NULL);
 
-  change_strong_roots_parity();  // In preparation for parallel strong roots.
   rem_set()->prepare_for_younger_refs_iterate(true);
 
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   double start_par = os::elapsedTime();
   if (ParallelGCThreads > 0) {
     // The individual threads will set their evac-failure closures.
+    StrongRootsScope srs(this);
     workers()->run_task(&g1_par_task);
   } else {
+    StrongRootsScope srs(this);
     g1_par_task.work(0);
   }
 
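
On the "StarTask cannot handle unaligned oop locations" comment above: a plausible reading is that a StarTask-style buffer steals the low bit of each queued slot address to record whether the slot is an oop* or a narrowOop*, which is only sound when slot addresses are at least two-byte aligned; oop immediates embedded in an nmethod's instruction stream carry no such guarantee, so code roots are scanned eagerly instead. A self-contained sketch of that tagging trick, with TaggedSlot as a hypothetical stand-in:

    #include <cassert>
    #include <cstdint>

    struct TaggedSlot {                       // hypothetical; models a buffer entry
      uintptr_t bits;
      TaggedSlot(void* p, bool is_narrow)
          : bits(reinterpret_cast<uintptr_t>(p) | (is_narrow ? 1u : 0u)) {
        // An odd slot address would collide with the tag bit -- exactly the
        // hazard with oop immediates inside compiled code.
        assert((reinterpret_cast<uintptr_t>(p) & 1u) == 0 && "slot must be aligned");
      }
      void* slot() const      { return reinterpret_cast<void*>(bits & ~uintptr_t(1)); }
      bool  is_narrow() const { return (bits & 1u) != 0; }
    };

    int main() {
      alignas(8) static char word[8];
      TaggedSlot t(word, /*is_narrow=*/ true);
      assert(t.slot() == word && t.is_narrow());
      return 0;
    }
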
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -116,9 +116,11 @@
 
   SharedHeap* sh = SharedHeap::heap();
 
-  sh->process_strong_roots(true,  // Collecting permanent generation.
+  sh->process_strong_roots(true,  // activate StrongRootsScope
+                           true,  // Collecting permanent generation.
                            SharedHeap::SO_SystemClasses,
                            &GenMarkSweep::follow_root_closure,
+                           &GenMarkSweep::follow_code_root_closure,
                            &GenMarkSweep::follow_root_closure);
 
   // Process reference objects found during marking
@@ -276,9 +278,11 @@
 
   SharedHeap* sh = SharedHeap::heap();
 
-  sh->process_strong_roots(true,  // Collecting permanent generation.
+  sh->process_strong_roots(true,  // activate StrongRootsScope
+                           true,  // Collecting permanent generation.
                            SharedHeap::SO_AllClasses,
                            &GenMarkSweep::adjust_root_pointer_closure,
+                           NULL,  // do not touch code cache here
                            &GenMarkSweep::adjust_pointer_closure);
 
   g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
--- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Tue Sep 15 21:53:47 2009 -0700
@@ -372,6 +372,7 @@
 psScavenge.inline.hpp                   psPromotionManager.hpp
 psScavenge.inline.hpp                   psScavenge.hpp
 
+pcTasks.cpp                             codeCache.hpp
 pcTasks.cpp                             collectedHeap.hpp
 pcTasks.cpp                             fprofiler.hpp
 pcTasks.cpp                             jniHandles.hpp
@@ -391,6 +392,7 @@
 pcTasks.hpp				psTasks.hpp
 
 psTasks.cpp                             cardTableExtension.hpp
+psTasks.cpp                             codeCache.hpp
 psTasks.cpp                             fprofiler.hpp
 psTasks.cpp                             gcTaskManager.hpp
 psTasks.cpp                             iterator.hpp
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -480,12 +480,14 @@
 
   par_scan_state.start_strong_roots();
   gch->gen_process_strong_roots(_gen->level(),
-                                true, // Process younger gens, if any,
-                                      // as strong roots.
-                                false,// not collecting perm generation.
+                                true,  // Process younger gens, if any,
+                                       // as strong roots.
+                                false, // no scope; this is parallel code
+                                false, // not collecting perm generation.
                                 SharedHeap::SO_AllClasses,
-                                &par_scan_state.older_gen_closure(),
-                                &par_scan_state.to_space_root_closure());
+                                &par_scan_state.to_space_root_closure(),
+                                true,   // walk *all* scavengable nmethods
+                                &par_scan_state.older_gen_closure());
   par_scan_state.end_strong_roots();
 
   // "evacuate followers".
@@ -799,15 +801,16 @@
   ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
   int n_workers = workers->total_workers();
   gch->set_par_threads(n_workers);
-  gch->change_strong_roots_parity();
   gch->rem_set()->prepare_for_younger_refs_iterate(true);
   // It turns out that even when we're using 1 thread, doing the work in a
   // separate thread causes wide variance in run times.  We can't help this
   // in the multi-threaded case, but we special-case n=1 here to get
   // repeatable measurements of the 1-thread overhead of the parallel code.
   if (n_workers > 1) {
+    GenCollectedHeap::StrongRootsScope srs(gch);
     workers->run_task(&tsk);
   } else {
+    GenCollectedHeap::StrongRootsScope srs(gch);
     tsk.work(0);
   }
   thread_state_set.reset();
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -962,6 +962,14 @@
   _old_gen->resize(desired_free_space);
 }
 
+ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
+  // nothing particular
+}
+
+ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
+  // nothing particular
+}
+
 #ifndef PRODUCT
 void ParallelScavengeHeap::record_gen_tops_before_GC() {
   if (ZapUnusedHeapArea) {
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -234,6 +234,13 @@
 
   // Mangle the unused parts of all spaces in the heap
   void gen_mangle_unused_area() PRODUCT_RETURN;
+
+  // Call these in sequential code around the processing of strong roots.
+  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
+  public:
+    ParStrongRootsScope();
+    ~ParStrongRootsScope();
+  };
 };
 
 inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
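
The empty constructor and destructor above are not dead weight: ParStrongRootsScope derives from MarkingCodeBlobClosure::MarkScope, so merely constructing one brackets the enclosed root tasks with nmethod::oops_do_marking_prologue()/epilogue() (defined in iterator.cpp below). A reduced model, with free functions standing in for the prologue and epilogue:

    #include <iostream>

    // Stand-ins for nmethod::oops_do_marking_prologue()/epilogue():
    static void prologue() { std::cout << "oops_do marking: begin\n"; }
    static void epilogue() { std::cout << "oops_do marking: end\n"; }

    struct MarkScope {
      bool _active;
      explicit MarkScope(bool activate = true) : _active(activate) {
        if (_active) prologue();
      }
      ~MarkScope() { if (_active) epilogue(); }
    };

    struct ParStrongRootsScope : MarkScope {};  // "nothing particular" of its own

    int main() {
      ParStrongRootsScope psrs;  // enqueue and run the root tasks inside here
      return 0;
    }
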
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -39,12 +39,13 @@
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+  CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
 
   if (_java_thread != NULL)
-    _java_thread->oops_do(&mark_and_push_closure);
+    _java_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
 
   if (_vm_thread != NULL)
-    _vm_thread->oops_do(&mark_and_push_closure);
+    _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
 
   // Do the real work
   cm->drain_marking_stacks(&mark_and_push_closure);
@@ -79,7 +80,8 @@
     case threads:
     {
       ResourceMark rm;
-      Threads::oops_do(&mark_and_push_closure);
+      CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
+      Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
     }
     break;
 
@@ -107,6 +109,11 @@
       vmSymbols::oops_do(&mark_and_push_closure);
       break;
 
+    case code_cache:
+      // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
+      //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
+      break;
+
     default:
       fatal("Unknown root type");
   }
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -92,7 +92,8 @@
     jvmti                 = 7,
     system_dictionary     = 8,
     vm_symbols            = 9,
-    reference_processing  = 10
+    reference_processing  = 10,
+    code_cache            = 11
   };
  private:
   RootType _root_type;
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -507,16 +507,22 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   // General strong roots.
-  Universe::oops_do(mark_and_push_closure());
-  ReferenceProcessor::oops_do(mark_and_push_closure());
-  JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
-  Threads::oops_do(mark_and_push_closure());
-  ObjectSynchronizer::oops_do(mark_and_push_closure());
-  FlatProfiler::oops_do(mark_and_push_closure());
-  Management::oops_do(mark_and_push_closure());
-  JvmtiExport::oops_do(mark_and_push_closure());
-  SystemDictionary::always_strong_oops_do(mark_and_push_closure());
-  vmSymbols::oops_do(mark_and_push_closure());
+  {
+    ParallelScavengeHeap::ParStrongRootsScope psrs;
+    Universe::oops_do(mark_and_push_closure());
+    ReferenceProcessor::oops_do(mark_and_push_closure());
+    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
+    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
+    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
+    ObjectSynchronizer::oops_do(mark_and_push_closure());
+    FlatProfiler::oops_do(mark_and_push_closure());
+    Management::oops_do(mark_and_push_closure());
+    JvmtiExport::oops_do(mark_and_push_closure());
+    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
+    vmSymbols::oops_do(mark_and_push_closure());
+    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
+    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
+  }
 
   // Flush marking stack.
   follow_stack();
@@ -609,7 +615,7 @@
   Universe::oops_do(adjust_root_pointer_closure());
   ReferenceProcessor::oops_do(adjust_root_pointer_closure());
   JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
-  Threads::oops_do(adjust_root_pointer_closure());
+  Threads::oops_do(adjust_root_pointer_closure(), NULL);
   ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
   FlatProfiler::oops_do(adjust_root_pointer_closure());
   Management::oops_do(adjust_root_pointer_closure());
@@ -617,6 +623,7 @@
   // SO_AllClasses
   SystemDictionary::oops_do(adjust_root_pointer_closure());
   vmSymbols::oops_do(adjust_root_pointer_closure());
+  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -2322,6 +2322,7 @@
 
   {
     TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
+    ParallelScavengeHeap::ParStrongRootsScope psrs;
 
     GCTaskQueue* q = GCTaskQueue::create();
 
@@ -2335,6 +2336,7 @@
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
+    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
 
     if (parallel_gc_threads > 1) {
       for (uint j = 0; j < parallel_gc_threads; j++) {
@@ -2405,7 +2407,7 @@
   Universe::oops_do(adjust_root_pointer_closure());
   ReferenceProcessor::oops_do(adjust_root_pointer_closure());
   JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
-  Threads::oops_do(adjust_root_pointer_closure());
+  Threads::oops_do(adjust_root_pointer_closure(), NULL);
   ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
   FlatProfiler::oops_do(adjust_root_pointer_closure());
   Management::oops_do(adjust_root_pointer_closure());
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -799,8 +799,7 @@
     FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
-    virtual const bool do_nmethods() const { return true; }
-  };
+  };
 
   class FollowStackClosure: public VoidClosure {
    private:
@@ -817,6 +816,8 @@
     AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
+    // do not walk from thread stacks to the code cache on this phase
+    virtual void do_code_blob(CodeBlob* cb) const { }
   };
 
   // Closure for verifying update of pointers.  Does not
@@ -1062,7 +1063,6 @@
     MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
-    virtual const bool do_nmethods() const { return true; }
   };
 
   PSParallelCompact();
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -358,6 +358,7 @@
     PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
     {
       // TraceTime("Roots");
+      ParallelScavengeHeap::ParStrongRootsScope psrs;
 
       GCTaskQueue* q = GCTaskQueue::create();
 
@@ -376,6 +377,7 @@
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
+      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
 
       ParallelTaskTerminator terminator(
         gc_task_manager()->workers(),
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -66,7 +66,7 @@
     case threads:
     {
       ResourceMark rm;
-      Threads::oops_do(&roots_closure);
+      Threads::oops_do(&roots_closure, NULL);
     }
     break;
 
@@ -90,6 +90,14 @@
       JvmtiExport::oops_do(&roots_closure);
       break;
 
+
+    case code_cache:
+      {
+        CodeBlobToOopClosure each_scavengable_code_blob(&roots_closure, /*do_marking=*/ true);
+        CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
+      }
+      break;
+
     default:
       fatal("Unknown root type");
   }
@@ -107,12 +115,13 @@
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
+  CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
 
   if (_java_thread != NULL)
-    _java_thread->oops_do(&roots_closure);
+    _java_thread->oops_do(&roots_closure, &roots_in_blobs);
 
   if (_vm_thread != NULL)
-    _vm_thread->oops_do(&roots_closure);
+    _vm_thread->oops_do(&roots_closure, &roots_in_blobs);
 
   // Do the real work
   pm->drain_stacks(false);
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -54,7 +54,8 @@
     flat_profiler         = 5,
     system_dictionary     = 6,
     management            = 7,
-    jvmti                 = 8
+    jvmti                 = 8,
+    code_cache            = 9
   };
  private:
   RootType _root_type;
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -69,6 +69,7 @@
 }
 
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
+CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
 
 void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
 void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -57,14 +57,12 @@
    public:
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
-    virtual const bool do_nmethods() const { return true; }
   };
 
   class MarkAndPushClosure: public OopClosure {
    public:
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
-    virtual const bool do_nmethods() const { return true; }
   };
 
   class FollowStackClosure: public VoidClosure {
@@ -163,6 +161,7 @@
  public:
   // Public closures
   static FollowRootClosure    follow_root_closure;
+  static CodeBlobToOopClosure follow_code_root_closure; // => follow_root_closure
   static MarkAndPushClosure   mark_and_push_closure;
   static FollowStackClosure   follow_stack_closure;
   static AdjustPointerClosure adjust_root_pointer_closure;
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -256,6 +256,14 @@
     return p == NULL || is_in_permanent(p);
   }
 
+  // An object is scavengable if its location may move during a scavenge.
+  // (A scavenge is a GC which is not a full GC.)
+  // Currently, this just means it is not perm (and not null).
+  // This could change if we rethink what's in perm-gen.
+  bool is_scavengable(const void *p) const {
+    return !is_in_permanent_or_null(p);
+  }
+
   // Returns "TRUE" if "p" is a method oop in the
   // current heap, with high probability. This predicate
   // is not stable, in general.
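
A quick illustration of the new predicate: it is simply the complement of is_in_permanent_or_null(), so NULL and perm-gen addresses are never scavengable. ToyHeap below is a hypothetical stand-in using integer bounds, not the real CollectedHeap:

    #include <cassert>
    #include <cstdint>

    struct ToyHeap {                          // stand-in, not CollectedHeap
      uintptr_t perm_lo, perm_hi;             // fake perm-gen bounds
      bool is_in_permanent(const void* p) const {
        uintptr_t a = reinterpret_cast<uintptr_t>(p);
        return a >= perm_lo && a < perm_hi;
      }
      bool is_in_permanent_or_null(const void* p) const {
        return p == nullptr || is_in_permanent(p);
      }
      bool is_scavengable(const void* p) const {
        return !is_in_permanent_or_null(p);   // mirrors the new method above
      }
    };

    int main() {
      static char perm[16], young[16];
      ToyHeap h{reinterpret_cast<uintptr_t>(perm),
                reinterpret_cast<uintptr_t>(perm) + sizeof(perm)};
      assert(!h.is_scavengable(nullptr));     // null is never a scavenge root
      assert(!h.is_scavengable(perm + 4));    // perm oops do not move in a scavenge
      assert(h.is_scavengable(young));        // anything else might move
      return 0;
    }
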
--- a/src/share/vm/memory/defNewGeneration.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -555,12 +555,14 @@
          "save marks have not been newly set.");
 
   gch->gen_process_strong_roots(_level,
-                                true, // Process younger gens, if any, as
-                                      // strong roots.
-                                false,// not collecting permanent generation.
+                                true,  // Process younger gens, if any,
+                                       // as strong roots.
+                                true,  // activate StrongRootsScope
+                                false, // not collecting perm generation.
                                 SharedHeap::SO_AllClasses,
-                                &fsc_with_gc_barrier,
-                                &fsc_with_no_gc_barrier);
+                                &fsc_with_no_gc_barrier,
+                                true,   // walk *all* scavengable nmethods
+                                &fsc_with_gc_barrier);
 
   // "evacuate followers".
   evacuate_followers.do_void();
--- a/src/share/vm/memory/genCollectedHeap.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -677,13 +677,23 @@
 void GenCollectedHeap::
 gen_process_strong_roots(int level,
                          bool younger_gens_as_roots,
+                         bool activate_scope,
                          bool collecting_perm_gen,
                          SharedHeap::ScanningOption so,
-                         OopsInGenClosure* older_gens,
-                         OopsInGenClosure* not_older_gens) {
+                         OopsInGenClosure* not_older_gens,
+                         bool do_code_roots,
+                         OopsInGenClosure* older_gens) {
   // General strong roots.
-  SharedHeap::process_strong_roots(collecting_perm_gen, so,
-                                   not_older_gens, older_gens);
+
+  if (!do_code_roots) {
+    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
+                                     not_older_gens, NULL, older_gens);
+  } else {
+    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
+    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
+    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
+                                     not_older_gens, &code_roots, older_gens);
+  }
 
   if (younger_gens_as_roots) {
     if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
@@ -706,8 +716,9 @@
 }
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
+                                              CodeBlobClosure* code_roots,
                                               OopClosure* non_root_closure) {
-  SharedHeap::process_weak_roots(root_closure, non_root_closure);
+  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
   // "Local" "weak" refs
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->ref_processor()->weak_oops_do(root_closure);
--- a/src/share/vm/memory/genCollectedHeap.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -408,16 +408,22 @@
   // "SO_SystemClasses" to all the "system" classes and loaders;
   // "SO_Symbols_and_Strings" applies the closure to all entries in
   // SymbolsTable and StringTable.
-  void gen_process_strong_roots(int level, bool younger_gens_as_roots,
+  void gen_process_strong_roots(int level,
+                                bool younger_gens_as_roots,
+                                // The remaining arguments are in an order
+                                // consistent with SharedHeap::process_strong_roots:
+                                bool activate_scope,
                                 bool collecting_perm_gen,
                                 SharedHeap::ScanningOption so,
-                                OopsInGenClosure* older_gens,
-                                OopsInGenClosure* not_older_gens);
+                                OopsInGenClosure* not_older_gens,
+                                bool do_code_roots,
+                                OopsInGenClosure* older_gens);
 
   // Apply "blk" to all the weak roots of the system.  These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
   void gen_process_weak_roots(OopClosure* root_closure,
+                              CodeBlobClosure* code_roots,
                               OopClosure* non_root_closure);
 
   // Set the saved marks of generations, if that makes sense.
--- a/src/share/vm/memory/genMarkSweep.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -240,9 +240,12 @@
 
   gch->gen_process_strong_roots(level,
                                 false, // Younger gens are not roots.
+                                true,  // activate StrongRootsScope
                                 true,  // Collecting permanent generation.
                                 SharedHeap::SO_SystemClasses,
-                                &follow_root_closure, &follow_root_closure);
+                                &follow_root_closure,
+                                true,   // walk code active on stacks
+                                &follow_root_closure);
 
   // Process reference objects found during marking
   {
@@ -330,14 +333,19 @@
 
   gch->gen_process_strong_roots(level,
                                 false, // Younger gens are not roots.
+                                true,  // activate StrongRootsScope
                                 true,  // Collecting permanent generation.
                                 SharedHeap::SO_AllClasses,
                                 &adjust_root_pointer_closure,
+                                false, // do not walk code
                                 &adjust_root_pointer_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
+  CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
+                                                   /*do_marking=*/ false);
   gch->gen_process_weak_roots(&adjust_root_pointer_closure,
+                              &adjust_code_pointer_closure,
                               &adjust_pointer_closure);
 
   adjust_marks();
--- a/src/share/vm/memory/iterator.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/memory/iterator.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -32,3 +32,42 @@
 void VoidClosure::do_void() {
   ShouldNotCallThis();
 }
+
+MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
+  : _active(activate)
+{
+  if (_active)  nmethod::oops_do_marking_prologue();
+}
+
+MarkingCodeBlobClosure::MarkScope::~MarkScope() {
+  if (_active)  nmethod::oops_do_marking_epilogue();
+}
+
+void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
+  if (!cb->is_nmethod())  return;
+  nmethod* nm = (nmethod*) cb;
+  if (!nm->test_set_oops_do_mark()) {
+    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, 1st visit\n"));
+    do_newly_marked_nmethod(nm);
+  } else {
+    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, skipped on 2nd visit\n"));
+  }
+}
+
+void CodeBlobToOopClosure::do_newly_marked_nmethod(CodeBlob* cb) {
+  cb->oops_do(_cl);
+}
+
+void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
+  if (!_do_marking) {
+    NOT_PRODUCT(if (TraceScavenge && Verbose && cb->is_nmethod())  ((nmethod*)cb)->print_on(tty, "oops_do, unmarked visit\n"));
+    // This assert won't work, since there are lots of mini-passes
+    // (mostly in debug mode) that co-exist with marking phases.
+    //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
+    cb->oops_do(_cl);
+  } else {
+    MarkingCodeBlobClosure::do_code_blob(cb);
+  }
+}
+
+
--- a/src/share/vm/memory/iterator.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/memory/iterator.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -24,6 +24,7 @@
 
 // The following classes are C++ `closures` for iterating over objects, roots and spaces
 
+class CodeBlob;
 class ReferenceProcessor;
 
 // Closure provides abortability.
@@ -57,9 +58,6 @@
   virtual const bool should_remember_klasses() const { return false;    }
   virtual void remember_klass(Klass* k) { /* do nothing */ }
 
-  // If "true", invoke on nmethods (when scanning compiled frames).
-  virtual const bool do_nmethods() const { return false; }
-
   // The methods below control how object iterations invoking this closure
   // should be performed:
 
@@ -158,6 +156,51 @@
 };
 
 
+// CodeBlobClosure is used for iterating through code blobs
+// in the code cache or on thread stacks
+
+class CodeBlobClosure : public Closure {
+ public:
+  // Called for each code blob.
+  virtual void do_code_blob(CodeBlob* cb) = 0;
+};
+
+
+class MarkingCodeBlobClosure : public CodeBlobClosure {
+ public:
+  // Called for each code blob, but at most once per unique blob.
+  virtual void do_newly_marked_nmethod(CodeBlob* cb) = 0;
+
+  virtual void do_code_blob(CodeBlob* cb);
+    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }
+
+  class MarkScope : public StackObj {
+  protected:
+    bool _active;
+  public:
+    MarkScope(bool activate = true);
+      // = { if (active) nmethod::oops_do_marking_prologue(); }
+    ~MarkScope();
+      // = { if (active) nmethod::oops_do_marking_epilogue(); }
+  };
+};
+
+
+// Applies an oop closure to all ref fields in code blobs
+// iterated over in an object iteration.
+class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
+  OopClosure* _cl;
+  bool _do_marking;
+public:
+  virtual void do_newly_marked_nmethod(CodeBlob* cb);
+    // = { cb->oops_do(_cl); }
+  virtual void do_code_blob(CodeBlob* cb);
+    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
+  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
+    : _cl(cl), _do_marking(do_marking) {}
+};
+
+
 
 // MonitorClosure is used for iterating over monitors in the monitors cache
 
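
How the new hierarchy composes: CodeBlobToOopClosure adapts any OopClosure so that thread-stack and code-cache walkers, which hand out CodeBlob*, can reuse oop-level closures, and the marking mode makes repeat visits to the same blob cheap. A compilable reduction with hypothetical stand-in types (FakeBlob, BlobToOop, CountOops):

    #include <iostream>
    #include <vector>

    struct Oop { int dummy; };

    struct OopClosure {
      virtual void do_oop(Oop** p) = 0;
      virtual ~OopClosure() {}
    };

    struct FakeBlob {                         // stands in for CodeBlob/nmethod
      std::vector<Oop*> embedded;             // oop immediates baked into code
      bool marked = false;
      bool test_set_mark() { bool was = marked; marked = true; return was; }
      void oops_do(OopClosure* cl) { for (Oop*& p : embedded) cl->do_oop(&p); }
    };

    struct CodeBlobClosure {
      virtual void do_code_blob(FakeBlob* cb) = 0;
      virtual ~CodeBlobClosure() {}
    };

    // Mirrors CodeBlobToOopClosure: apply _cl to each blob's oops, at most
    // once per blob when do_marking is set.
    struct BlobToOop : CodeBlobClosure {
      OopClosure* _cl;
      bool _do_marking;
      BlobToOop(OopClosure* cl, bool do_marking) : _cl(cl), _do_marking(do_marking) {}
      void do_code_blob(FakeBlob* cb) override {
        if (_do_marking && cb->test_set_mark()) return;  // skip the 2nd visit
        cb->oops_do(_cl);
      }
    };

    struct CountOops : OopClosure {
      int n = 0;
      void do_oop(Oop**) override { ++n; }
    };

    int main() {
      Oop a, b;
      FakeBlob blob;
      blob.embedded = { &a, &b };
      CountOops counter;
      BlobToOop visit(&counter, /*do_marking=*/ true);
      visit.do_code_blob(&blob);
      visit.do_code_blob(&blob);              // suppressed by the mark
      std::cout << counter.n << "\n";         // prints 2, not 4
      return 0;
    }
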
--- a/src/share/vm/memory/sharedHeap.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/memory/sharedHeap.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -100,12 +100,27 @@
          "Not in range.");
 }
 
-void SharedHeap::process_strong_roots(bool collecting_perm_gen,
+SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
+  : MarkScope(activate)
+{
+  if (_active) {
+    outer->change_strong_roots_parity();
+  }
+}
+
+SharedHeap::StrongRootsScope::~StrongRootsScope() {
+  // nothing particular
+}
+
+void SharedHeap::process_strong_roots(bool activate_scope,
+                                      bool collecting_perm_gen,
                                       ScanningOption so,
                                       OopClosure* roots,
+                                      CodeBlobClosure* code_roots,
                                       OopsInGenClosure* perm_blk) {
+  StrongRootsScope srs(this, activate_scope);
   // General strong roots.
-  if (n_par_threads() == 0) change_strong_roots_parity();
+  assert(_strong_roots_parity != 0, "must have called prologue code");
   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
     Universe::oops_do(roots);
     ReferenceProcessor::oops_do(roots);
@@ -117,9 +132,9 @@
     JNIHandles::oops_do(roots);
   // All threads execute this; the individual threads are task groups.
   if (ParallelGCThreads > 0) {
-    Threads::possibly_parallel_oops_do(roots);
+    Threads::possibly_parallel_oops_do(roots, code_roots);
   } else {
-    Threads::oops_do(roots);
+    Threads::oops_do(roots, code_roots);
   }
   if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
     ObjectSynchronizer::oops_do(roots);
@@ -156,11 +171,29 @@
   }
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
-     if (so & SO_CodeCache) {
-       CodeCache::oops_do(roots);
-     }
+    if (so & SO_CodeCache) {
+      // (Currently, CMSCollector uses this to do intermediate-strength collections.)
+      assert(collecting_perm_gen, "scanning all of code cache");
+      assert(code_roots != NULL, "must supply closure for code cache");
+      if (code_roots != NULL) {
+        CodeCache::blobs_do(code_roots);
+      }
+    } else if (so & (SO_SystemClasses|SO_AllClasses)) {
+      if (!collecting_perm_gen) {
+        // If we are collecting from class statics, but we are not going to
+        // visit all of the CodeCache, collect from the non-perm roots if any.
+        // This makes the code cache function temporarily as a source of strong
+        // roots for oops, until the next major collection.
+        //
+        // If collecting_perm_gen is true, we require that this phase will call
+        // CodeCache::do_unloading.  This will kill off nmethods with expired
+        // weak references, such as stale invokedynamic targets.
+        CodeCache::scavenge_root_nmethods_do(code_roots);
+      }
+    }
     // Verify if the code cache contents are in the perm gen
-    NOT_PRODUCT(CodeCache::oops_do(&assert_is_perm_closure));
+    NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
+    NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
   }
 
   // Roots that should point only into permanent generation.
@@ -220,11 +253,12 @@
 // just skip adjusting any shared entries in the string table.
 
 void SharedHeap::process_weak_roots(OopClosure* root_closure,
+                                    CodeBlobClosure* code_roots,
                                     OopClosure* non_root_closure) {
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, root_closure);
 
-  CodeCache::oops_do(non_root_closure);
+  CodeCache::blobs_do(code_roots);
   SymbolTable::oops_do(root_closure);
   if (UseSharedSpaces && !DumpSharedSpaces) {
     SkipAdjustingSharedStrings skip_closure(root_closure);
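
The rewritten SO_CodeCache branch above amounts to a three-way policy: scan the whole cache for intermediate-strength collections, scan only the scavenge-root list when class statics are roots but the cache is not walked wholesale, and otherwise leave pruning to CodeCache::do_unloading. A sketch of that dispatch follows; the enum bit values and names are illustrative stand-ins, not the real ScanningOption constants:

    #include <cassert>

    enum ScanOpt {                      // bit values illustrative only
      SO_None          = 0x0,
      SO_AllClasses    = 0x1,
      SO_SystemClasses = 0x2,
      SO_CodeCache     = 0x8
    };

    enum class CodeCacheAction { ScanAll, ScanScavengeRoots, LeaveToUnloading };

    CodeCacheAction pick(int so, bool collecting_perm_gen) {
      if (so & SO_CodeCache)
        return CodeCacheAction::ScanAll;            // intermediate-strength pass
      if ((so & (SO_SystemClasses | SO_AllClasses)) && !collecting_perm_gen)
        return CodeCacheAction::ScanScavengeRoots;  // only nmethods with non-perm oops
      return CodeCacheAction::LeaveToUnloading;     // do_unloading prunes dead nmethods
    }

    int main() {
      assert(pick(SO_CodeCache | SO_AllClasses, true) == CodeCacheAction::ScanAll);
      assert(pick(SO_AllClasses, false) == CodeCacheAction::ScanScavengeRoots);
      assert(pick(SO_AllClasses, true)  == CodeCacheAction::LeaveToUnloading);
      return 0;
    }
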
--- a/src/share/vm/memory/sharedHeap.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/memory/sharedHeap.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -165,9 +165,21 @@
   //   c) to never return a distinguished value (zero) with which such
   //      task-claiming variables may be initialized, to indicate "never
   //      claimed".
+ private:
   void change_strong_roots_parity();
+ public:
   int strong_roots_parity() { return _strong_roots_parity; }
 
+  // Call these in sequential code around process_strong_roots.
+  // The StrongRootsScope constructor calls change_strong_roots_parity, if
+  // parallel tasks are enabled.
+  class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
+  public:
+    StrongRootsScope(SharedHeap* outer, bool activate = true);
+    ~StrongRootsScope();
+  };
+  friend class StrongRootsScope;
+
   enum ScanningOption {
     SO_None                = 0x0,
     SO_AllClasses          = 0x1,
@@ -198,15 +210,18 @@
   // "SO_Symbols" applies the closure to all entries in SymbolsTable;
   // "SO_Strings" applies the closure to all entries in StringTable;
   // "SO_CodeCache" applies the closure to all elements of the CodeCache.
-  void process_strong_roots(bool collecting_perm_gen,
+  void process_strong_roots(bool activate_scope,
+                            bool collecting_perm_gen,
                             ScanningOption so,
                             OopClosure* roots,
+                            CodeBlobClosure* code_roots,
                             OopsInGenClosure* perm_blk);
 
   // Apply "blk" to all the weak roots of the system.  These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table.
   void process_weak_roots(OopClosure* root_closure,
+                          CodeBlobClosure* code_roots,
                           OopClosure* non_root_closure);
 
 
--- a/src/share/vm/oops/instanceKlass.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -2025,7 +2025,7 @@
   // This is a short non-blocking critical region, so the no safepoint check is ok.
   OsrList_lock->lock_without_safepoint_check();
   assert(n->is_osr_method(), "wrong kind of nmethod");
-  n->set_link(osr_nmethods_head());
+  n->set_osr_link(osr_nmethods_head());
   set_osr_nmethods_head(n);
   // Remember to unlock again
   OsrList_lock->unlock();
@@ -2041,17 +2041,17 @@
   // Search for match
   while(cur != NULL && cur != n) {
     last = cur;
-    cur = cur->link();
+    cur = cur->osr_link();
   }
   if (cur == n) {
     if (last == NULL) {
       // Remove first element
-      set_osr_nmethods_head(osr_nmethods_head()->link());
+      set_osr_nmethods_head(osr_nmethods_head()->osr_link());
     } else {
-      last->set_link(cur->link());
+      last->set_osr_link(cur->osr_link());
     }
   }
-  n->set_link(NULL);
+  n->set_osr_link(NULL);
   // Remember to unlock again
   OsrList_lock->unlock();
 }
@@ -2068,7 +2068,7 @@
       OsrList_lock->unlock();
       return osr;
     }
-    osr = osr->link();
+    osr = osr->osr_link();
   }
   OsrList_lock->unlock();
   return NULL;
--- a/src/share/vm/oops/oop.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/oops/oop.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -330,6 +330,7 @@
 
   bool is_perm() const;
   bool is_perm_or_null() const;
+  bool is_scavengable() const;
   bool is_shared() const;
   bool is_shared_readonly() const;
   bool is_shared_readwrite() const;
--- a/src/share/vm/oops/oop.inline2.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/oops/oop.inline2.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -34,3 +34,7 @@
 inline bool oopDesc::is_perm_or_null() const {
   return this == NULL || is_perm();
 }
+
+inline bool oopDesc::is_scavengable() const {
+  return Universe::heap()->is_scavengable(this);
+}
--- a/src/share/vm/opto/output.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/opto/output.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -611,7 +611,7 @@
       assert(cik->is_instance_klass() ||
              cik->is_array_klass(), "Not supported allocation.");
       sv = new ObjectValue(spobj->_idx,
-                           new ConstantOopWriteValue(cik->encoding()));
+                           new ConstantOopWriteValue(cik->constant_encoding()));
       Compile::set_sv_for_object_node(objs, sv);
 
       uint first_ind = spobj->first_index();
@@ -702,13 +702,13 @@
   case Type::AryPtr:
   case Type::InstPtr:
   case Type::KlassPtr:          // fall through
-    array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->encoding()));
+    array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
     break;
   case Type::NarrowOop:
     if (t == TypeNarrowOop::NULL_PTR) {
       array->append(new ConstantOopWriteValue(NULL));
     } else {
-      array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->encoding()));
+      array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
     }
     break;
   case Type::Int:
@@ -871,7 +871,7 @@
           assert(cik->is_instance_klass() ||
                  cik->is_array_klass(), "Not supported allocation.");
           ObjectValue* sv = new ObjectValue(spobj->_idx,
-                                new ConstantOopWriteValue(cik->encoding()));
+                                new ConstantOopWriteValue(cik->constant_encoding()));
           Compile::set_sv_for_object_node(objs, sv);
 
           uint first_ind = spobj->first_index();
@@ -890,7 +890,7 @@
         }
       } else {
         const TypePtr *tp = obj_node->bottom_type()->make_ptr();
-        scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->encoding());
+        scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->constant_encoding());
       }
 
       OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
--- a/src/share/vm/opto/parse.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/opto/parse.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -469,7 +469,7 @@
 
   // loading from a constant field or the constant pool
   // returns false if push failed (non-perm field constants only, not ldcs)
-  bool push_constant(ciConstant con);
+  bool push_constant(ciConstant con, bool require_constant = false);
 
   // implementation of object creation bytecodes
   void do_new();
--- a/src/share/vm/opto/parse2.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/opto/parse2.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -1325,7 +1325,8 @@
           }
         }
       }
-      push_constant(constant);
+      bool pushed = push_constant(constant, true);
+      guarantee(pushed, "must be possible to push this constant");
     }
 
     break;
--- a/src/share/vm/opto/parse3.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/opto/parse3.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -267,7 +267,7 @@
 }
 
 
-bool Parse::push_constant(ciConstant constant) {
+bool Parse::push_constant(ciConstant constant, bool require_constant) {
   switch (constant.basic_type()) {
   case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
   case T_INT:      push( intcon(constant.as_int())     ); break;
@@ -279,13 +279,16 @@
   case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
   case T_ARRAY:
   case T_OBJECT: {
-    // the oop is in perm space if the ciObject "has_encoding"
+    // cases:
+    //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
+    //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
+    // An oop is not scavengable if it is in the perm gen.
     ciObject* oop_constant = constant.as_object();
     if (oop_constant->is_null_object()) {
       push( zerocon(T_OBJECT) );
       break;
-    } else if (oop_constant->has_encoding()) {
-      push( makecon(TypeOopPtr::make_from_constant(oop_constant)) );
+    } else if (require_constant || oop_constant->should_be_constant()) {
+      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
       break;
     } else {
       // we cannot inline the oop, but we can use it later to narrow a type
--- a/src/share/vm/opto/type.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/opto/type.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -2411,14 +2411,13 @@
 
 //------------------------------make_from_constant-----------------------------
 // Make a java pointer from an oop constant
-const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o) {
+const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) {
   if (o->is_method_data() || o->is_method()) {
     // Treat much like a typeArray of bytes, like below, but fake the type...
-    assert(o->has_encoding(), "must be a perm space object");
     const Type* etype = (Type*)get_const_basic_type(T_BYTE);
     const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
     ciKlass *klass = ciTypeArrayKlass::make((BasicType) T_BYTE);
-    assert(o->has_encoding(), "method data oops should be tenured");
+    assert(o->can_be_constant(), "method data oops should be tenured");
     const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
     return arr;
   } else {
@@ -2427,8 +2426,9 @@
     ciKlass *klass = o->klass();
     if (klass->is_instance_klass()) {
       // Element is an instance
-      if (!o->has_encoding()) {  // not a perm-space constant
-        // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
+      if (require_constant) {
+        if (!o->can_be_constant())  return NULL;
+      } else if (!o->should_be_constant()) {
         return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0);
       }
       return TypeInstPtr::make(o);
@@ -2440,8 +2440,9 @@
       // We used to pass NotNull in here, asserting that the sub-arrays
       // are all not-null.  This is not true in general, as code can
       // slam NULLs down in the subarrays.
-      if (!o->has_encoding()) {  // not a perm-space constant
-        // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
+      if (require_constant) {
+        if (!o->can_be_constant())  return NULL;
+      } else if (!o->should_be_constant()) {
         return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
       }
       const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
@@ -2453,8 +2454,9 @@
       const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
       // We used to pass NotNull in here, asserting that the array pointer
       // is not-null. That was not true in general.
-      if (!o->has_encoding()) {  // not a perm-space constant
-        // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
+      if (require_constant) {
+        if (!o->can_be_constant())  return NULL;
+      } else if (!o->should_be_constant()) {
         return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
       }
       const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
@@ -2483,7 +2485,7 @@
     ShouldNotReachHere();
   }
 
-  return (intptr_t)const_oop()->encoding();
+  return (intptr_t)const_oop()->constant_encoding();
 }
 
 
@@ -3338,14 +3340,19 @@
       ciObject* o = const_oop();
       if( _ptr == Constant ) {
         if( tap->const_oop() != NULL && !o->equals(tap->const_oop()) ) {
+          xk = (klass() == tap->klass());
           ptr = NotNull;
           o = NULL;
           instance_id = InstanceBot;
+        } else {
+          xk = true;
         }
       } else if( above_centerline(_ptr) ) {
         o = tap->const_oop();
+        xk = true;
+      } else {
+        xk = this->_klass_is_exact;
       }
-      xk = true;
       return TypeAryPtr::make( ptr, o, tary, tap->_klass, xk, off, instance_id );
     }
     case NotNull:
--- a/src/share/vm/opto/type.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/opto/type.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -711,7 +711,10 @@
     return make_from_klass_common(klass, false, false);
   }
   // Creates a singleton type given an object.
-  static const TypeOopPtr* make_from_constant(ciObject* o);
+  // If the object cannot be rendered as a constant,
+  // may return a non-singleton type.
+  // If require_constant, produce a NULL if a singleton is not possible.
+  static const TypeOopPtr* make_from_constant(ciObject* o, bool require_constant = false);
 
   // Make a generic (unclassed) pointer to an oop.
   static const TypeOopPtr* make(PTR ptr, int offset, int instance_id = InstanceBot);
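
A hedged sketch of how a compiler client might use the new flag; the helper
name try_fold_oop_constant is hypothetical and not part of this changeset:

    // Hypothetical helper: produce a constant (singleton) type only if the
    // code cache may legally embed this oop; otherwise return NULL so the
    // caller can emit an ordinary load rather than a ConP node.
    static const TypeOopPtr* try_fold_oop_constant(ciObject* value) {
      // With require_constant=true the call fails with NULL instead of
      // widening the result to a non-singleton NotNull type.
      return TypeOopPtr::make_from_constant(value, /*require_constant=*/ true);
    }
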
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -3126,6 +3126,12 @@
   // exceptions) will be visible.
   blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
   Universe::oops_do(&blk);
+
+  // If there are any non-perm roots in the code cache, visit them.
+  blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
+  CodeBlobToOopClosure look_in_blobs(&blk, false);
+  CodeCache::scavenge_root_nmethods_do(&look_in_blobs);
+
   return true;
 }
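
The hunk above wraps the JVMTI heap-walking OopClosure in a
CodeBlobToOopClosure so that non-perm oops held by scavenge-root nmethods are
reported like any other roots. A minimal sketch of the same pattern, assuming
a hypothetical counting closure (CountOopsClosure is illustrative only):

    // Hypothetical: count the oops embedded in scavenge-root nmethods.
    class CountOopsClosure: public OopClosure {
     public:
      int _count;
      CountOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { if (*p != NULL) _count++; }
      virtual void do_oop(narrowOop* p) { _count++; }
    };

    CountOopsClosure counter;
    CodeBlobToOopClosure blobs(&counter, /*do_marking=*/ false);
    CodeCache::scavenge_root_nmethods_do(&blobs);
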
 
--- a/src/share/vm/runtime/arguments.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -2639,16 +2639,22 @@
 
   if (EnableInvokeDynamic && !EnableMethodHandles) {
     if (!FLAG_IS_DEFAULT(EnableMethodHandles)) {
-      warning("forcing EnableMethodHandles true to allow EnableInvokeDynamic");
+      warning("forcing EnableMethodHandles true because EnableInvokeDynamic is true");
     }
     EnableMethodHandles = true;
   }
   if (EnableMethodHandles && !AnonymousClasses) {
     if (!FLAG_IS_DEFAULT(AnonymousClasses)) {
-      warning("forcing AnonymousClasses true to enable EnableMethodHandles");
+      warning("forcing AnonymousClasses true because EnableMethodHandles is true");
     }
     AnonymousClasses = true;
   }
+  if ((EnableMethodHandles || AnonymousClasses) && ScavengeRootsInCode == 0) {
+    if (!FLAG_IS_DEFAULT(ScavengeRootsInCode)) {
+      warning("forcing ScavengeRootsInCode non-zero because EnableMethodHandles or AnonymousClasses is true");
+    }
+    ScavengeRootsInCode = 1;
+  }
 
   if (PrintGCDetails) {
     // Turn on -verbose:gc options as well
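
With these cascades, turning on invokedynamic pulls in everything it depends
on. A hedged command-line example (whether EnableInvokeDynamic itself needs an
unlock option depends on the build; MyApp is a placeholder):

    java -XX:+UnlockDiagnosticVMOptions -XX:+EnableInvokeDynamic MyApp
    # forces EnableMethodHandles=true, AnonymousClasses=true, and
    # ScavengeRootsInCode=1; a warning is printed only for flags the
    # user had set explicitly.
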
--- a/src/share/vm/runtime/frame.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/frame.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -1043,7 +1043,7 @@
   finder.oops_do();
 }
 
-void frame::oops_code_blob_do(OopClosure* f, const RegisterMap* reg_map) {
+void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
   assert(_cb != NULL, "sanity check");
   if (_cb->oop_maps() != NULL) {
     OopMapSet::oops_do(this, reg_map, f);
@@ -1058,21 +1058,9 @@
   // oops referenced from nmethods active on thread stacks so as to
   // prevent them from being collected. However, this visit should be
   // restricted to certain phases of the collection only. The
-  // closure answers whether it wants nmethods to be traced.
-  // (All CodeBlob subtypes other than NMethod currently have
-  // an empty oops_do() method.
-  if (f->do_nmethods()) {
-    _cb->oops_do(f);
-  }
-}
-
-void frame::nmethods_code_blob_do() {
-  assert(_cb != NULL, "sanity check");
-
-  // If we see an activation belonging to a non_entrant nmethod, we mark it.
-  if (_cb->is_nmethod() && ((nmethod *)_cb)->is_not_entrant()) {
-    ((nmethod*)_cb)->mark_as_seen_on_stack();
-  }
+  // closure decides how it wants nmethods to be traced.
+  if (cf != NULL)
+    cf->do_code_blob(_cb);
 }
 
 class CompiledArgumentOopFinder: public SignatureInfo {
@@ -1201,18 +1189,18 @@
 }
 
 
-void frame::oops_do_internal(OopClosure* f, RegisterMap* map, bool use_interpreter_oop_map_cache) {
+void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
          if (is_interpreted_frame())    { oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
   } else if (is_entry_frame())          { oops_entry_do      (f, map);
-  } else if (CodeCache::contains(pc())) { oops_code_blob_do  (f, map);
+  } else if (CodeCache::contains(pc())) { oops_code_blob_do  (f, cf, map);
   } else {
     ShouldNotReachHere();
   }
 }
 
-void frame::nmethods_do() {
+void frame::nmethods_do(CodeBlobClosure* cf) {
   if (_cb != NULL && _cb->is_nmethod()) {
-    nmethods_code_blob_do();
+    cf->do_code_blob(_cb);
   }
 }
 
@@ -1358,7 +1346,7 @@
     }
   }
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
-  oops_do_internal(&VerifyOopClosure::verify_oop, (RegisterMap*)map, false);
+  oops_do_internal(&VerifyOopClosure::verify_oop, NULL, (RegisterMap*)map, false);
 }
 
 
--- a/src/share/vm/runtime/frame.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/frame.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -384,16 +384,14 @@
   void oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f);
 
   // Iteration of oops
-  void oops_do_internal(OopClosure* f, RegisterMap* map, bool use_interpreter_oop_map_cache);
+  void oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
   void oops_entry_do(OopClosure* f, const RegisterMap* map);
-  void oops_code_blob_do(OopClosure* f, const RegisterMap* map);
+  void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map);
   int adjust_offset(methodOop method, int index); // helper for above fn
-  // Iteration of nmethods
-  void nmethods_code_blob_do();
  public:
   // Memory management
-  void oops_do(OopClosure* f, RegisterMap* map) { oops_do_internal(f, map, true); }
-  void nmethods_do();
+  void oops_do(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cf, map, true); }
+  void nmethods_do(CodeBlobClosure* cf);
 
   void gc_prologue();
   void gc_epilogue();
--- a/src/share/vm/runtime/globals.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/globals.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -714,6 +714,11 @@
   diagnostic(bool, TraceNMethodInstalls, false,                             \
              "Trace nmethod installation")                                  \
                                                                             \
+  diagnostic(intx, ScavengeRootsInCode, 0,                                  \
+             "0: do not allow scavengable oops in the code cache; "         \
+             "1: allow scavenging from the code cache; "                    \
+             "2: emit as many constants as the compiler can see")           \
+                                                                            \
   diagnostic(bool, TraceOSRBreakpoint, false,                               \
              "Trace OSR Breakpoint")                                        \
                                                                             \
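
Because the new flag is declared diagnostic, setting it directly requires
unlocking diagnostic options. For example, to ask the compilers to embed as
many oop constants as they can see (MyApp is a placeholder):

    java -XX:+UnlockDiagnosticVMOptions -XX:ScavengeRootsInCode=2 MyApp
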
--- a/src/share/vm/runtime/sweeper.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/sweeper.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -34,6 +34,17 @@
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
 
+class MarkActivationClosure: public CodeBlobClosure {
+public:
+  virtual void do_code_blob(CodeBlob* cb) {
+    // If we see an activation belonging to a non_entrant nmethod, we mark it.
+    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
+      ((nmethod*)cb)->mark_as_seen_on_stack();
+    }
+  }
+};
+static MarkActivationClosure mark_activation_closure;
+
 void NMethodSweeper::sweep() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
@@ -57,7 +68,7 @@
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
     }
-    Threads::nmethods_do();
+    Threads::nmethods_do(&mark_activation_closure);
 
     // reset the flags since we started a scan from the beginning.
     _rescan = false;
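
The marking step that used to be hard-wired into frame.cpp is now an ordinary
CodeBlobClosure handed to Threads::nmethods_do, so other clients can reuse the
same traversal. A hypothetical example (CountNotEntrantClosure is illustrative
only, not part of this changeset):

    // Count not-entrant nmethods that still have activations on a stack.
    class CountNotEntrantClosure: public CodeBlobClosure {
     public:
      int _count;
      CountNotEntrantClosure() : _count(0) {}
      virtual void do_code_blob(CodeBlob* cb) {
        if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant())  _count++;
      }
    };

    CountNotEntrantClosure cl;
    Threads::nmethods_do(&cl);  // visits the code blob of every frame
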
--- a/src/share/vm/runtime/thread.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/thread.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -683,14 +683,15 @@
   return false;
 }
 
-void Thread::oops_do(OopClosure* f) {
+void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
   active_handles()->oops_do(f);
   // Do oop for ThreadShadow
   f->do_oop((oop*)&_pending_exception);
   handle_area()->oops_do(f);
 }
 
-void Thread::nmethods_do() {
+void Thread::nmethods_do(CodeBlobClosure* cf) {
+  // no nmethods in a generic thread...
 }
 
 void Thread::print_on(outputStream* st) const {
@@ -2316,12 +2317,12 @@
 }
 
 
-void JavaThread::oops_do(OopClosure* f) {
+void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
   // The ThreadProfiler oops_do is done from FlatProfiler::oops_do
   // since there may be more than one thread using each ThreadProfiler.
 
   // Traverse the GCHandles
-  Thread::oops_do(f);
+  Thread::oops_do(f, cf);
 
   assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
           (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
@@ -2347,7 +2348,7 @@
 
     // Traverse the execution stack
     for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
-      fst.current()->oops_do(f, fst.register_map());
+      fst.current()->oops_do(f, cf, fst.register_map());
     }
   }
 
@@ -2379,9 +2380,8 @@
   }
 }
 
-void JavaThread::nmethods_do() {
-  // Traverse the GCHandles
-  Thread::nmethods_do();
+void JavaThread::nmethods_do(CodeBlobClosure* cf) {
+  Thread::nmethods_do(cf);  // (super method is a no-op)
 
   assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
           (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
@@ -2389,7 +2389,7 @@
   if (has_last_Java_frame()) {
     // Traverse the execution stack
     for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
-      fst.current()->nmethods_do();
+      fst.current()->nmethods_do(cf);
     }
   }
 }
@@ -2463,7 +2463,7 @@
 
 void JavaThread::verify() {
   // Verify oops in the thread.
-  oops_do(&VerifyOopClosure::verify_oop);
+  oops_do(&VerifyOopClosure::verify_oop, NULL);
 
   // Verify the stack frames.
   frames_do(frame_verify);
@@ -3602,14 +3602,14 @@
 // uses the Threads_lock to guarantee this property. It also makes sure that
 // all threads get blocked when exiting or starting).
 
-void Threads::oops_do(OopClosure* f) {
+void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
   ALL_JAVA_THREADS(p) {
-    p->oops_do(f);
+    p->oops_do(f, cf);
   }
-  VMThread::vm_thread()->oops_do(f);
+  VMThread::vm_thread()->oops_do(f, cf);
 }
 
-void Threads::possibly_parallel_oops_do(OopClosure* f) {
+void Threads::possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf) {
   // Introduce a mechanism allowing parallel threads to claim threads as
   // root groups.  Overhead should be small enough to use all the time,
   // even in sequential code.
@@ -3618,12 +3618,12 @@
   int cp = SharedHeap::heap()->strong_roots_parity();
   ALL_JAVA_THREADS(p) {
     if (p->claim_oops_do(is_par, cp)) {
-      p->oops_do(f);
+      p->oops_do(f, cf);
     }
   }
   VMThread* vmt = VMThread::vm_thread();
   if (vmt->claim_oops_do(is_par, cp))
-    vmt->oops_do(f);
+    vmt->oops_do(f, cf);
 }
 
 #ifndef SERIALGC
@@ -3644,11 +3644,11 @@
 }
 #endif // SERIALGC
 
-void Threads::nmethods_do() {
+void Threads::nmethods_do(CodeBlobClosure* cf) {
   ALL_JAVA_THREADS(p) {
-    p->nmethods_do();
+    p->nmethods_do(cf);
   }
-  VMThread::vm_thread()->nmethods_do();
+  VMThread::vm_thread()->nmethods_do(cf);
 }
 
 void Threads::gc_epilogue() {
--- a/src/share/vm/runtime/thread.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/thread.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -374,7 +374,8 @@
 
   // GC support
   // Apply "f->do_oop" to all root oops in "this".
-  void oops_do(OopClosure* f);
+  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames.
+  void oops_do(OopClosure* f, CodeBlobClosure* cf);
 
   // Handles the parallel case for the method below.
 private:
@@ -398,7 +399,7 @@
   }
 
   // Sweeper support
-  void nmethods_do();
+  void nmethods_do(CodeBlobClosure* cf);
 
   // Tells if adr belongs to this thread. This is used
   // for checking if a lock is owned by the running thread.
@@ -1238,10 +1239,10 @@
   void frames_do(void f(frame*, const RegisterMap*));
 
   // Memory operations
-  void oops_do(OopClosure* f);
+  void oops_do(OopClosure* f, CodeBlobClosure* cf);
 
   // Sweeper operations
-  void nmethods_do();
+  void nmethods_do(CodeBlobClosure* cf);
 
   // Memory management operations
   void gc_epilogue();
@@ -1629,9 +1630,9 @@
 
   // Apply "f->do_oop" to all root oops in all threads.
   // This version may only be called by sequential code.
-  static void oops_do(OopClosure* f);
+  static void oops_do(OopClosure* f, CodeBlobClosure* cf);
   // This version may be called by sequential or parallel code.
-  static void possibly_parallel_oops_do(OopClosure* f);
+  static void possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf);
   // This creates a list of GCTasks, one per thread.
   static void create_thread_roots_tasks(GCTaskQueue* q);
   // This creates a list of GCTasks, one per thread, for marking objects.
@@ -1639,13 +1640,13 @@
 
   // Apply "f->do_oop" to roots in all threads that
   // are part of compiled frames
-  static void compiled_frame_oops_do(OopClosure* f);
+  static void compiled_frame_oops_do(OopClosure* f, CodeBlobClosure* cf);
 
   static void convert_hcode_pointers();
   static void restore_hcode_pointers();
 
   // Sweeper
-  static void nmethods_do();
+  static void nmethods_do(CodeBlobClosure* cf);
 
   static void gc_epilogue();
   static void gc_prologue();
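
Under the new signatures a root scan supplies both closures at once; a hedged
sketch (MyRootClosure stands in for whatever OopClosure a collector already
has):

    MyRootClosure roots;
    // CodeBlobToOopClosure applies 'roots' to the oops embedded in each
    // code blob it is handed; the second argument controls whether the
    // visited nmethods are also marked.
    CodeBlobToOopClosure blobs(&roots, /*do_marking=*/ true);
    Threads::oops_do(&roots, &blobs);
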
--- a/src/share/vm/runtime/vmStructs.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/vmStructs.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -549,6 +549,7 @@
   /********************************/                                                                                                 \
                                                                                                                                      \
      static_field(CodeCache,                   _heap,                                         CodeHeap*)                             \
+     static_field(CodeCache,                   _scavenge_root_nmethods,                       nmethod*)                              \
                                                                                                                                      \
   /*******************************/                                                                                                  \
   /* CodeHeap (NOTE: incomplete) */                                                                                                  \
@@ -618,7 +619,9 @@
      static_field(nmethod,             _zombie_instruction_size,                      int)                                   \
   nonstatic_field(nmethod,             _method,                                       methodOop)                             \
   nonstatic_field(nmethod,             _entry_bci,                                    int)                                   \
-  nonstatic_field(nmethod,             _link,                                         nmethod*)                              \
+  nonstatic_field(nmethod,             _osr_link,                                     nmethod*)                              \
+  nonstatic_field(nmethod,             _scavenge_root_link,                           nmethod*)                              \
+  nonstatic_field(nmethod,             _scavenge_root_state,                          jbyte)                                 \
   nonstatic_field(nmethod,             _exception_offset,                             int)                                   \
   nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
   nonstatic_field(nmethod,             _orig_pc_offset,                               int)                                   \
--- a/src/share/vm/runtime/vmThread.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -619,8 +619,8 @@
 }
 
 
-void VMThread::oops_do(OopClosure* f) {
-  Thread::oops_do(f);
+void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+  Thread::oops_do(f, cf);
   _vm_queue->oops_do(f);
 }
 
@@ -652,5 +652,5 @@
 #endif
 
 void VMThread::verify() {
-  oops_do(&VerifyOopClosure::verify_oop);
+  oops_do(&VerifyOopClosure::verify_oop, NULL);
 }
--- a/src/share/vm/runtime/vmThread.hpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/runtime/vmThread.hpp	Tue Sep 15 21:53:47 2009 -0700
@@ -121,7 +121,7 @@
   static VMThread* vm_thread()                    { return _vm_thread; }
 
   // GC support
-  void oops_do(OopClosure* f);
+  void oops_do(OopClosure* f, CodeBlobClosure* cf);
 
   // Debugging
   void print_on(outputStream* st) const;
--- a/src/share/vm/utilities/debug.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/utilities/debug.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -702,11 +702,14 @@
   tty->print_cr("Searching strong roots:");
   Universe::oops_do(&lookFor, false);
   JNIHandles::oops_do(&lookFor);   // Global (strong) JNI handles
-  Threads::oops_do(&lookFor);
+  Threads::oops_do(&lookFor, NULL);
   ObjectSynchronizer::oops_do(&lookFor);
   //FlatProfiler::oops_do(&lookFor);
   SystemDictionary::oops_do(&lookFor);
 
+  tty->print_cr("Searching code cache:");
+  CodeCache::oops_do(&lookFor);
+
   tty->print_cr("Done.");
 }