diff src/share/vm/gc_implementation/parNew/parNewGeneration.cpp @ 989:148e5441d916

6863023: need non-perm oops in code cache for JSR 292
Summary: Make a special root-list for those few nmethods which might contain non-perm oops.
Reviewed-by: twisti, kvn, never, jmasa, ysr
author jrose
date Tue, 15 Sep 2009 21:53:47 -0700
parents becb17ad5e51
children e018e6884bd8
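Annotation (not part of the changeset): the summary above describes keeping a special root-list for the few nmethods that may contain non-perm (heap) oops, so that a young-generation scavenge can walk just that short list instead of the whole code cache. The toy C++ sketch below uses entirely invented names to illustrate that idea; it is a model of the technique, not the HotSpot implementation.

// Toy model (not HotSpot code): the code cache keeps a side list of the few
// compiled methods that may hold heap oops; root scanning walks only that list.
#include <cstdio>
#include <vector>

struct Nmethod { int embedded_oop; };        // stand-in for an nmethod with one oop

struct ToyCodeCache {
  std::vector<Nmethod*> all_nmethods;        // every compiled method
  std::vector<Nmethod*> scavenge_roots;      // only those that may point into the heap

  void register_nmethod(Nmethod* nm, bool has_heap_oops) {
    all_nmethods.push_back(nm);
    if (has_heap_oops) scavenge_roots.push_back(nm);   // the "special root-list"
  }

  template <typename Closure>
  void scavenge_root_nmethods_do(Closure visit) {      // walk *all* scavengable nmethods
    for (Nmethod* nm : scavenge_roots) visit(nm);
  }
};

int main() {
  ToyCodeCache cc;
  Nmethod a{42}, b{0};
  cc.register_nmethod(&a, /*has_heap_oops=*/true);
  cc.register_nmethod(&b, /*has_heap_oops=*/false);
  // A scavenge that asks for code roots would visit only 'a'.
  cc.scavenge_root_nmethods_do([](Nmethod* nm) {
    std::printf("visiting oop %d in scavengable nmethod\n", nm->embedded_oop);
  });
  return 0;
}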
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Sep 15 11:09:34 2009 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Sep 15 21:53:47 2009 -0700
@@ -480,12 +480,14 @@
 
   par_scan_state.start_strong_roots();
   gch->gen_process_strong_roots(_gen->level(),
-                                true, // Process younger gens, if any,
-                                      // as strong roots.
-                                false,// not collecting perm generation.
+                                true,  // Process younger gens, if any,
+                                       // as strong roots.
+                                false, // no scope; this is parallel code
+                                false, // not collecting perm generation.
                                 SharedHeap::SO_AllClasses,
-                                &par_scan_state.older_gen_closure(),
-                                &par_scan_state.to_space_root_closure());
+                                &par_scan_state.to_space_root_closure(),
+                                true,   // walk *all* scavengable nmethods
+                                &par_scan_state.older_gen_closure());
   par_scan_state.end_strong_roots();
 
   // "evacuate followers".
@@ -799,15 +801,16 @@
   ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
   int n_workers = workers->total_workers();
   gch->set_par_threads(n_workers);
-  gch->change_strong_roots_parity();
   gch->rem_set()->prepare_for_younger_refs_iterate(true);
   // It turns out that even when we're using 1 thread, doing the work in a
   // separate thread causes wide variance in run times.  We can't help this
   // in the multi-threaded case, but we special-case n=1 here to get
   // repeatable measurements of the 1-thread overhead of the parallel code.
   if (n_workers > 1) {
+    GenCollectedHeap::StrongRootsScope srs(gch);
     workers->run_task(&tsk);
   } else {
+    GenCollectedHeap::StrongRootsScope srs(gch);
     tsk.work(0);
   }
   thread_state_set.reset();
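Annotation: this hunk drops the bare gch->change_strong_roots_parity() call and instead opens a GenCollectedHeap::StrongRootsScope around both the multi-threaded and the single-threaded paths, presumably performing the parity flip (among other per-cycle setup) in the scope's constructor. The toy RAII sketch below models that pattern with invented names; the parity lets each worker claim a set of roots exactly once per collection cycle, and a real implementation would claim with an atomic compare-and-swap.

// Toy RAII sketch (not HotSpot code) of the StrongRootsScope idea.
#include <cstdio>

struct Heap {
  int strong_roots_parity = 0;               // a simple 0/1 toggle is enough for the toy
  void change_strong_roots_parity() { strong_roots_parity ^= 1; }
};

struct WorkerTarget {
  int claimed_parity = 0;
  // A worker may scan this target's roots only if it wins the claim for this cycle.
  bool claim_roots(const Heap& h) {
    if (claimed_parity == h.strong_roots_parity) return false;  // already claimed
    claimed_parity = h.strong_roots_parity;
    return true;
  }
};

class StrongRootsScope {                     // stands in for GenCollectedHeap::StrongRootsScope
  Heap* _heap;
 public:
  explicit StrongRootsScope(Heap* h) : _heap(h) { _heap->change_strong_roots_parity(); }
  ~StrongRootsScope() { /* per-cycle teardown would go here */ }
};

int main() {
  Heap gch;
  WorkerTarget t;
  {
    StrongRootsScope srs(&gch);              // mirrors the srs object added around run_task / work(0)
    std::printf("first claim:  %d\n", t.claim_roots(gch));   // 1: this worker scans t's roots
    std::printf("second claim: %d\n", t.claim_roots(gch));   // 0: a later claimant skips them
  }
  return 0;
}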