diff src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 14909:4ca6dc0799b6

summary:  Backout jdk9 merge
author:   Gilles Duboscq <duboscq@ssw.jku.at>
date:     Tue, 01 Apr 2014 13:57:07 +0200
parents:  d8041d695d19
children: 52b4284cb496
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 01 14:09:03 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 01 13:57:07 2014 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,8 +50,8 @@
 #include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "memory/gcLocker.inline.hpp"
+#include "memory/genOopClosures.inline.hpp"
 #include "memory/generationSpec.hpp"
-#include "memory/iterator.hpp"
 #include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
@@ -1575,6 +1575,8 @@
 void
 G1CollectedHeap::
 resize_if_necessary_after_full_collection(size_t word_size) {
+  assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
+
   // Include the current allocation, if any, and bytes that will be
   // pre-allocated to support collections, as "used".
   const size_t used_after_gc = used();
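The assertion added here guards the ratio arithmetic that follows in this function: HotSpot treats MinHeapFreeRatio and MaxHeapFreeRatio as percentages and derives lower and upper bounds on capacity from the post-GC used size. A minimal standalone sketch of those bounds, with hypothetical helper names (the real code inlines this math):

```cpp
#include <cstddef>
#include <cstdio>

// Illustrative rendering of the free-ratio resize math: keep free space
// within [min_free_ratio, max_free_ratio] percent of capacity.
size_t min_desired_capacity(size_t used_after_gc, unsigned min_free_ratio) {
  // used/capacity <= 1 - ratio/100  =>  capacity >= used / (1 - ratio/100)
  double max_used_fraction = 1.0 - min_free_ratio / 100.0;
  return (size_t)(used_after_gc / max_used_fraction);
}

size_t max_desired_capacity(size_t used_after_gc, unsigned max_free_ratio) {
  double min_used_fraction = 1.0 - max_free_ratio / 100.0;
  return (size_t)(used_after_gc / min_used_fraction);
}

int main() {
  size_t used = 600u << 20;  // e.g. 600 MB live after a full GC
  printf("grow to at least %zu MB\n", min_desired_capacity(used, 40) >> 20);
  printf("shrink to at most %zu MB\n", max_desired_capacity(used, 70) >> 20);
  // The new assert MinHeapFreeRatio <= MaxHeapFreeRatio guarantees the
  // lower bound never exceeds the upper bound, so the two cannot conflict.
}
```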
@@ -2266,7 +2268,7 @@
                                 // (for efficiency/performance)
                            false);
                                 // Setting next fields of discovered
-                                // lists does not require a barrier.
+                                // lists requires a barrier.
 }
 
 size_t G1CollectedHeap::capacity() const {
@@ -2374,6 +2376,25 @@
   return blk.result();
 }
 
+size_t G1CollectedHeap::unsafe_max_alloc() {
+  if (free_regions() > 0) return HeapRegion::GrainBytes;
+  // otherwise, is there space in the current allocation region?
+
+  // We need to store the current allocation region in a local variable
+  // here. The problem is that this method doesn't take any locks and
+  // there may be other threads which overwrite the current allocation
+  // region field. attempt_allocation(), for example, sets it to NULL
+  // and this can happen *after* the NULL check here but before the call
+  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
+  // to be a problem in the optimized build, since the two loads of the
+  // current allocation region field are optimized away.
+  HeapRegion* hr = _mutator_alloc_region.get();
+  if (hr == NULL) {
+    return 0;
+  }
+  return hr->free();
+}
+
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
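The long comment in the restored unsafe_max_alloc() describes a classic check-then-use race: reading a shared pointer twice (once for the NULL check, once for the dereference) lets another thread clear the field in between. The fix is the load-once idiom the comment spells out. A minimal sketch under modern C++ atomics (type and field names are illustrative, not HotSpot's):

```cpp
#include <atomic>
#include <cstddef>

struct Region { size_t free() const { return 1024; } };

// Shared field that other threads may set to nullptr at any time,
// as attempt_allocation() does with the current allocation region.
std::atomic<Region*> current_region{nullptr};

// Broken pattern: two loads of the shared field. The region can be
// retired between the check and the use, dereferencing nullptr.
size_t racy_free_bytes() {
  if (current_region.load() == nullptr) return 0;
  return current_region.load()->free();  // deliberately wrong: may crash
}

// The idiom the HotSpot comment describes: load the shared pointer
// exactly once into a local, then check and use only the local copy.
size_t safe_free_bytes() {
  Region* r = current_region.load();
  if (r == nullptr) return 0;
  return r->free();
}
```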
@@ -3004,17 +3025,7 @@
 }
 
 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
-  return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
-}
-
-size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
-  return young_list()->eden_used_bytes();
-}
-
-// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
-// must be smaller than the humongous object limit.
-size_t G1CollectedHeap::max_tlab_size() const {
-  return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
+  return HeapRegion::GrainBytes;
 }
 
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
@@ -3026,11 +3037,11 @@
   // humongous objects.
 
   HeapRegion* hr = _mutator_alloc_region.get();
-  size_t max_tlab = max_tlab_size() * wordSize;
+  size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
   if (hr == NULL) {
-    return max_tlab;
+    return max_tlab_size;
   } else {
-    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
+    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
   }
 }
 
@@ -3095,7 +3106,11 @@
   return NULL; // keep some compilers happy
 }
 
-class VerifyRootsClosure: public OopClosure {
+// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
+//       pass it as the perm_blk to SharedHeap::process_strong_roots.
+//       When process_strong_roots stop calling perm_blk->younger_refs_iterate
+//       we can change this closure to extend the simpler OopClosure.
+class VerifyRootsClosure: public OopsInGenClosure {
 private:
   G1CollectedHeap* _g1h;
   VerifyOption     _vo;
@@ -3131,7 +3146,7 @@
   void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-class G1VerifyCodeRootOopClosure: public OopClosure {
+class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
   G1CollectedHeap* _g1h;
   OopClosure* _root_cl;
   nmethod* _nm;
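Both closures in this hunk follow the standard HotSpot closure convention visible a few lines up (`void do_oop(narrowOop* p) { do_oop_nv(p); }`): a virtual do_oop pair, one overload per oop slot width, forwarding to a single templated worker so the logic is written once. A minimal sketch of that double-dispatch pattern, with stand-in types:

```cpp
#include <cstddef>
#include <cstdint>

using oop = uintptr_t;       // illustrative stand-ins for HotSpot's
using narrowOop = uint32_t;  // full-width and compressed oop slots

struct OopClosureLike {
  virtual void do_oop(oop* p) = 0;
  virtual void do_oop(narrowOop* p) = 0;
};

// One templated body serves both slot widths; the virtual overrides are
// thin forwarders, mirroring do_oop_nv() in VerifyRootsClosure.
class CountingClosure : public OopClosureLike {
  size_t _visited = 0;
  template <class T> void do_oop_work(T* p) {
    if (*p != 0) _visited++;  // shared logic, written once
  }
public:
  void do_oop(oop* p) override { do_oop_work(p); }
  void do_oop(narrowOop* p) override { do_oop_work(p); }
  size_t visited() const { return _visited; }
};
```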
@@ -3404,27 +3419,26 @@
 
     if (!silent) { gclog_or_tty->print("Roots "); }
     VerifyRootsClosure rootsCl(vo);
+    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
+    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
     VerifyKlassClosure klassCl(this, &rootsCl);
 
     // We apply the relevant closures to all the oops in the
-    // system dictionary, class loader data graph and the string table.
-    // Don't verify the code cache here, since it's verified below.
-    const int so = SO_AllClasses | SO_Strings;
+    // system dictionary, the string table and the code cache.
+    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
 
     // Need cleared claim bits for the strong roots processing
     ClassLoaderDataGraph::clear_claimed_marks();
 
     process_strong_roots(true,      // activate StrongRootsScope
+                         false,     // we set "is scavenging" to false,
+                                    // so we don't reset the dirty cards.
                          ScanningOption(so),  // roots scanning options
                          &rootsCl,
+                         &blobsCl,
                          &klassCl
                          );
 
-    // Verify the nmethods in the code cache.
-    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
-    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
-    CodeCache::blobs_do(&blobsCl);
-
     bool failures = rootsCl.failures() || codeRootsCl.failures();
 
     if (vo != VerifyOption_G1UseMarkWord) {
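The backout folds code-cache verification back into the strong-roots walk by adding SO_CodeCache to the `so` bitmask instead of running a separate CodeCache::blobs_do() pass. ScanningOption is a flags enum: callers OR together the root sets they want, and process_strong_roots() tests individual bits. A sketch with made-up bit values:

```cpp
// Illustrative ScanningOption-style bitmask; the actual HotSpot values differ.
enum ScanningOptionLike {
  SO_AllClasses = 1 << 0,
  SO_Strings    = 1 << 1,
  SO_CodeCache  = 1 << 2
};

inline bool wants(int so, ScanningOptionLike opt) { return (so & opt) != 0; }

// With the backout applied, verification requests the code cache too:
//   const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
//   ... later: if (wants(so, SO_CodeCache)) { /* walk nmethods */ }
```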
@@ -3670,7 +3684,6 @@
   // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Fill TLAB's and such
-  accumulate_statistics_all_tlabs();
   ensure_parsability(true);
 
   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
@@ -3695,8 +3708,6 @@
                         "derived pointer present"));
   // always_do_update_barrier = true;
 
-  resize_all_tlabs();
-
   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
   Universe::update_heap_info_at_gc();
@@ -4544,7 +4555,7 @@
 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
 
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
@@ -4554,7 +4565,7 @@
     _term_attempts(0),
     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
-    _age_table(false), _scanner(g1h, this, rp),
+    _age_table(false),
     _strong_roots_time(0), _term_time(0),
     _alloc_buffer_waste(0), _undo_waste(0) {
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
@@ -4663,10 +4674,14 @@
 
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
                                      G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _par_scan_state(par_scan_state),
-  _worker_id(par_scan_state->queue_num()) { }
-
-void G1ParCopyHelper::mark_object(oop obj) {
+  _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
+  _par_scan_state(par_scan_state),
+  _worker_id(par_scan_state->queue_num()),
+  _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
+  _mark_in_progress(_g1->mark_in_progress()) { }
+
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
 #ifdef ASSERT
   HeapRegion* hr = _g1->heap_region_containing(obj);
   assert(hr != NULL, "sanity");
@@ -4677,7 +4692,9 @@
   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
 }
 
-void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
+  ::mark_forwarded_object(oop from_obj, oop to_obj) {
 #ifdef ASSERT
   assert(from_obj->is_forwarded(), "from obj should be forwarded");
   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
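The backout turns mark_object() and mark_forwarded_object() back into members of the G1ParCopyClosure class template, so each out-of-line definition must repeat the full template header. A minimal sketch of that C++ syntax, with hypothetical names:

```cpp
// Defining a class template's member outside the class body repeats the
// template header, as the restored
// G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> definitions do.
template <bool Verbose, int Level>
class Worker {
public:
  void run();  // declared here, defined below
};

template <bool Verbose, int Level>
void Worker<Verbose, Level>::run() {
  // Non-type template parameters are compile-time constants here, so
  // branches like this fold away in each instantiation.
  if (Verbose) { /* log at Level */ }
}

// Each distinct parameter list is a separate type with its own copy of run():
//   Worker<true, 2> w; w.run();
```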
@@ -4699,25 +4716,27 @@
   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
+  ::copy_to_survivor_space(oop old) {
   size_t word_sz = old->size();
-  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
+  HeapRegion* from_region = _g1->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
   int       young_index = from_region->young_index_in_cset()+1;
   assert( (from_region->is_young() && young_index >  0) ||
          (!from_region->is_young() && young_index == 0), "invariant" );
-  G1CollectorPolicy* g1p = _g1h->g1_policy();
+  G1CollectorPolicy* g1p = _g1->g1_policy();
   markOop m = old->mark();
   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
                                            : m->age();
   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
-  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
+  HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
 #ifndef PRODUCT
   // Should this evacuation fail?
-  if (_g1h->evacuation_should_fail()) {
+  if (_g1->evacuation_should_fail()) {
     if (obj_ptr != NULL) {
-      undo_allocation(alloc_purpose, obj_ptr, word_sz);
+      _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
       obj_ptr = NULL;
     }
   }
@@ -4726,7 +4745,7 @@
   if (obj_ptr == NULL) {
     // This will either forward-to-self, or detect that someone else has
     // installed a forwarding pointer.
-    return _g1h->handle_evacuation_failure_par(this, old);
+    return _g1->handle_evacuation_failure_par(_par_scan_state, old);
   }
 
   oop obj = oop(obj_ptr);
@@ -4759,12 +4778,12 @@
         m = m->incr_age();
         obj->set_mark(m);
       }
-      age_table()->add(obj, word_sz);
+      _par_scan_state->age_table()->add(obj, word_sz);
     } else {
       obj->set_mark(m);
     }
 
-    size_t* surv_young_words = surviving_young_words();
+    size_t* surv_young_words = _par_scan_state->surviving_young_words();
     surv_young_words[young_index] += word_sz;
 
     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@@ -4773,15 +4792,15 @@
       // length field of the from-space object.
       arrayOop(obj)->set_length(0);
       oop* old_p = set_partial_array_mask(old);
-      push_on_queue(old_p);
+      _par_scan_state->push_on_queue(old_p);
     } else {
       // No point in using the slower heap_region_containing() method,
       // given that we know obj is in the heap.
-      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
+      _scanner.set_region(_g1->heap_region_containing_raw(obj));
       obj->oop_iterate_backwards(&_scanner);
     }
   } else {
-    undo_allocation(alloc_purpose, obj_ptr, word_sz);
+    _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
     obj = forward_ptr;
   }
   return obj;
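The `forward_ptr != NULL` branch above resolves a race between GC workers: several threads may try to evacuate the same object, each speculatively allocating a destination, and a single atomic install of the forwarding pointer picks the winner while losers roll back their allocation and adopt the winner's copy. A hedged sketch of that pattern (HotSpot does the install on the mark word via forward_to_atomic; the stubs here are hypothetical stand-ins for the PLAB calls):

```cpp
#include <atomic>
#include <cstddef>

struct Obj { std::atomic<Obj*> forwardee{nullptr}; };

// Hypothetical stand-ins for _par_scan_state->allocate / undo_allocation.
Obj* allocate_copy(size_t)           { return new Obj; }
void undo_allocation(Obj* p, size_t) { delete p; }

Obj* evacuate(Obj* old_obj, size_t word_sz) {
  Obj* copy = allocate_copy(word_sz);
  Obj* expected = nullptr;
  // One CAS on the forwarding slot picks a single winner among the racers.
  if (old_obj->forwardee.compare_exchange_strong(expected, copy)) {
    return copy;                   // we installed the forwarding pointer
  }
  undo_allocation(copy, word_sz);  // lost the race: roll back our copy
  return expected;                 // CAS wrote the winner into expected
}
```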
@@ -4794,25 +4813,23 @@
   }
 }
 
-template <G1Barrier barrier, bool do_mark_object>
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
 template <class T>
-void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-
-  if (oopDesc::is_null(heap_oop)) {
-    return;
-  }
-
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
+::do_oop_work(T* p) {
+  oop obj = oopDesc::load_decode_heap_oop(p);
+  assert(barrier != G1BarrierRS || obj != NULL,
+         "Precondition: G1BarrierRS implies obj is non-NULL");
 
   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
 
+  // here the null check is implicit in the cset_fast_test() test
   if (_g1->in_cset_fast_test(obj)) {
     oop forwardee;
     if (obj->is_forwarded()) {
       forwardee = obj->forwardee();
     } else {
-      forwardee = _par_scan_state->copy_to_survivor_space(obj);
+      forwardee = copy_to_survivor_space(obj);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
@@ -4822,25 +4839,32 @@
       mark_forwarded_object(obj, forwardee);
     }
 
-    if (barrier == G1BarrierKlass) {
+    // When scanning the RS, we only care about objs in CS.
+    if (barrier == G1BarrierRS) {
+      _par_scan_state->update_rs(_from, p, _worker_id);
+    } else if (barrier == G1BarrierKlass) {
       do_klass_barrier(p, forwardee);
     }
   } else {
     // The object is not in collection set. If we're a root scanning
     // closure during an initial mark pause (i.e. do_mark_object will
     // be true) then attempt to mark the object.
-    if (do_mark_object) {
+    if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
       mark_object(obj);
     }
   }
 
-  if (barrier == G1BarrierEvac) {
+  if (barrier == G1BarrierEvac && obj != NULL) {
     _par_scan_state->update_rs(_from, p, _worker_id);
   }
-}
-
-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
+
+  if (do_gen_barrier && obj != NULL) {
+    par_do_barrier(p);
+  }
+}
+
+template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
+template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
 
 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
   assert(has_partial_array_mask(p), "invariant");
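The two `template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(...)` lines restored above are explicit instantiation definitions: the member template's body lives in this .cpp file, so the specializations other translation units link against must be emitted here by hand. A minimal sketch of the mechanism, with hypothetical names:

```cpp
// In a header: declaration only; the body is hidden in one .cpp file.
template <bool Checked>
struct Codec {
  template <class T> void encode(T* p);
};

// In the .cpp: the body, plus explicit instantiation definitions. Without
// these, callers in other translation units would hit unresolved symbols.
template <bool Checked>
template <class T>
void Codec<Checked>::encode(T* p) {
  if (Checked && p == nullptr) return;  // body compiled only here
}

template void Codec<false>::encode(int*);
template void Codec<false>::encode(long*);
```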
@@ -5031,7 +5055,7 @@
 
       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
 
-      G1ParScanThreadState            pss(_g1h, worker_id, rp);
+      G1ParScanThreadState            pss(_g1h, worker_id);
       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
@@ -5124,9 +5148,15 @@
 
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
 
+  assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
+  // Walk the code cache/strong code roots w/o buffering, because StarTask
+  // cannot handle unaligned oop locations.
+  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
+
   process_strong_roots(false, // no scoping; this is parallel code
-                       so,
+                       is_scavenging, so,
                        &buf_scan_non_heap_roots,
+                       &eager_scan_code_roots,
                        scan_klasses
                        );
 
@@ -5172,7 +5202,7 @@
   // the collection set.
   // Note all threads participate in this set of root tasks.
   double mark_strong_code_roots_ms = 0.0;
-  if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
+  if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
     double mark_strong_roots_start = os::elapsedTime();
     mark_strong_code_roots(worker_i);
     mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
@@ -5180,106 +5210,16 @@
   g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
 
   // Now scan the complement of the collection set.
-  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
-  g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
-
+  if (scan_rs != NULL) {
+    g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
+  }
   _process_strong_tasks->all_tasks_completed();
 }
 
-class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
-private:
-  BoolObjectClosure* _is_alive;
-  int _initial_string_table_size;
-  int _initial_symbol_table_size;
-
-  bool  _process_strings;
-  int _strings_processed;
-  int _strings_removed;
-
-  bool  _process_symbols;
-  int _symbols_processed;
-  int _symbols_removed;
-
-  bool _do_in_parallel;
-public:
-  G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
-    AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
-    _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
-    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
-    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
-
-    _initial_string_table_size = StringTable::the_table()->table_size();
-    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
-    if (process_strings) {
-      StringTable::clear_parallel_claimed_index();
-    }
-    if (process_symbols) {
-      SymbolTable::clear_parallel_claimed_index();
-    }
-  }
-
-  ~G1StringSymbolTableUnlinkTask() {
-    guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
-              err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
-                      StringTable::parallel_claimed_index(), _initial_string_table_size));
-    guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
-              err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
-                      SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
-  }
-
-  void work(uint worker_id) {
-    if (_do_in_parallel) {
-      int strings_processed = 0;
-      int strings_removed = 0;
-      int symbols_processed = 0;
-      int symbols_removed = 0;
-      if (_process_strings) {
-        StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
-        Atomic::add(strings_processed, &_strings_processed);
-        Atomic::add(strings_removed, &_strings_removed);
-      }
-      if (_process_symbols) {
-        SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
-        Atomic::add(symbols_processed, &_symbols_processed);
-        Atomic::add(symbols_removed, &_symbols_removed);
-      }
-    } else {
-      if (_process_strings) {
-        StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
-      }
-      if (_process_symbols) {
-        SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
-      }
-    }
-  }
-
-  size_t strings_processed() const { return (size_t)_strings_processed; }
-  size_t strings_removed()   const { return (size_t)_strings_removed; }
-
-  size_t symbols_processed() const { return (size_t)_symbols_processed; }
-  size_t symbols_removed()   const { return (size_t)_symbols_removed; }
-};
-
-void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
-                                                     bool process_strings, bool process_symbols) {
-  uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                   _g1h->workers()->active_workers() : 1);
-
-  G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    set_par_threads(n_workers);
-    workers()->run_task(&g1_unlink_task);
-    set_par_threads(0);
-  } else {
-    g1_unlink_task.work(0);
-  }
-  if (G1TraceStringSymbolTableScrubbing) {
-    gclog_or_tty->print_cr("Cleaned string and symbol table, "
-                           "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
-                           "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
-                           g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
-                           g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
-  }
+void
+G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
+  CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
+  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
 }
 
 // Weak Reference Processing support
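The removed G1StringSymbolTableUnlinkTask (part of the change being backed out) distributes table cleanup across GC workers: each worker claims disjoint chunks of buckets by bumping a shared claimed index, tallies privately, then merges its counts with one Atomic::add; the destructor's guarantees check that the claim index ran past the initial table size. A hedged single-file sketch of that claim-by-atomic-counter pattern:

```cpp
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <vector>

// Sketch of the possibly_parallel_unlink() style of work distribution:
// workers grab disjoint chunks via a shared claim index, process them,
// and merge private tallies with one atomic add each.
struct ParallelTableCleaner {
  std::vector<int> buckets;        // stand-in for the hash table
  std::atomic<size_t> claimed{0};  // next unclaimed bucket index
  std::atomic<size_t> removed{0};  // merged result across workers
  static const size_t chunk = 64;

  void work() {                    // run by each GC worker thread
    size_t local_removed = 0;
    for (;;) {
      size_t start = claimed.fetch_add(chunk);
      if (start >= buckets.size()) break;      // table fully claimed
      size_t end = std::min(start + chunk, buckets.size());
      for (size_t i = start; i < end; i++) {
        if (buckets[i] == 0) local_removed++;  // "dead entry" stand-in
      }
    }
    removed.fetch_add(local_removed);          // one merge per worker,
  }                                            // like Atomic::add above
};
```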
@@ -5462,7 +5402,7 @@
 
     G1STWIsAliveClosure is_alive(_g1h);
 
-    G1ParScanThreadState            pss(_g1h, worker_id, NULL);
+    G1ParScanThreadState pss(_g1h, worker_id);
 
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
@@ -5574,7 +5514,7 @@
     ResourceMark rm;
     HandleMark   hm;
 
-    G1ParScanThreadState            pss(_g1h, worker_id, NULL);
+    G1ParScanThreadState            pss(_g1h, worker_id);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
@@ -5700,7 +5640,7 @@
   // JNI refs.
 
   // Use only a single queue for this PSS.
-  G1ParScanThreadState            pss(this, 0, NULL);
+  G1ParScanThreadState pss(this, 0);
 
   // We do not embed a reference processor in the copying/scanning
   // closures while we're actually processing the discovered