src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1974:fd1d227ef1b9

6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
Summary: Enable reference discovery during concurrent marking by setting the reference processor field of the concurrent marking closure. Keep reference objects on the discovered reference lists alive during incremental evacuation pauses until they are processed at the end of concurrent marking.
Reviewed-by: ysr, tonyp

author   johnc
date     Wed, 01 Dec 2010 17:34:02 -0800
parents  631f79e71e90
children d9310331a29c
@@ -1248,10 +1248,13 @@
 
   if (g1_policy()->in_young_gc_mode()) {
     empty_young_list();
     g1_policy()->set_full_young_gcs(true);
   }
+
+  // See the comment in G1CollectedHeap::ref_processing_init() about
+  // how reference processing currently works in G1.
 
   // Temporarily make reference _discovery_ single threaded (non-MT).
   ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
 
   // Temporarily make refs discovery atomic
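
ReferenceProcessorMTMutator in the hunk above is a stack-allocated guard: it saves the processor's current MT-discovery setting, overrides it for the scope of the full collection, and restores it on destruction. A minimal sketch of that save/restore idiom, using illustrative stand-in types rather than HotSpot's real ReferenceProcessor:

    #include <cassert>

    // Illustrative stand-in; the real ReferenceProcessor has many more knobs.
    struct RefProcessor {
      bool mt_discovery;      // is reference discovery multi-threaded?
      bool atomic_discovery;  // is discovery atomic w.r.t. the mutators?
    };

    // Scoped mutator: flips the MT-discovery flag on construction and
    // restores the previous value on destruction, so the override cannot
    // leak past the full-GC code path, even on early returns.
    class ScopedMTDiscovery {
      RefProcessor* _rp;
      bool          _saved;
    public:
      ScopedMTDiscovery(RefProcessor* rp, bool mt)
          : _rp(rp), _saved(rp->mt_discovery) {
        _rp->mt_discovery = mt;
      }
      ~ScopedMTDiscovery() { _rp->mt_discovery = _saved; }
    };

    int main() {
      RefProcessor rp = { true, false };
      {
        ScopedMTDiscovery rp_disc_ser(&rp, false);  // serial discovery, as above
        assert(!rp.mt_discovery);
        // ... single-threaded full-GC reference discovery happens here ...
      }
      assert(rp.mt_discovery);  // previous setting restored on scope exit
      return 0;
    }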
@@ -2010,10 +2013,28 @@
 
   return JNI_OK;
 }
 
 void G1CollectedHeap::ref_processing_init() {
+  // Reference processing in G1 currently works as follows:
+  //
+  // * There is only one reference processor instance that
+  //   'spans' the entire heap. It is created by the code
+  //   below.
+  // * Reference discovery is not enabled during an incremental
+  //   pause (see 6484982).
+  // * Discovered refs are not enqueued, nor are they processed,
+  //   during an incremental pause (see 6484982).
+  // * Reference discovery is enabled at initial marking.
+  // * Reference discovery is disabled and the discovered
+  //   references are processed, etc., during remarking.
+  // * Reference discovery is MT (see below).
+  // * Reference discovery requires a barrier (see below).
+  // * Reference processing is currently not MT (see 6608385).
+  // * A full GC enables (non-MT) reference discovery and
+  //   processes any discovered references.
+
   SharedHeap::ref_processing_init();
   MemRegion mr = reserved_region();
   _ref_processor = ReferenceProcessor::create_ref_processor(
                                          mr,    // span
                                          false, // Reference discovery is not atomic
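
The bulleted lifecycle in ref_processing_init() compresses into a few lines of pseudo-implementation. The toy processor below models only the on/off discovery latch and the single heap-spanning discovered list; the names and structure are illustrative, not HotSpot's:

    #include <cstdio>
    #include <vector>

    struct ToyRefProcessor {
      bool discovery_enabled;
      std::vector<void*> discovered;  // the single, heap-spanning list

      ToyRefProcessor() : discovery_enabled(false) {}

      void enable_discovery()  { discovery_enabled = true;  }  // initial mark
      void disable_discovery() { discovery_enabled = false; }  // remark
      void discover(void* ref) {
        if (discovery_enabled) discovered.push_back(ref);      // marking threads
      }
      void process_discovered() {                              // remark / full GC
        std::printf("processing %u discovered refs\n", (unsigned) discovered.size());
        discovered.clear();
      }
    };

    int main() {
      ToyRefProcessor rp;
      int a, b;
      rp.enable_discovery();   // at initial marking
      rp.discover(&a);         // during concurrent marking
      rp.discover(&b);
      // Incremental pauses neither enqueue nor process the list (6484982);
      // they only keep its entries alive (see the weak_oops_do hunk below).
      rp.disable_discovery();  // at remarking ...
      rp.process_discovered(); // ... the discovered refs are processed
      return 0;
    }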
@@ -3229,10 +3250,13 @@
     Universe::verify(false);
   }
 
   COMPILER2_PRESENT(DerivedPointerTable::clear());
 
+  // See the comment in G1CollectedHeap::ref_processing_init() for
+  // how reference processing currently works in G1.
+  //
   // We want to turn off ref discovery, if necessary, and turn it back
   // on again later if we do. XXX Dubious: why is discovery disabled?
   bool was_enabled = ref_processor()->discovery_enabled();
   if (was_enabled) ref_processor()->disable_discovery();
 
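
The hunk shows only the "snapshot and disable" half of the idiom; the matching re-enable happens further down the same method, outside this excerpt. A self-contained sketch of the pattern, with an illustrative latch type:

    struct DiscoveryLatch { bool enabled; };

    // Disable concurrent-cycle discovery for the duration of a full GC,
    // which does its own (non-MT) discovery, then restore whatever
    // state the caller had.
    void full_collection(DiscoveryLatch* d) {
      bool was_enabled = d->enabled;        // snapshot the caller's state
      if (was_enabled) d->enabled = false;  // turn discovery off, if needed

      // ... full-GC marking and reference processing run here ...

      if (was_enabled) d->enabled = true;   // turn it back on again later
    }

    int main() {
      DiscoveryLatch d = { true };
      full_collection(&d);
      return d.enabled ? 0 : 1;  // restored to true
    }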
@@ -3658,10 +3682,11 @@
   // We keep a separate list of all regions that have been alloc regions in
   // the current collection pause. Forget that now. This method will
   // untag the GC alloc regions and tear down the GC alloc region
   // list. It's desirable that no regions are tagged as GC alloc
   // outside GCs.
+
   forget_alloc_region_list();
 
   // The current alloc regions contain objs that have survived
   // collection. Make them no longer GC alloc regions.
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
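
The untagging the comments describe reduces to a loop over the per-purpose current alloc regions. GCAllocPurposeCount and the two purposes mirror the enum in the source of this era; HeapRegionStub and its field name are stand-ins:

    #include <cstddef>

    enum GCAllocPurpose { GCAllocForTenured, GCAllocForSurvived, GCAllocPurposeCount };

    struct HeapRegionStub { bool is_gc_alloc_region; };

    // Untag every current GC alloc region so that no region stays
    // tagged as a GC alloc region outside of a GC pause.
    void retire_all_alloc_regions(HeapRegionStub* cur[GCAllocPurposeCount]) {
      for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
        if (cur[ap] != NULL) {
          cur[ap]->is_gc_alloc_region = false;
          cur[ap] = NULL;
        }
      }
    }

    int main() {
      HeapRegionStub tenured = { true }, survivor = { true };
      HeapRegionStub* cur[GCAllocPurposeCount] = { &tenured, &survivor };
      retire_all_alloc_regions(cur);
      return (tenured.is_gc_alloc_region || survivor.is_gc_alloc_region) ? 1 : 0;
    }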
@@ -4663,10 +4688,14 @@
   if (scan_rs != NULL) {
     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
   }
   // Finish with the ref_processor roots.
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
+    // We need to treat the discovered reference lists as roots and
+    // keep the entries on them (added by the marking threads) alive
+    // until they can be processed at the end of marking.
+    ref_processor()->weak_oops_do(scan_non_heap_roots);
     ref_processor()->oops_do(scan_non_heap_roots);
   }
   g1_policy()->record_collection_pause_end_G1_strong_roots();
   _process_strong_tasks->all_tasks_completed();
 }
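
This is the heart of the changeset: the heads of the discovered-reference lists are handed to the same closure that scans the other non-heap roots, so entries added by the concurrent marking threads survive evacuation pauses until remark can process them. A toy model of that root treatment, with OopClosure and the list layout standing in for HotSpot's:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct OopClosure {
      virtual void do_oop(void** p) = 0;
      virtual ~OopClosure() {}
    };

    struct DiscoveredLists {
      std::vector<void*> heads;  // one head per (reference type, queue) list

      // Analogue of ReferenceProcessor::weak_oops_do(): visit each list
      // head as if it were a root, letting the closure evacuate the
      // object and update the head to its new location.
      void weak_oops_do(OopClosure* cl) {
        for (std::size_t i = 0; i < heads.size(); ++i) {
          if (heads[i] != NULL) cl->do_oop(&heads[i]);
        }
      }
    };

    // A closure that just counts the roots it was shown.
    struct CountClosure : public OopClosure {
      int visited;
      CountClosure() : visited(0) {}
      virtual void do_oop(void** p) { (void) p; ++visited; }
    };

    int main() {
      int a, b;
      DiscoveredLists lists;
      lists.heads.push_back(&a);
      lists.heads.push_back(&b);
      CountClosure cl;
      lists.weak_oops_do(&cl);
      std::printf("kept %d discovered entries alive\n", cl.visited);
      return 0;
    }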
@@ -4728,10 +4757,15 @@
   set_par_threads(0);
   // Is this the right thing to do here? We don't save marks
   // on individual heap regions when we allocate from
   // them in parallel, so this seems like the correct place for this.
   retire_all_alloc_regions();
+
+  // Weak root processing.
+  // Note: when JSR 292 is enabled and code blobs can contain
+  // non-perm oops, we will need to process the code blobs
+  // here too.
   {
     G1IsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
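
JNIHandles::weak_oops_do() pairs a liveness predicate with a keep-alive closure: handles whose referents pass the predicate are kept alive (and updated if the object moved); the rest are cleared. A compact sketch of that contract, with stand-in types for G1IsAliveClosure and G1KeepAliveClosure:

    #include <cstddef>
    #include <vector>

    struct Obj { bool marked; };

    // Stand-ins for G1IsAliveClosure / G1KeepAliveClosure.
    struct IsAlive {
      bool operator()(const Obj* o) const { return o != NULL && o->marked; }
    };
    struct KeepAlive {
      void operator()(Obj** p) const { (void) p; /* evacuate and update *p here */ }
    };

    // Walk the weak handles: live referents are kept alive (and the
    // handle updated if the object moved); dead referents are cleared.
    void weak_oops_do(std::vector<Obj*>& handles,
                      const IsAlive& is_alive, const KeepAlive& keep_alive) {
      for (std::size_t i = 0; i < handles.size(); ++i) {
        if (handles[i] == NULL) continue;
        if (is_alive(handles[i])) {
          keep_alive(&handles[i]);
        } else {
          handles[i] = NULL;  // clear the dead weak handle
        }
      }
    }

    int main() {
      Obj live = { true }, dead = { false };
      std::vector<Obj*> handles;
      handles.push_back(&live);
      handles.push_back(&dead);
      weak_oops_do(handles, IsAlive(), KeepAlive());
      return (handles[0] == &live && handles[1] == NULL) ? 0 : 1;
    }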