# HG changeset patch
# User tonyp
# Date 1320721872 18000
# Node ID 8aae2050e83e25ebd19d62bc955e9d155c6935da
# Parent  ed80554efa25daff2ee919c6a69cdae3c4d8c112
7092309: G1: introduce old region set
Summary: Keep track of all the old regions in the heap with a heap region set.
Reviewed-by: brutisso, johnc

diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/concurrentMark.cpp
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp  Wed Nov 02 08:04:23 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp  Mon Nov 07 22:11:12 2011 -0500
@@ -1518,6 +1518,7 @@
   size_t _regions_claimed;
   size_t _freed_bytes;
   FreeRegionList* _local_cleanup_list;
+  OldRegionSet* _old_proxy_set;
   HumongousRegionSet* _humongous_proxy_set;
   HRRSCleanupTask* _hrrs_cleanup_task;
   double _claimed_region_time;
@@ -1527,6 +1528,7 @@
 
   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, int worker_num,
                              FreeRegionList* local_cleanup_list,
+                             OldRegionSet* old_proxy_set,
                              HumongousRegionSet* humongous_proxy_set,
                              HRRSCleanupTask* hrrs_cleanup_task);
   size_t freed_bytes() { return _freed_bytes; }
@@ -1557,9 +1559,11 @@
   void work(int i) {
     double start = os::elapsedTime();
     FreeRegionList local_cleanup_list("Local Cleanup List");
+    OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
     HRRSCleanupTask hrrs_cleanup_task;
     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list,
+                                           &old_proxy_set,
                                            &humongous_proxy_set,
                                            &hrrs_cleanup_task);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -1573,6 +1577,7 @@
     // Now update the lists
     _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
                                             NULL /* free_list */,
+                                            &old_proxy_set,
                                             &humongous_proxy_set,
                                             true /* par */);
     {
@@ -1643,6 +1648,7 @@
 
 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, int worker_num,
                            FreeRegionList* local_cleanup_list,
+                           OldRegionSet* old_proxy_set,
                            HumongousRegionSet* humongous_proxy_set,
                            HRRSCleanupTask* hrrs_cleanup_task) :
   _g1(g1), _worker_num(worker_num),
@@ -1650,6 +1656,7 @@
   _freed_bytes(0),
   _claimed_region_time(0.0),
   _max_region_time(0.0),
   _local_cleanup_list(local_cleanup_list),
+  _old_proxy_set(old_proxy_set),
   _humongous_proxy_set(humongous_proxy_set),
   _hrrs_cleanup_task(hrrs_cleanup_task) { }
@@ -1665,6 +1672,7 @@
     _g1->free_region_if_empty(hr,
                               &_freed_bytes,
                               _local_cleanup_list,
+                              _old_proxy_set,
                               _humongous_proxy_set,
                               _hrrs_cleanup_task,
                               true /* par */);
@@ -1689,6 +1697,7 @@
     return;
   }
 
+  HRSPhaseSetter x(HRSPhaseCleanup);
   g1h->verify_region_sets_optional();
 
   if (VerifyDuringGC) {
diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  Wed Nov 02 08:04:23 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  Mon Nov 07 22:11:12 2011 -0500
@@ -1203,6 +1203,7 @@
     Universe::print_heap_before_gc();
   }
 
+  HRSPhaseSetter x(HRSPhaseFullGC);
   verify_region_sets_optional();
 
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
@@ -1263,7 +1264,6 @@
     release_mutator_alloc_region();
     abandon_gc_alloc_regions();
     g1_rem_set()->cleanupHRRS();
-    tear_down_region_lists();
 
     // We should call this after we retire any currently active alloc
     // regions so that all the ALLOC / RETIRE events are generated
@@ -1278,7 +1278,7 @@
     g1_policy()->clear_incremental_cset();
     g1_policy()->stop_incremental_cset_building();
 
-    empty_young_list();
+    tear_down_region_sets(false /* free_list_only */);
     g1_policy()->set_full_young_gcs(true);
 
     // See the comments in g1CollectedHeap.hpp and
@@ -1301,9 +1301,7 @@
     }
 
     assert(free_regions() == 0, "we should not have added any free regions");
-    rebuild_region_lists();
-
-    _summary_bytes_used = recalculate_used();
+    rebuild_region_sets(false /* free_list_only */);
 
     // Enqueue any discovered reference objects that have
     // not been removed from the discovered lists.
@@ -1764,9 +1762,9 @@
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
   // remove only the ones that we need to remove.
-  tear_down_region_lists();  // We will rebuild them in a moment.
+  tear_down_region_sets(true /* free_list_only */);
   shrink_helper(shrink_bytes);
-  rebuild_region_lists();
+  rebuild_region_sets(true /* free_list_only */);
 
   _hrs.verify_optional();
   verify_region_sets_optional();
@@ -1799,6 +1797,7 @@
   _full_collection(false),
   _free_list("Master Free List"),
   _secondary_free_list("Secondary Free List"),
+  _old_set("Old Set"),
   _humongous_set("Master Humongous Set"),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
@@ -3352,6 +3351,7 @@
     Universe::print_heap_before_gc();
   }
 
+  HRSPhaseSetter x(HRSPhaseEvacuation);
   verify_region_sets_optional();
   verify_dirty_young_regions();
@@ -3774,6 +3774,11 @@
       !retained_region->is_empty() &&
       !retained_region->isHumongous()) {
     retained_region->set_saved_mark();
+    // The retained region was added to the old region set when it was
+    // retired. We have to remove it now, since we don't allow regions
+    // we allocate to in the region sets. We'll re-add it later, when
+    // it's retired again.
+    _old_set.remove(retained_region);
     _old_gc_alloc_region.set(retained_region);
     _hr_printer.reuse(retained_region);
   }
@@ -5338,6 +5343,7 @@
 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
                                            size_t* pre_used,
                                            FreeRegionList* free_list,
+                                           OldRegionSet* old_proxy_set,
                                            HumongousRegionSet* humongous_proxy_set,
                                            HRRSCleanupTask* hrrs_cleanup_task,
                                            bool par) {
@@ -5346,6 +5352,7 @@
       assert(hr->startsHumongous(), "we should only see starts humongous");
       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
     } else {
+      _old_set.remove_with_proxy(hr, old_proxy_set);
       free_region(hr, pre_used, free_list, par);
     }
   } else {
@@ -5402,6 +5409,7 @@
 
 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
                                        FreeRegionList* free_list,
+                                       OldRegionSet* old_proxy_set,
                                        HumongousRegionSet* humongous_proxy_set,
                                        bool par) {
   if (pre_used > 0) {
@@ -5417,6 +5425,10 @@
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
     _free_list.add_as_head(free_list);
   }
+  if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
+    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
+    _old_set.update_from_proxy(old_proxy_set);
+  }
   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
     _humongous_set.update_from_proxy(humongous_proxy_set);
@@ -5614,6 +5626,8 @@
       cur->set_young_index_in_cset(-1);
       cur->set_not_young();
       cur->set_evacuation_failed(false);
+      // The region is now considered to be old.
+      _old_set.add(cur);
     }
     cur = next;
   }
@@ -5629,6 +5643,7 @@
   young_time_ms += elapsed_ms;
 
   update_sets_after_freeing_regions(pre_used, &local_free_list,
+                                    NULL /* old_proxy_set */,
                                     NULL /* humongous_proxy_set */,
                                     false /* par */);
   policy->record_young_free_cset_time_ms(young_time_ms);
@@ -5740,52 +5755,106 @@
   return ret;
 }
 
-void G1CollectedHeap::empty_young_list() {
-  assert(heap_lock_held_for_gc(),
-         "the heap lock should already be held by or for this thread");
-
-  _young_list->empty_list();
-}
-
-// Done at the start of full GC.
-void G1CollectedHeap::tear_down_region_lists() {
-  _free_list.remove_all();
-}
-
-class RegionResetter: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  FreeRegionList _local_free_list;
+class TearDownRegionSetsClosure : public HeapRegionClosure {
+private:
+  OldRegionSet *_old_set;
 
 public:
-  RegionResetter() : _g1h(G1CollectedHeap::heap()),
-                     _local_free_list("Local Free List for RegionResetter") { }
+  TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
 
   bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous()) return false;
-    if (r->top() > r->bottom()) {
-      if (r->top() < r->end()) {
-        Copy::fill_to_words(r->top(),
-                            pointer_delta(r->end(), r->top()));
-      }
+    if (r->is_empty()) {
+      // We ignore empty regions, we'll empty the free list afterwards
+    } else if (r->is_young()) {
+      // We ignore young regions, we'll empty the young list afterwards
+    } else if (r->isHumongous()) {
+      // We ignore humongous regions, we're not tearing down the
+      // humongous region set
     } else {
-      assert(r->is_empty(), "tautology");
-      _local_free_list.add_as_tail(r);
+      // The rest should be old
+      _old_set->remove(r);
     }
     return false;
   }
 
-  void update_free_lists() {
-    _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL,
-                                            false /* par */);
+  ~TearDownRegionSetsClosure() {
+    assert(_old_set->is_empty(), "post-condition");
   }
 };
 
-// Done at the end of full GC.
-void G1CollectedHeap::rebuild_region_lists() {
-  // This needs to go at the end of the full GC.
-  RegionResetter rs;
-  heap_region_iterate(&rs);
-  rs.update_free_lists();
+void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  if (!free_list_only) {
+    TearDownRegionSetsClosure cl(&_old_set);
+    heap_region_iterate(&cl);
+
+    // Need to do this after the heap iteration to be able to
+    // recognize the young regions and ignore them during the iteration.
+    _young_list->empty_list();
+  }
+  _free_list.remove_all();
+}
+
+class RebuildRegionSetsClosure : public HeapRegionClosure {
+private:
+  bool _free_list_only;
+  OldRegionSet* _old_set;
+  FreeRegionList* _free_list;
+  size_t _total_used;
+
+public:
+  RebuildRegionSetsClosure(bool free_list_only,
+                           OldRegionSet* old_set, FreeRegionList* free_list) :
+    _free_list_only(free_list_only),
+    _old_set(old_set), _free_list(free_list), _total_used(0) {
+    assert(_free_list->is_empty(), "pre-condition");
+    if (!free_list_only) {
+      assert(_old_set->is_empty(), "pre-condition");
+    }
+  }
+
+  bool doHeapRegion(HeapRegion* r) {
+    if (r->continuesHumongous()) {
+      return false;
+    }
+
+    if (r->is_empty()) {
+      // Add free regions to the free list
+      _free_list->add_as_tail(r);
+    } else if (!_free_list_only) {
+      assert(!r->is_young(), "we should not come across young regions");
+
+      if (r->isHumongous()) {
+        // We ignore humongous regions, we left the humongous set unchanged
+      } else {
+        // The rest should be old, add them to the old set
+        _old_set->add(r);
+      }
+      _total_used += r->used();
+    }
+
+    return false;
+  }
+
+  size_t total_used() {
+    return _total_used;
+  }
+};
+
+void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
+  heap_region_iterate(&cl);
+
+  if (!free_list_only) {
+    _summary_bytes_used = cl.total_used();
+  }
+  assert(_summary_bytes_used == recalculate_used(),
+         err_msg("inconsistent _summary_bytes_used, "
                 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
                 _summary_bytes_used, recalculate_used()));
 }
 
 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
@@ -5882,6 +5951,8 @@
     g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
     if (ap == GCAllocForSurvived) {
       young_list()->add_survivor_region(alloc_region);
+    } else {
+      _old_set.add(alloc_region);
     }
     _hr_printer.retire(alloc_region);
   }
@@ -5913,15 +5984,17 @@
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
+  FreeRegionList* _free_list;
+  OldRegionSet* _old_set;
   HumongousRegionSet* _humongous_set;
-  FreeRegionList* _free_list;
   size_t _region_count;
 
 public:
-  VerifyRegionListsClosure(HumongousRegionSet* humongous_set,
+  VerifyRegionListsClosure(OldRegionSet* old_set,
+                           HumongousRegionSet* humongous_set,
                            FreeRegionList* free_list) :
-    _humongous_set(humongous_set), _free_list(free_list),
-    _region_count(0) { }
+    _old_set(old_set), _humongous_set(humongous_set),
+    _free_list(free_list), _region_count(0) { }
 
   size_t region_count() { return _region_count; }
@@ -5938,6 +6011,8 @@
       _humongous_set->verify_next_region(hr);
     } else if (hr->is_empty()) {
       _free_list->verify_next_region(hr);
+    } else {
+      _old_set->verify_next_region(hr);
     }
     return false;
   }
@@ -5964,6 +6039,7 @@
     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
     _secondary_free_list.verify();
   }
+  _old_set.verify();
   _humongous_set.verify();
 
   // If a concurrent region freeing operation is in progress it will
@@ -5987,12 +6063,14 @@
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
+  _old_set.verify_start();
   _humongous_set.verify_start();
   _free_list.verify_start();
 
-  VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
+  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
   heap_region_iterate(&cl);
 
+  _old_set.verify_end();
   _humongous_set.verify_end();
   _free_list.verify_end();
 }
diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp  Wed Nov 02 08:04:23 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp  Mon Nov 07 22:11:12 2011 -0500
@@ -239,6 +239,9 @@
   // master free list when appropriate.
   SecondaryFreeRegionList _secondary_free_list;
 
+  // It keeps track of the old regions.
+  MasterOldRegionSet _old_set;
+
   // It keeps track of the humongous regions.
   MasterHumongousRegionSet _humongous_set;
 
@@ -248,10 +251,21 @@
   // The block offset table for the G1 heap.
   G1BlockOffsetSharedArray* _bot_shared;
 
-  // Move all of the regions off the free lists, then rebuild those free
-  // lists, before and after full GC.
-  void tear_down_region_lists();
-  void rebuild_region_lists();
+  // Tears down the region sets / lists so that they are empty and the
+  // regions on the heap do not belong to a region set / list. The
+  // only exception is the humongous set which we leave unaltered. If
+  // free_list_only is true, it will only tear down the master free
+  // list. It is called before a Full GC (free_list_only == false) or
+  // before heap shrinking (free_list_only == true).
+  void tear_down_region_sets(bool free_list_only);
+
+  // Rebuilds the region sets / lists so that they are repopulated to
+  // reflect the contents of the heap. The only exception is the
+  // humongous set which was not torn down in the first place. If
+  // free_list_only is true, it will only rebuild the master free
+  // list. It is called after a Full GC (free_list_only == false) or
+  // after heap shrinking (free_list_only == true).
+  void rebuild_region_sets(bool free_list_only);
 
   // The sequence of all heap regions in the heap.
   HeapRegionSeq _hrs;
@@ -1124,6 +1138,10 @@
     }
   }
 
+  void old_set_remove(HeapRegion* hr) {
+    _old_set.remove(hr);
+  }
+
   void set_free_regions_coming();
   void reset_free_regions_coming();
   bool free_regions_coming() { return _free_regions_coming; }
@@ -1153,6 +1171,7 @@
   void free_region_if_empty(HeapRegion* hr,
                             size_t* pre_used,
                             FreeRegionList* free_list,
+                            OldRegionSet* old_proxy_set,
                             HumongousRegionSet* humongous_proxy_set,
                             HRRSCleanupTask* hrrs_cleanup_task,
                             bool par);
@@ -1163,6 +1182,7 @@
   // (if par is true, it will do so by taking the ParGCRareEvent_lock).
   void update_sets_after_freeing_regions(size_t pre_used,
                                          FreeRegionList* free_list,
+                                         OldRegionSet* old_proxy_set,
                                          HumongousRegionSet* humongous_proxy_set,
                                          bool par);
 
@@ -1452,8 +1472,6 @@
   // asserted to be this type.
   static G1CollectedHeap* heap();
 
-  void empty_young_list();
-
   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups
diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  Wed Nov 02 08:04:23 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  Mon Nov 07 22:11:12 2011 -0500
@@ -3015,6 +3015,7 @@
     hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                     avg_prediction);
     if (hr != NULL) {
+      _g1->old_set_remove(hr);
       double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
       time_remaining_ms -= predicted_time_ms;
       predicted_pause_time_ms += predicted_time_ms;
diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp  Wed Nov 02 08:04:23 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp  Mon Nov 07 22:11:12 2011 -0500
@@ -236,6 +236,7 @@
     // at the end of the GC, so no point in updating those values here.
     _g1h->update_sets_after_freeing_regions(0, /* pre_used */
                                             NULL, /* free_list */
+                                            NULL, /* old_proxy_set */
                                             &_humongous_proxy_set,
                                             false /* par */);
   }
diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/heapRegionSet.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp  Wed Nov 02 08:04:23 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp  Mon Nov 07 22:11:12 2011 -0500
@@ -26,6 +26,7 @@
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 
 size_t HeapRegionSetBase::_unrealistically_long_length = 0;
+HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone;
 
 //////////////////// HeapRegionSetBase ////////////////////
 
@@ -192,6 +193,17 @@
   _verify_in_progress = false;
 }
 
+void HeapRegionSetBase::clear_phase() {
+  assert(_phase != HRSPhaseNone, "pre-condition");
+  _phase = HRSPhaseNone;
+}
+
+void HeapRegionSetBase::set_phase(HRSPhase phase) {
+  assert(_phase == HRSPhaseNone, "pre-condition");
+  assert(phase != HRSPhaseNone, "pre-condition");
+  _phase = phase;
+}
+
 void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
   out->cr();
   out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/heapRegionSet.hpp
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp  Wed Nov 02 08:04:23 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp  Mon Nov 07 22:11:12 2011 -0500
@@ -47,8 +47,18 @@
 
 class hrs_ext_msg;
 
+typedef enum {
+  HRSPhaseNone,
+  HRSPhaseEvacuation,
+  HRSPhaseCleanup,
+  HRSPhaseFullGC
+} HRSPhase;
+
+class HRSPhaseSetter;
+
 class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
   friend class hrs_ext_msg;
+  friend class HRSPhaseSetter;
 
 protected:
   static size_t calculate_region_num(HeapRegion* hr);
@@ -80,6 +90,15 @@
   size_t _calc_total_capacity_bytes;
   size_t _calc_total_used_bytes;
 
+  // This is here so that it can be used in the subclasses to assert
+  // something different depending on which phase the GC is in. This
+  // can be particularly helpful in the check_mt_safety() methods.
+  static HRSPhase _phase;
+
+  // Only used by HRSPhaseSetter.
+  static void clear_phase();
+  static void set_phase(HRSPhase phase);
+
   // verify_region() is used to ensure that the contents of a region
   // added to / removed from a set are consistent. Different sets
   // make different assumptions about the regions added to them. So
@@ -177,6 +196,16 @@
   }
 };
 
+class HRSPhaseSetter {
+public:
+  HRSPhaseSetter(HRSPhase phase) {
+    HeapRegionSetBase::set_phase(phase);
+  }
+  ~HRSPhaseSetter() {
+    HeapRegionSetBase::clear_phase();
+  }
+};
+
 // These two macros are provided for convenience, to keep the uses of
 // these two asserts a bit more concise.
diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/heapRegionSets.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegionSets.cpp  Wed Nov 02 08:04:23 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp  Mon Nov 07 22:11:12 2011 -0500
@@ -26,6 +26,17 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 
+// Note on the check_mt_safety() methods below:
+//
+// Verification of the "master" heap region sets / lists that are
+// maintained by G1CollectedHeap is always done during a STW pause and
+// by the VM thread at the start / end of the pause. The standard
+// verification methods all assert check_mt_safety(). This is
+// important as it ensures that verification is done without
+// concurrent updates taking place at the same time. It follows, that,
+// for the "master" heap region sets / lists, the check_mt_safety()
+// method should include the VM thread / STW case.
+
 //////////////////// FreeRegionList ////////////////////
 
 const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
@@ -33,7 +44,7 @@
     return "the region should not be young";
   }
   // The superclass will check that the region is empty and
-  // not-humongous.
+  // not humongous.
   return HeapRegionLinkedList::verify_region_extra(hr);
 }
 
@@ -58,12 +69,16 @@
   // (b) If we're not at a safepoint, operations on the master free
   // list should be invoked while holding the Heap_lock.
 
-  guarantee((SafepointSynchronize::is_at_safepoint() &&
-             (Thread::current()->is_VM_thread() ||
-              FreeList_lock->owned_by_self())) ||
-            (!SafepointSynchronize::is_at_safepoint() &&
-             Heap_lock->owned_by_self()),
-            hrs_ext_msg(this, "master free list MT safety protocol"));
+  if (SafepointSynchronize::is_at_safepoint()) {
+    guarantee(Thread::current()->is_VM_thread() ||
+              FreeList_lock->owned_by_self(),
+              hrs_ext_msg(this, "master free list MT safety protocol "
+                                "at a safepoint"));
+  } else {
+    guarantee(Heap_lock->owned_by_self(),
+              hrs_ext_msg(this, "master free list MT safety protocol "
+                                "outside a safepoint"));
+  }
 
   return FreeRegionList::check_mt_safety();
 }
@@ -81,6 +96,48 @@
   return FreeRegionList::check_mt_safety();
 }
 
+//////////////////// OldRegionSet ////////////////////
+
+const char* OldRegionSet::verify_region_extra(HeapRegion* hr) {
+  if (hr->is_young()) {
+    return "the region should not be young";
+  }
+  // The superclass will check that the region is not empty and not
+  // humongous.
+  return HeapRegionSet::verify_region_extra(hr);
+}
+
+//////////////////// MasterOldRegionSet ////////////////////
+
+bool MasterOldRegionSet::check_mt_safety() {
+  // Master Old Set MT safety protocol:
+  // (a) If we're at a safepoint, operations on the master old set
+  // should be invoked:
+  // - by the VM thread (which will serialize them), or
+  // - by the GC workers while holding the FreeList_lock, if we're
+  //   at a safepoint for an evacuation pause (this lock is taken
+  //   anyway when an GC alloc region is retired so that a new one
+  //   is allocated from the free list), or
+  // - by the GC workers while holding the OldSets_lock, if we're at a
+  //   safepoint for a cleanup pause.
+  // (b) If we're not at a safepoint, operations on the master old set
+  // should be invoked while holding the Heap_lock.
+
+  if (SafepointSynchronize::is_at_safepoint()) {
+    guarantee(Thread::current()->is_VM_thread() ||
+              _phase == HRSPhaseEvacuation && FreeList_lock->owned_by_self() ||
+              _phase == HRSPhaseCleanup && OldSets_lock->owned_by_self(),
+              hrs_ext_msg(this, "master old set MT safety protocol "
+                                "at a safepoint"));
+  } else {
+    guarantee(Heap_lock->owned_by_self(),
+              hrs_ext_msg(this, "master old set MT safety protocol "
+                                "outside a safepoint"));
+  }
+
+  return OldRegionSet::check_mt_safety();
+}
+
 //////////////////// HumongousRegionSet ////////////////////
 
 const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
@@ -103,11 +160,16 @@
   // (b) If we're not at a safepoint, operations on the master
   // humongous set should be invoked while holding the Heap_lock.
 
-  guarantee((SafepointSynchronize::is_at_safepoint() &&
-             (Thread::current()->is_VM_thread() ||
-              OldSets_lock->owned_by_self())) ||
-            (!SafepointSynchronize::is_at_safepoint() &&
-             Heap_lock->owned_by_self()),
-            hrs_ext_msg(this, "master humongous set MT safety protocol"));
+  if (SafepointSynchronize::is_at_safepoint()) {
+    guarantee(Thread::current()->is_VM_thread() ||
+              OldSets_lock->owned_by_self(),
+              hrs_ext_msg(this, "master humongous set MT safety protocol "
+                                "at a safepoint"));
+  } else {
+    guarantee(Heap_lock->owned_by_self(),
+              hrs_ext_msg(this, "master humongous set MT safety protocol "
+                                "outside a safepoint"));
+  }
+
   return HumongousRegionSet::check_mt_safety();
 }
diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/heapRegionSets.hpp
--- a/src/share/vm/gc_implementation/g1/heapRegionSets.hpp  Wed Nov 02 08:04:23 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSets.hpp  Mon Nov 07 22:11:12 2011 -0500
@@ -61,6 +61,30 @@
   SecondaryFreeRegionList(const char* name) : FreeRegionList(name) { }
 };
 
+//////////////////// OldRegionSet ////////////////////
+
+class OldRegionSet : public HeapRegionSet {
+protected:
+  virtual const char* verify_region_extra(HeapRegion* hr);
+
+  virtual bool regions_humongous() { return false; }
+  virtual bool regions_empty()     { return false; }
+
+public:
+  OldRegionSet(const char* name) : HeapRegionSet(name) { }
+};
+
+//////////////////// MasterOldRegionSet ////////////////////
+
+class MasterOldRegionSet : public OldRegionSet {
+private:
+protected:
+  virtual bool check_mt_safety();
+
+public:
+  MasterOldRegionSet(const char* name) : OldRegionSet(name) { }
+};
+
 //////////////////// HumongousRegionSet ////////////////////
 
 class HumongousRegionSet : public HeapRegionSet {
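The change leans on two mechanisms that are easy to miss while reading the diff: an RAII phase marker (HRSPhaseSetter sets HeapRegionSetBase::_phase for the duration of a pause so that check_mt_safety() can assert which lock must be held), and worker-local proxy sets that are merged into a master set in a single locked step (update_sets_after_freeing_regions() folds an old proxy set into the master old set under OldSets_lock). The standalone C++ sketch below is illustrative only; it is not part of the changeset and not HotSpot code, and all names in it (Phase, PhaseSetter, MasterSet, Region) are hypothetical stand-ins for the corresponding HotSpot classes.

// Illustrative sketch only -- NOT part of the changeset above. It models the
// RAII "phase" marker (cf. HRSPhaseSetter) and the per-worker proxy sets that
// are merged into a master set under a lock (cf. update_from_proxy() called
// while holding OldSets_lock). All names here are hypothetical.
#include <cassert>
#include <mutex>
#include <vector>

enum class Phase { None, Evacuation, Cleanup, FullGC };

class PhaseSetter {
public:
  explicit PhaseSetter(Phase p) {
    assert(current == Phase::None);  // phases must not nest
    current = p;
  }
  ~PhaseSetter() { current = Phase::None; }
  static Phase current;
};
Phase PhaseSetter::current = Phase::None;

struct Region { int id; };

class MasterSet {
public:
  using Proxy = std::vector<Region*>;  // filled by one worker, no locking needed

  // Merge a worker-local proxy into the master set in one critical section,
  // then empty the proxy so it can be reused.
  void update_from_proxy(Proxy& proxy) {
    if (proxy.empty()) return;
    std::lock_guard<std::mutex> x(_lock);  // cf. OldSets_lock
    _regions.insert(_regions.end(), proxy.begin(), proxy.end());
    proxy.clear();
  }

  size_t length() const { return _regions.size(); }

private:
  std::mutex _lock;
  std::vector<Region*> _regions;
};

int main() {
  MasterSet old_set;
  Region r0{0}, r1{1};
  {
    PhaseSetter x(Phase::Cleanup);   // cf. HRSPhaseSetter x(HRSPhaseCleanup)
    MasterSet::Proxy local_proxy;    // cf. "Local Cleanup Old Proxy Set"
    local_proxy.push_back(&r0);
    local_proxy.push_back(&r1);
    old_set.update_from_proxy(local_proxy);  // single locked merge per worker
  }                                  // phase cleared on scope exit
  assert(old_set.length() == 2);
  return 0;
}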