# HG changeset patch
# User jcoomes
# Date 1340993738 25200
# Node ID 22de825d6faf71f04089e08688ecee4bc88cfa7f
# Parent de2f17add1fb8ea1452fa5eb2ab8610edf31c379
# Parent 7994a5a35fcfd2bf47ec879653b739cc81ad3005
Merge

diff -r de2f17add1fb -r 22de825d6faf src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Jun 28 10:35:28 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Jun 29 11:15:38 2012 -0700
@@ -4750,9 +4750,6 @@
       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

-      // Clean up any par-expanded rem sets.
-      HeapRegionRemSet::par_cleanup();
-
       if (ParallelGCVerbose) {
         MutexLocker x(stats_lock());
         pss.print_termination_stats(worker_id);
diff -r de2f17add1fb -r 22de825d6faf src/share/vm/gc_implementation/g1/g1_globals.hpp
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Jun 28 10:35:28 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Fri Jun 29 11:15:38 2012 -0700
@@ -53,6 +53,9 @@
   develop(bool, G1TraceMarkStackOverflow, false,                          \
           "If true, extra debugging code for CM restart for ovflw.")      \
                                                                           \
+  develop(bool, G1TraceHeapRegionRememberedSet, false,                    \
+          "Enables heap region remembered set debug logs")                \
+                                                                          \
   diagnostic(bool, G1SummarizeConcMark, false,                            \
           "Summarize concurrent mark info")                               \
                                                                           \
diff -r de2f17add1fb -r 22de825d6faf src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Jun 28 10:35:28 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Fri Jun 29 11:15:38 2012 -0700
@@ -30,13 +30,10 @@
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "memory/allocation.hpp"
 #include "memory/space.inline.hpp"
+#include "oops/oop.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/globalDefinitions.hpp"

-#define HRRS_VERBOSE 0
-
-#define PRT_COUNT_OCCUPIED 1
-
 // OtherRegionsTable

 class PerRegionTable: public CHeapObj {
@@ -45,14 +42,10 @@

   HeapRegion*     _hr;
   BitMap          _bm;
-#if PRT_COUNT_OCCUPIED
   jint            _occupied;
-#endif
-  PerRegionTable* _next_free;
-  PerRegionTable* next_free() { return _next_free; }
-  void set_next_free(PerRegionTable* prt) { _next_free = prt; }
-
+  // next pointer for free/allocated list
+  PerRegionTable* _next;

   static PerRegionTable* _free_list;

@@ -69,63 +62,25 @@
   // We need access in order to union things into the base table.
   BitMap* bm() { return &_bm; }

-#if PRT_COUNT_OCCUPIED
   void recount_occupied() {
     _occupied = (jint) bm()->count_one_bits();
   }
-#endif

   PerRegionTable(HeapRegion* hr) :
     _hr(hr),
-#if PRT_COUNT_OCCUPIED
     _occupied(0),
-#endif
     _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
   {}

-  static void free(PerRegionTable* prt) {
-    while (true) {
-      PerRegionTable* fl = _free_list;
-      prt->set_next_free(fl);
-      PerRegionTable* res =
-        (PerRegionTable*)
-        Atomic::cmpxchg_ptr(prt, &_free_list, fl);
-      if (res == fl) return;
-    }
-    ShouldNotReachHere();
-  }
-
-  static PerRegionTable* alloc(HeapRegion* hr) {
-    PerRegionTable* fl = _free_list;
-    while (fl != NULL) {
-      PerRegionTable* nxt = fl->next_free();
-      PerRegionTable* res =
-        (PerRegionTable*)
-        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
-      if (res == fl) {
-        fl->init(hr);
-        return fl;
-      } else {
-        fl = _free_list;
-      }
-    }
-    assert(fl == NULL, "Loop condition.");
-    return new PerRegionTable(hr);
-  }
-
   void add_card_work(CardIdx_t from_card, bool par) {
     if (!_bm.at(from_card)) {
       if (par) {
         if (_bm.par_at_put(from_card, 1)) {
-#if PRT_COUNT_OCCUPIED
           Atomic::inc(&_occupied);
-#endif
         }
       } else {
         _bm.at_put(from_card, 1);
-#if PRT_COUNT_OCCUPIED
         _occupied++;
-#endif
       }
     }
   }
@@ -134,10 +89,13 @@
     // Must make this robust in case "from" is not in "_hr", because of
     // concurrency.

-#if HRRS_VERBOSE
-    gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
-                           from, *from);
-#endif
+    if (G1TraceHeapRegionRememberedSet) {
+      gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
+                             from,
+                             UseCompressedOops
+                             ? oopDesc::load_decode_heap_oop((narrowOop*)from)
+                             : oopDesc::load_decode_heap_oop((oop*)from));
+    }

     HeapRegion* loc_hr = hr();
     // If the test below fails, then this table was reused concurrently
@@ -162,23 +120,16 @@

   HeapRegion* hr() const { return _hr; }

-#if PRT_COUNT_OCCUPIED
   jint occupied() const {
     // Overkill, but if we ever need it...
     // guarantee(_occupied == _bm.count_one_bits(), "Check");
     return _occupied;
   }
-#else
-  jint occupied() const {
-    return _bm.count_one_bits();
-  }
-#endif

   void init(HeapRegion* hr) {
     _hr = hr;
-#if PRT_COUNT_OCCUPIED
+    _next = NULL;
     _occupied = 0;
-#endif
     _bm.clear();
   }

@@ -194,9 +145,7 @@
     HeapWord* hr_bot = hr()->bottom();
     size_t hr_first_card_index = ctbs->index_for(hr_bot);
     bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
-#if PRT_COUNT_OCCUPIED
     recount_occupied();
-#endif
   }

   void add_card(CardIdx_t from_card_index) {
@@ -218,16 +167,6 @@
     return sizeof(this) + _bm.size_in_words() * HeapWordSize;
   }

-  static size_t fl_mem_size() {
-    PerRegionTable* cur = _free_list;
-    size_t res = 0;
-    while (cur != NULL) {
-      res += sizeof(PerRegionTable);
-      cur = cur->next_free();
-    }
-    return res;
-  }
-
   // Requires "from" to be in "hr()".
   bool contains_reference(OopOrNarrowOopStar from) const {
     assert(hr()->is_in_reserved(from), "Precondition.");
@@ -235,122 +174,29 @@
                                         CardTableModRefBS::card_size);
     return _bm.at(card_ind);
   }
-};
-PerRegionTable* PerRegionTable::_free_list = NULL;
-
-
-#define COUNT_PAR_EXPANDS 0
-
-#if COUNT_PAR_EXPANDS
-static jint n_par_expands = 0;
-static jint n_par_contracts = 0;
-static jint par_expand_list_len = 0;
-static jint max_par_expand_list_len = 0;
-
-static void print_par_expand() {
-  Atomic::inc(&n_par_expands);
-  Atomic::inc(&par_expand_list_len);
-  if (par_expand_list_len > max_par_expand_list_len) {
-    max_par_expand_list_len = par_expand_list_len;
-  }
-  if ((n_par_expands % 10) == 0) {
-    gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, "
-                           "len = %d, max_len = %d\n.",
-                           n_par_expands, n_par_contracts, par_expand_list_len,
-                           max_par_expand_list_len);
-  }
-}
-#endif
-
-class PosParPRT: public PerRegionTable {
-  PerRegionTable** _par_tables;
-
-  enum SomePrivateConstants {
-    ReserveParTableExpansion = 1
-  };
+  PerRegionTable* next() const { return _next; }
+  void set_next(PerRegionTable* nxt) { _next = nxt; }
+  PerRegionTable** next_addr() { return &_next; }

-  void par_contract() {
-    assert(_par_tables != NULL, "Precondition.");
-    int n = HeapRegionRemSet::num_par_rem_sets()-1;
-    for (int i = 0; i < n; i++) {
-      _par_tables[i]->union_bitmap_into(bm());
-      PerRegionTable::free(_par_tables[i]);
-      _par_tables[i] = NULL;
-    }
-#if PRT_COUNT_OCCUPIED
-    // We must recount the "occupied."
-    recount_occupied();
-#endif
-    FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables);
-    _par_tables = NULL;
-#if COUNT_PAR_EXPANDS
-    Atomic::inc(&n_par_contracts);
-    Atomic::dec(&par_expand_list_len);
-#endif
-  }
-
-  static PerRegionTable** _par_table_fl;
-
-  PosParPRT* _next;
-
-  static PosParPRT* _free_list;
-
-  PerRegionTable** par_tables() const {
-    assert(uintptr_t(NULL) == 0, "Assumption.");
-    if (uintptr_t(_par_tables) <= ReserveParTableExpansion)
-      return NULL;
-    else
-      return _par_tables;
-  }
-
-  PosParPRT* _next_par_expanded;
-  PosParPRT* next_par_expanded() { return _next_par_expanded; }
-  void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; }
-  static PosParPRT* _par_expanded_list;
-
-public:
-
-  PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {}
-
-  jint occupied() const {
-    jint res = PerRegionTable::occupied();
-    if (par_tables() != NULL) {
-      for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
-        res += par_tables()[i]->occupied();
-      }
-    }
-    return res;
-  }
-
-  void init(HeapRegion* hr) {
-    PerRegionTable::init(hr);
-    _next = NULL;
-    if (par_tables() != NULL) {
-      for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
-        par_tables()[i]->init(hr);
-      }
-    }
-  }
-
-  static void free(PosParPRT* prt) {
+  static void free(PerRegionTable* prt) {
     while (true) {
-      PosParPRT* fl = _free_list;
+      PerRegionTable* fl = _free_list;
       prt->set_next(fl);
-      PosParPRT* res =
-        (PosParPRT*)
+      PerRegionTable* res =
+        (PerRegionTable*)
         Atomic::cmpxchg_ptr(prt, &_free_list, fl);
       if (res == fl) return;
     }
     ShouldNotReachHere();
   }

-  static PosParPRT* alloc(HeapRegion* hr) {
-    PosParPRT* fl = _free_list;
+  static PerRegionTable* alloc(HeapRegion* hr) {
+    PerRegionTable* fl = _free_list;
     while (fl != NULL) {
-      PosParPRT* nxt = fl->next();
-      PosParPRT* res =
-        (PosParPRT*)
+      PerRegionTable* nxt = fl->next();
+      PerRegionTable* res =
+        (PerRegionTable*)
         Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
       if (res == fl) {
         fl->init(hr);
@@ -360,148 +206,26 @@
       }
     }
     assert(fl == NULL, "Loop condition.");
NULL, "Loop condition."); - return new PosParPRT(hr); - } - - PosParPRT* next() const { return _next; } - void set_next(PosParPRT* nxt) { _next = nxt; } - PosParPRT** next_addr() { return &_next; } - - bool should_expand(int tid) { - // Given that we now defer RSet updates for after a GC we don't - // really need to expand the tables any more. This code should be - // cleaned up in the future (see CR 6921087). - return false; - } - - void par_expand() { - int n = HeapRegionRemSet::num_par_rem_sets()-1; - if (n <= 0) return; - if (_par_tables == NULL) { - PerRegionTable* res = - (PerRegionTable*) - Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion, - &_par_tables, NULL); - if (res != NULL) return; - // Otherwise, we reserved the right to do the expansion. - - PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n); - for (int i = 0; i < n; i++) { - PerRegionTable* ptable = PerRegionTable::alloc(hr()); - ptables[i] = ptable; - } - // Here we do not need an atomic. - _par_tables = ptables; -#if COUNT_PAR_EXPANDS - print_par_expand(); -#endif - // We must put this table on the expanded list. - PosParPRT* exp_head = _par_expanded_list; - while (true) { - set_next_par_expanded(exp_head); - PosParPRT* res = - (PosParPRT*) - Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head); - if (res == exp_head) return; - // Otherwise. - exp_head = res; - } - ShouldNotReachHere(); - } - } - - void add_reference(OopOrNarrowOopStar from, int tid) { - // Expand if necessary. - PerRegionTable** pt = par_tables(); - if (pt != NULL) { - // We always have to assume that mods to table 0 are in parallel, - // because of the claiming scheme in parallel expansion. A thread - // with tid != 0 that finds the table to be NULL, but doesn't succeed - // in claiming the right of expanding it, will end up in the else - // clause of the above if test. That thread could be delayed, and a - // thread 0 add reference could see the table expanded, and come - // here. Both threads would be adding in parallel. But we get to - // not use atomics for tids > 0. - if (tid == 0) { - PerRegionTable::add_reference(from); - } else { - pt[tid-1]->seq_add_reference(from); - } - } else { - // Not expanded -- add to the base table. 
-      PerRegionTable::add_reference(from);
-    }
-  }
-
-  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
-    assert(_par_tables == NULL, "Precondition");
-    PerRegionTable::scrub(ctbs, card_bm);
-  }
-
-  size_t mem_size() const {
-    size_t res =
-      PerRegionTable::mem_size() + sizeof(this) - sizeof(PerRegionTable);
-    if (_par_tables != NULL) {
-      for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
-        res += _par_tables[i]->mem_size();
-      }
-    }
-    return res;
+    return new PerRegionTable(hr);
   }

   static size_t fl_mem_size() {
-    PosParPRT* cur = _free_list;
+    PerRegionTable* cur = _free_list;
     size_t res = 0;
     while (cur != NULL) {
-      res += sizeof(PosParPRT);
+      res += sizeof(PerRegionTable);
       cur = cur->next();
     }
     return res;
   }
-
-  bool contains_reference(OopOrNarrowOopStar from) const {
-    if (PerRegionTable::contains_reference(from)) return true;
-    if (_par_tables != NULL) {
-      for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
-        if (_par_tables[i]->contains_reference(from)) return true;
-      }
-    }
-    return false;
-  }
-
-  static void par_contract_all();
 };

-void PosParPRT::par_contract_all() {
-  PosParPRT* hd = _par_expanded_list;
-  while (hd != NULL) {
-    PosParPRT* nxt = hd->next_par_expanded();
-    PosParPRT* res =
-      (PosParPRT*)
-      Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd);
-    if (res == hd) {
-      // We claimed the right to contract this table.
-      hd->set_next_par_expanded(NULL);
-      hd->par_contract();
-      hd = _par_expanded_list;
-    } else {
-      hd = res;
-    }
-  }
-}
-
-PosParPRT* PosParPRT::_free_list = NULL;
-PosParPRT* PosParPRT::_par_expanded_list = NULL;
-
-jint OtherRegionsTable::_cache_probes = 0;
-jint OtherRegionsTable::_cache_hits = 0;
+PerRegionTable* PerRegionTable::_free_list = NULL;

 size_t OtherRegionsTable::_max_fine_entries = 0;
 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
-#if SAMPLE_FOR_EVICTION
 size_t OtherRegionsTable::_fine_eviction_stride = 0;
 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
-#endif

 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
   _g1h(G1CollectedHeap::heap()),
@@ -511,34 +235,36 @@
     false /* in-resource-area */),
   _fine_grain_regions(NULL),
   _n_fine_entries(0), _n_coarse_entries(0),
-#if SAMPLE_FOR_EVICTION
   _fine_eviction_start(0),
-#endif
   _sparse_table(hr)
 {
-  typedef PosParPRT* PosParPRTPtr;
+  typedef PerRegionTable* PerRegionTablePtr;
+
   if (_max_fine_entries == 0) {
     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
     size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
     _max_fine_entries = (size_t)(1 << max_entries_log);
     _mod_max_fine_entries_mask = _max_fine_entries - 1;
-#if SAMPLE_FOR_EVICTION
+
     assert(_fine_eviction_sample_size == 0 &&
            _fine_eviction_stride == 0, "All init at same time.");
     _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
-#endif
   }
-  _fine_grain_regions = new PosParPRTPtr[_max_fine_entries];
-  if (_fine_grain_regions == NULL)
+
+  _fine_grain_regions = new PerRegionTablePtr[_max_fine_entries];
+
+  if (_fine_grain_regions == NULL) {
     vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
                           "Failed to allocate _fine_grain_entries.");
+  }
+
   for (size_t i = 0; i < _max_fine_entries; i++) {
     _fine_grain_regions[i] = NULL;
   }
 }

-int** OtherRegionsTable::_from_card_cache = NULL;
+int**  OtherRegionsTable::_from_card_cache = NULL;
 size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
 size_t OtherRegionsTable::_from_card_cache_mem_size = 0;

@@ -579,38 +305,26 @@
 void
 OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
   size_t cur_hrs_ind = (size_t) hr()->hrs_index();

-#if HRRS_VERBOSE
-  gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
-                         from,
-                         UseCompressedOops
-                         ? oopDesc::load_decode_heap_oop((narrowOop*)from)
-                         : oopDesc::load_decode_heap_oop((oop*)from));
-#endif
+  if (G1TraceHeapRegionRememberedSet) {
+    gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
+                           from,
+                           UseCompressedOops
+                           ? oopDesc::load_decode_heap_oop((narrowOop*)from)
+                           : oopDesc::load_decode_heap_oop((oop*)from));
+  }

   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

-#if HRRS_VERBOSE
-  gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
-                         hr()->bottom(), from_card,
-                         _from_card_cache[tid][cur_hrs_ind]);
-#endif
+  if (G1TraceHeapRegionRememberedSet) {
+    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
+                           hr()->bottom(), from_card,
+                           _from_card_cache[tid][cur_hrs_ind]);
+  }

-#define COUNT_CACHE 0
-#if COUNT_CACHE
-  jint p = Atomic::add(1, &_cache_probes);
-  if ((p % 10000) == 0) {
-    jint hits = _cache_hits;
-    gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.",
-                           _cache_hits, p, 100.0* (float)hits/(float)p);
-  }
-#endif
   if (from_card == _from_card_cache[tid][cur_hrs_ind]) {
-#if HRRS_VERBOSE
-    gclog_or_tty->print_cr(" from-card cache hit.");
-#endif
-#if COUNT_CACHE
-    Atomic::inc(&_cache_hits);
-#endif
+    if (G1TraceHeapRegionRememberedSet) {
+      gclog_or_tty->print_cr(" from-card cache hit.");
+    }
     assert(contains_reference(from), "We just added it!");
     return;
   } else {
@@ -623,16 +337,16 @@

   // If the region is already coarsened, return.
   if (_coarse_map.at(from_hrs_ind)) {
-#if HRRS_VERBOSE
-    gclog_or_tty->print_cr(" coarse map hit.");
-#endif
+    if (G1TraceHeapRegionRememberedSet) {
+      gclog_or_tty->print_cr(" coarse map hit.");
+    }
     assert(contains_reference(from), "We just added it!");
     return;
   }

   // Otherwise find a per-region table to add it to.
   size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
-  PosParPRT* prt = find_region_table(ind, from_hr);
+  PerRegionTable* prt = find_region_table(ind, from_hr);
   if (prt == NULL) {
     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
     // Confirm that it's really not there...
@@ -649,35 +363,35 @@
         _sparse_table.add_card(from_hrs_ind, card_index)) {
       if (G1RecordHRRSOops) {
         HeapRegionRemSet::record(hr(), from);
-#if HRRS_VERBOSE
-        gclog_or_tty->print(" Added card " PTR_FORMAT " to region "
-                            "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
-                            align_size_down(uintptr_t(from),
-                                            CardTableModRefBS::card_size),
-                            hr()->bottom(), from);
-#endif
+        if (G1TraceHeapRegionRememberedSet) {
+          gclog_or_tty->print(" Added card " PTR_FORMAT " to region "
+                              "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
for ref " PTR_FORMAT ".\n", + align_size_down(uintptr_t(from), + CardTableModRefBS::card_size), + hr()->bottom(), from); + } } -#if HRRS_VERBOSE - gclog_or_tty->print_cr(" added card to sparse table."); -#endif + if (G1TraceHeapRegionRememberedSet) { + gclog_or_tty->print_cr(" added card to sparse table."); + } assert(contains_reference_locked(from), "We just added it!"); return; } else { -#if HRRS_VERBOSE - gclog_or_tty->print_cr(" [tid %d] sparse table entry " - "overflow(f: %d, t: %d)", - tid, from_hrs_ind, cur_hrs_ind); -#endif + if (G1TraceHeapRegionRememberedSet) { + gclog_or_tty->print_cr(" [tid %d] sparse table entry " + "overflow(f: %d, t: %d)", + tid, from_hrs_ind, cur_hrs_ind); + } } if (_n_fine_entries == _max_fine_entries) { prt = delete_region_table(); } else { - prt = PosParPRT::alloc(from_hr); + prt = PerRegionTable::alloc(from_hr); } prt->init(from_hr); - PosParPRT* first_prt = _fine_grain_regions[ind]; + PerRegionTable* first_prt = _fine_grain_regions[ind]; prt->set_next(first_prt); // XXX Maybe move to init? _fine_grain_regions[ind] = prt; _n_fine_entries++; @@ -704,38 +418,25 @@ // OtherRegionsTable for why this is OK. assert(prt != NULL, "Inv"); - if (prt->should_expand(tid)) { - MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); - HeapRegion* prt_hr = prt->hr(); - if (prt_hr == from_hr) { - // Make sure the table still corresponds to the same region - prt->par_expand(); - prt->add_reference(from, tid); - } - // else: The table has been concurrently coarsened, evicted, and - // the table data structure re-used for another table. So, we - // don't need to add the reference any more given that the table - // has been coarsened and the whole region will be scanned anyway. - } else { - prt->add_reference(from, tid); - } + prt->add_reference(from); + if (G1RecordHRRSOops) { HeapRegionRemSet::record(hr(), from); -#if HRRS_VERBOSE - gclog_or_tty->print("Added card " PTR_FORMAT " to region " - "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", - align_size_down(uintptr_t(from), - CardTableModRefBS::card_size), - hr()->bottom(), from); -#endif + if (G1TraceHeapRegionRememberedSet) { + gclog_or_tty->print("Added card " PTR_FORMAT " to region " + "[" PTR_FORMAT "...) 
for ref " PTR_FORMAT ".\n", + align_size_down(uintptr_t(from), + CardTableModRefBS::card_size), + hr()->bottom(), from); + } } assert(contains_reference(from), "We just added it!"); } -PosParPRT* +PerRegionTable* OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const { assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); - PosParPRT* prt = _fine_grain_regions[ind]; + PerRegionTable* prt = _fine_grain_regions[ind]; while (prt != NULL && prt->hr() != hr) { prt = prt->next(); } @@ -743,32 +444,16 @@ return prt; } - -#define DRT_CENSUS 0 - -#if DRT_CENSUS -static const int HistoSize = 6; -static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; -static int coarsenings = 0; -static int occ_sum = 0; -#endif - jint OtherRegionsTable::_n_coarsenings = 0; -PosParPRT* OtherRegionsTable::delete_region_table() { -#if DRT_CENSUS - int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; - const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 }; -#endif - +PerRegionTable* OtherRegionsTable::delete_region_table() { assert(_m.owned_by_self(), "Precondition"); assert(_n_fine_entries == _max_fine_entries, "Precondition"); - PosParPRT* max = NULL; + PerRegionTable* max = NULL; jint max_occ = 0; - PosParPRT** max_prev; + PerRegionTable** max_prev; size_t max_ind; -#if SAMPLE_FOR_EVICTION size_t i = _fine_eviction_start; for (size_t k = 0; k < _fine_eviction_sample_size; k++) { size_t ii = i; @@ -778,8 +463,8 @@ if (ii == _max_fine_entries) ii = 0; guarantee(ii != i, "We must find one."); } - PosParPRT** prev = &_fine_grain_regions[ii]; - PosParPRT* cur = *prev; + PerRegionTable** prev = &_fine_grain_regions[ii]; + PerRegionTable* cur = *prev; while (cur != NULL) { jint cur_occ = cur->occupied(); if (max == NULL || cur_occ > max_occ) { @@ -794,64 +479,27 @@ i = i + _fine_eviction_stride; if (i >= _n_fine_entries) i = i - _n_fine_entries; } + _fine_eviction_start++; - if (_fine_eviction_start >= _n_fine_entries) + + if (_fine_eviction_start >= _n_fine_entries) { _fine_eviction_start -= _n_fine_entries; -#else - for (int i = 0; i < _max_fine_entries; i++) { - PosParPRT** prev = &_fine_grain_regions[i]; - PosParPRT* cur = *prev; - while (cur != NULL) { - jint cur_occ = cur->occupied(); -#if DRT_CENSUS - for (int k = 0; k < HistoSize; k++) { - if (cur_occ <= histo_limits[k]) { - histo[k]++; global_histo[k]++; break; - } - } -#endif - if (max == NULL || cur_occ > max_occ) { - max = cur; - max_prev = prev; - max_ind = i; - max_occ = cur_occ; - } - prev = cur->next_addr(); - cur = cur->next(); - } } -#endif - // XXX + guarantee(max != NULL, "Since _n_fine_entries > 0"); -#if DRT_CENSUS - gclog_or_tty->print_cr("In a coarsening: histo of occs:"); - for (int k = 0; k < HistoSize; k++) { - gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], histo[k]); - } - coarsenings++; - occ_sum += max_occ; - if ((coarsenings % 100) == 0) { - gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings); - for (int k = 0; k < HistoSize; k++) { - gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], global_histo[k]); - } - gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.", - (float)occ_sum/(float)coarsenings); - } -#endif // Set the corresponding coarse bit. size_t max_hrs_index = (size_t) max->hr()->hrs_index(); if (!_coarse_map.at(max_hrs_index)) { _coarse_map.at_put(max_hrs_index, true); _n_coarse_entries++; -#if 0 - gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] " - "for region [" PTR_FORMAT "...] 
-                        hr()->bottom(),
-                        max->hr()->bottom(),
-                        _n_coarse_entries);
-#endif
+    if (G1TraceHeapRegionRememberedSet) {
+      gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
+                          "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
+                          hr()->bottom(),
+                          max->hr()->bottom(),
+                          _n_coarse_entries);
+    }
   }

   // Unsplice.
@@ -883,10 +531,10 @@

   // Now do the fine-grained maps.
   for (size_t i = 0; i < _max_fine_entries; i++) {
-    PosParPRT* cur = _fine_grain_regions[i];
-    PosParPRT** prev = &_fine_grain_regions[i];
+    PerRegionTable* cur = _fine_grain_regions[i];
+    PerRegionTable** prev = &_fine_grain_regions[i];
     while (cur != NULL) {
-      PosParPRT* nxt = cur->next();
+      PerRegionTable* nxt = cur->next();
       // If the entire region is dead, eliminate.
       if (G1RSScrubVerbose) {
         gclog_or_tty->print_cr(" For other region %u:",
@@ -899,7 +547,7 @@
         if (G1RSScrubVerbose) {
           gclog_or_tty->print_cr(" deleted via region map.");
         }
-        PosParPRT::free(cur);
+        PerRegionTable::free(cur);
       } else {
         // Do fine-grain elimination.
         if (G1RSScrubVerbose) {
@@ -914,7 +562,7 @@
           *prev = nxt;
           cur->set_next(NULL);
           _n_fine_entries--;
-          PosParPRT::free(cur);
+          PerRegionTable::free(cur);
         } else {
           prev = cur->next_addr();
         }
@@ -940,7 +588,7 @@
 size_t OtherRegionsTable::occ_fine() const {
   size_t sum = 0;
   for (size_t i = 0; i < _max_fine_entries; i++) {
-    PosParPRT* cur = _fine_grain_regions[i];
+    PerRegionTable* cur = _fine_grain_regions[i];
     while (cur != NULL) {
       sum += cur->occupied();
       cur = cur->next();
     }
@@ -962,13 +610,13 @@
   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   size_t sum = 0;
   for (size_t i = 0; i < _max_fine_entries; i++) {
-    PosParPRT* cur = _fine_grain_regions[i];
+    PerRegionTable* cur = _fine_grain_regions[i];
     while (cur != NULL) {
       sum += cur->mem_size();
       cur = cur->next();
     }
   }
-  sum += (sizeof(PosParPRT*) * _max_fine_entries);
+  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
   sum += (_coarse_map.size_in_words() * HeapWordSize);
   sum += (_sparse_table.mem_size());
   sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
@@ -980,7 +628,7 @@
 }

 size_t OtherRegionsTable::fl_mem_size() {
-  return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size();
+  return PerRegionTable::fl_mem_size();
 }

 void OtherRegionsTable::clear_fcc() {
@@ -992,10 +640,10 @@
 void OtherRegionsTable::clear() {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   for (size_t i = 0; i < _max_fine_entries; i++) {
-    PosParPRT* cur = _fine_grain_regions[i];
+    PerRegionTable* cur = _fine_grain_regions[i];
     while (cur != NULL) {
-      PosParPRT* nxt = cur->next();
-      PosParPRT::free(cur);
+      PerRegionTable* nxt = cur->next();
+      PerRegionTable::free(cur);
       cur = nxt;
     }
     _fine_grain_regions[i] = NULL;
@@ -1035,8 +683,8 @@
 bool OtherRegionsTable::del_single_region_table(size_t ind,
                                                 HeapRegion* hr) {
   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
-  PosParPRT** prev_addr = &_fine_grain_regions[ind];
-  PosParPRT* prt = *prev_addr;
+  PerRegionTable** prev_addr = &_fine_grain_regions[ind];
+  PerRegionTable* prt = *prev_addr;
   while (prt != NULL && prt->hr() != hr) {
     prev_addr = prt->next_addr();
     prt = prt->next();
@@ -1044,7 +692,7 @@
   if (prt != NULL) {
     assert(prt->hr() == hr, "Loop postcondition.");
     *prev_addr = prt->next();
-    PosParPRT::free(prt);
+    PerRegionTable::free(prt);
     _n_fine_entries--;
     return true;
   } else {
@@ -1065,7 +713,7 @@

   // Is this region in the coarse map?
   if (_coarse_map.at(hr_ind)) return true;

-  PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
+  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                      hr);
   if (prt != NULL) {
     return prt->contains_reference(from);
@@ -1145,7 +793,7 @@
       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
     gclog_or_tty->print_cr(" Card " PTR_FORMAT, card_start);
   }
-  // XXX
+
   if (iter.n_yielded() != occupied()) {
     gclog_or_tty->print_cr("Yielded disagrees with occupied:");
     gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).",
@@ -1163,10 +811,6 @@
   SparsePRT::cleanup_all();
 }

-void HeapRegionRemSet::par_cleanup() {
-  PosParPRT::par_contract_all();
-}
-
 void HeapRegionRemSet::clear() {
   _other_regions.clear();
   assert(occupied() == 0, "Should be clear.");
diff -r de2f17add1fb -r 22de825d6faf src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Thu Jun 28 10:35:28 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Fri Jun 29 11:15:38 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
 class G1BlockOffsetSharedArray;
 class HeapRegion;
 class HeapRegionRemSetIterator;
-class PosParPRT;
+class PerRegionTable;
 class SparsePRT;

 // Essentially a wrapper around SparsePRTCleanupTask. See
@@ -79,15 +79,14 @@
   size_t _n_coarse_entries;
   static jint _n_coarsenings;

-  PosParPRT** _fine_grain_regions;
-  size_t _n_fine_entries;
+  PerRegionTable** _fine_grain_regions;
+  size_t           _n_fine_entries;

-#define SAMPLE_FOR_EVICTION 1
-#if SAMPLE_FOR_EVICTION
+  // Used to sample a subset of the fine grain PRTs to determine which
+  // PRT to evict and coarsen.
   size_t        _fine_eviction_start;
   static size_t _fine_eviction_stride;
   static size_t _fine_eviction_sample_size;
-#endif

   SparsePRT _sparse_table;

@@ -98,21 +97,18 @@
   // Requires "prt" to be the first element of the bucket list appropriate
   // for "hr". If this list contains an entry for "hr", return it,
   // otherwise return "NULL".
-  PosParPRT* find_region_table(size_t ind, HeapRegion* hr) const;
+  PerRegionTable* find_region_table(size_t ind, HeapRegion* hr) const;

-  // Find, delete, and return a candidate PosParPRT, if any exists,
+  // Find, delete, and return a candidate PerRegionTable, if any exists,
   // adding the deleted region to the coarse bitmap. Requires the caller
   // to hold _m, and the fine-grain table to be full.
-  PosParPRT* delete_region_table();
+  PerRegionTable* delete_region_table();

   // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
   // be the correct index for "hr"), delete it and return true; else return
   // false.
   bool del_single_region_table(size_t ind, HeapRegion* hr);

-  static jint _cache_probes;
-  static jint _cache_hits;
-
   // Indexed by thread X heap region, to minimize thread contention.
   static int** _from_card_cache;
   static size_t _from_card_cache_max_regions;
@@ -127,10 +123,6 @@
   // sense.
   void add_reference(OopOrNarrowOopStar from, int tid);

-  void add_reference(OopOrNarrowOopStar from) {
-    return add_reference(from, 0);
-  }
-
   // Removes any entries shown by the given bitmaps to contain only dead
   // objects.
   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
@@ -233,14 +225,12 @@
   static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }

-  /* Used in the sequential case. Returns "true" iff this addition causes
-     the size limit to be reached. */
+  // Used in the sequential case.
   void add_reference(OopOrNarrowOopStar from) {
-    _other_regions.add_reference(from);
+    _other_regions.add_reference(from, 0);
   }

-  /* Used in the parallel case. Returns "true" iff this addition causes
-     the size limit to be reached. */
+  // Used in the parallel case.
   void add_reference(OopOrNarrowOopStar from, int tid) {
     _other_regions.add_reference(from, tid);
   }

@@ -253,15 +243,6 @@
   // entries for this region in other remsets.
   void clear();

-  // Forget any entries due to pointers from "from_hr".
-  void clear_incoming_entry(HeapRegion* from_hr) {
-    _other_regions.clear_incoming_entry(from_hr);
-  }
-
-#if 0
-  virtual void cleanup() = 0;
-#endif
-
   // Attempt to claim the region. Returns true iff this call caused an
   // atomic transition from Unclaimed to Claimed.
   bool claim_iter();
@@ -290,12 +271,6 @@
   // Initialize the given iterator to iterate over this rem set.
   void init_iterator(HeapRegionRemSetIterator* iter) const;

-#if 0
-  // Apply the "do_card" method to the start address of every card in the
-  // rem set. Returns false if some application of the closure aborted.
-  virtual bool card_iterate(CardClosure* iter) = 0;
-#endif
-
   // The actual # of bytes this hr_remset takes up.
   size_t mem_size() {
     return _other_regions.mem_size()
@@ -322,10 +297,7 @@
   void print() const;

   // Called during a stop-world phase to perform any deferred cleanups.
-  // The second version may be called by parallel threads after then finish
-  // collection work.
   static void cleanup();
-  static void par_cleanup();

   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   // (Uses it to initialize from_card_cache).
@@ -367,7 +339,7 @@
   // Local caching of HRRS fields.
   const BitMap*             _coarse_map;

-  PosParPRT**               _fine_grain_regions;
+  PerRegionTable**          _fine_grain_regions;
   G1BlockOffsetSharedArray* _bosa;
   G1CollectedHeap*          _g1h;

@@ -404,8 +376,9 @@
   // Index of bucket-list we're working on.
   int _fine_array_index;
+
   // Per Region Table we're doing within current bucket list.
-  PosParPRT* _fine_cur_prt;
+  PerRegionTable* _fine_cur_prt;

   /* SparsePRT::*/ SparsePRTIter _sparse_iter;

@@ -435,12 +408,4 @@
   }
 };

-#if 0
-class CardClosure: public Closure {
-public:
-  virtual void do_card(HeapWord* card_start) = 0;
-};
-
-#endif
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
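
A note on the pattern this change keeps: PerRegionTable::free() and PerRegionTable::alloc() (moved up from the deleted PosParPRT class) recycle tables through a lock-free LIFO free list whose head is swapped with Atomic::cmpxchg_ptr. Below is a minimal stand-alone sketch of the same compare-and-swap free-list idea, written against std::atomic rather than HotSpot's Atomic class; the names Node and FreeList are invented for the illustration and are not part of the changeset.

// Sketch of the CAS-based free list used by PerRegionTable::free()/alloc()
// above. Illustrative only; ABA hazards are ignored for brevity.
#include <atomic>
#include <cstdio>

struct Node {
  int   payload;   // stands in for the per-region table state
  Node* next;      // "next pointer for free/allocated list"
};

class FreeList {
  std::atomic<Node*> _head;

public:
  FreeList() : _head(nullptr) {}

  // Analogous to PerRegionTable::free(prt): push a retired node.
  void push(Node* n) {
    Node* old_head = _head.load();
    do {
      n->next = old_head;   // link the node in front of the current head
    } while (!_head.compare_exchange_weak(old_head, n));
  }

  // Analogous to the reuse path in PerRegionTable::alloc(): pop a node,
  // or return nullptr so the caller falls back to allocating a fresh one.
  Node* pop() {
    Node* old_head = _head.load();
    while (old_head != nullptr &&
           !_head.compare_exchange_weak(old_head, old_head->next)) {
      // compare_exchange_weak reloads old_head on failure; just retry.
    }
    return old_head;
  }
};

int main() {
  FreeList fl;
  Node n = { 42, nullptr };
  fl.push(&n);                  // retire a table
  Node* reused = fl.pop();      // later, reuse it instead of allocating
  std::printf("reused payload = %d\n", reused != nullptr ? reused->payload : -1);
  return 0;
}

As in the patch, a caller that finds the free list empty (the nullptr return here, the empty _free_list case in alloc()) simply allocates a new object. One usage note on the other addition: because G1TraceHeapRegionRememberedSet is declared with develop(), it can only be toggled (-XX:+G1TraceHeapRegionRememberedSet) on debug builds of the VM; in product builds develop flags keep their default values.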