Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/g1RemSet.cpp @ 3317:063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
Summary: We should only undirty cards after we decide that they are not on a young region, not before. The fix also includes improvements to the verify_dirty_region() method, which prints out which cards were not found dirty.
Reviewed-by: johnc, brutisso
author | tonyp |
---|---|
date | Fri, 29 Apr 2011 14:59:04 -0400 |
parents | 04d1138b4cce |
children | ae5b2f1dcf12 |
comparison
equal
deleted
inserted
replaced
3316:cd8e33b2a8ad | 3317:063382f9b575 |
---|---|
155 } | 155 } |
156 | 156 |
157 void set_try_claimed() { _try_claimed = true; } | 157 void set_try_claimed() { _try_claimed = true; } |
158 | 158 |
159 void scanCard(size_t index, HeapRegion *r) { | 159 void scanCard(size_t index, HeapRegion *r) { |
160 _cards_done++; | |
161 DirtyCardToOopClosure* cl = | 160 DirtyCardToOopClosure* cl = |
162 r->new_dcto_closure(_oc, | 161 r->new_dcto_closure(_oc, |
163 CardTableModRefBS::Precise, | 162 CardTableModRefBS::Precise, |
164 HeapRegionDCTOC::IntoCSFilterKind); | 163 HeapRegionDCTOC::IntoCSFilterKind); |
165 | 164 |
166 // Set the "from" region in the closure. | 165 // Set the "from" region in the closure. |
167 _oc->set_region(r); | 166 _oc->set_region(r); |
168 HeapWord* card_start = _bot_shared->address_for_index(index); | 167 HeapWord* card_start = _bot_shared->address_for_index(index); |
169 HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words; | 168 HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words; |
170 Space *sp = SharedHeap::heap()->space_containing(card_start); | 169 Space *sp = SharedHeap::heap()->space_containing(card_start); |
171 MemRegion sm_region; | 170 MemRegion sm_region = sp->used_region_at_save_marks(); |
172 if (ParallelGCThreads > 0) { | |
173 // first find the used area | |
174 sm_region = sp->used_region_at_save_marks(); | |
175 } else { | |
176 // The closure is not idempotent. We shouldn't look at objects | |
177 // allocated during the GC. | |
178 sm_region = sp->used_region_at_save_marks(); | |
179 } | |
180 MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end)); | 171 MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end)); |
181 if (!mr.is_empty()) { | 172 if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) { |
173 // We make the card as "claimed" lazily (so races are possible | |
174 // but they're benign), which reduces the number of duplicate | |
175 // scans (the rsets of the regions in the cset can intersect). | |
176 _ct_bs->set_card_claimed(index); | |
177 _cards_done++; | |
182 cl->do_MemRegion(mr); | 178 cl->do_MemRegion(mr); |
183 } | 179 } |
184 } | 180 } |
185 | 181 |
186 void printCard(HeapRegion* card_region, size_t card_index, | 182 void printCard(HeapRegion* card_region, size_t card_index, |
197 bool doHeapRegion(HeapRegion* r) { | 193 bool doHeapRegion(HeapRegion* r) { |
198 assert(r->in_collection_set(), "should only be called on elements of CS."); | 194 assert(r->in_collection_set(), "should only be called on elements of CS."); |
199 HeapRegionRemSet* hrrs = r->rem_set(); | 195 HeapRegionRemSet* hrrs = r->rem_set(); |
200 if (hrrs->iter_is_complete()) return false; // All done. | 196 if (hrrs->iter_is_complete()) return false; // All done. |
201 if (!_try_claimed && !hrrs->claim_iter()) return false; | 197 if (!_try_claimed && !hrrs->claim_iter()) return false; |
198 // If we ever free the collection set concurrently, we should also | |
199 // clear the card table concurrently therefore we won't need to | |
200 // add regions of the collection set to the dirty cards region. | |
202 _g1h->push_dirty_cards_region(r); | 201 _g1h->push_dirty_cards_region(r); |
203 // If we didn't return above, then | 202 // If we didn't return above, then |
204 // _try_claimed || r->claim_iter() | 203 // _try_claimed || r->claim_iter() |
205 // is true: either we're supposed to work on claimed-but-not-complete | 204 // is true: either we're supposed to work on claimed-but-not-complete |
206 // regions, or we successfully claimed the region. | 205 // regions, or we successfully claimed the region. |
228 | 227 |
229 if (!card_region->is_on_dirty_cards_region_list()) { | 228 if (!card_region->is_on_dirty_cards_region_list()) { |
230 _g1h->push_dirty_cards_region(card_region); | 229 _g1h->push_dirty_cards_region(card_region); |
231 } | 230 } |
232 | 231 |
233 // If the card is dirty, then we will scan it during updateRS. | 232 // If the card is dirty, then we will scan it during updateRS. |
234 if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) { | 233 if (!card_region->in_collection_set() && |
235 // We make the card as "claimed" lazily (so races are possible but they're benign), | 234 !_ct_bs->is_card_dirty(card_index)) { |
236 // which reduces the number of duplicate scans (the rsets of the regions in the cset | 235 scanCard(card_index, card_region); |
237 // can intersect). | |
238 if (!_ct_bs->is_card_claimed(card_index)) { | |
239 _ct_bs->set_card_claimed(card_index); | |
240 scanCard(card_index, card_region); | |
241 } | |
242 } | 236 } |
243 } | 237 } |
244 if (!_try_claimed) { | 238 if (!_try_claimed) { |
245 hrrs->set_iter_complete(); | 239 hrrs->set_iter_complete(); |
246 } | 240 } |
247 return false; | 241 return false; |
248 } | 242 } |
249 // Set all cards back to clean. | |
250 void cleanup() {_g1h->cleanUpCardTable();} | |
251 size_t cards_done() { return _cards_done;} | 243 size_t cards_done() { return _cards_done;} |
252 size_t cards_looked_up() { return _cards;} | 244 size_t cards_looked_up() { return _cards;} |
253 }; | 245 }; |
254 | 246 |
255 // We want the parallel threads to start their scanning at | 247 // We want the parallel threads to start their scanning at |
564 // As a result, when this closure is applied to "refs into cset" | 556 // As a result, when this closure is applied to "refs into cset" |
565 // DCQS, we shouldn't see any cards in young regions. | 557 // DCQS, we shouldn't see any cards in young regions. |
566 update_rs_cl.set_region(r); | 558 update_rs_cl.set_region(r); |
567 HeapWord* stop_point = | 559 HeapWord* stop_point = |
568 r->oops_on_card_seq_iterate_careful(scanRegion, | 560 r->oops_on_card_seq_iterate_careful(scanRegion, |
569 &filter_then_update_rs_cset_oop_cl, | 561 &filter_then_update_rs_cset_oop_cl, |
570 false /* filter_young */); | 562 false /* filter_young */, |
563 NULL /* card_ptr */); | |
571 | 564 |
572 // Since this is performed in the event of an evacuation failure, we | 565 // Since this is performed in the event of an evacuation failure, we |
573 // we shouldn't see a non-null stop point | 566 // we shouldn't see a non-null stop point |
574 assert(stop_point == NULL, "saw an unallocated region"); | 567 assert(stop_point == NULL, "saw an unallocated region"); |
575 return true; | 568 return true; |
733 FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, | 726 FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, |
734 (check_for_refs_into_cset ? | 727 (check_for_refs_into_cset ? |
735 (OopClosure*)&mux : | 728 (OopClosure*)&mux : |
736 (OopClosure*)&update_rs_oop_cl)); | 729 (OopClosure*)&update_rs_oop_cl)); |
737 | 730 |
738 // Undirty the card. | |
739 *card_ptr = CardTableModRefBS::clean_card_val(); | |
740 // We must complete this write before we do any of the reads below. | |
741 OrderAccess::storeload(); | |
742 // And process it, being careful of unallocated portions of TLAB's. | |
743 | |
744 // The region for the current card may be a young region. The | 731 // The region for the current card may be a young region. The |
745 // current card may have been a card that was evicted from the | 732 // current card may have been a card that was evicted from the |
746 // card cache. When the card was inserted into the cache, we had | 733 // card cache. When the card was inserted into the cache, we had |
747 // determined that its region was non-young. While in the cache, | 734 // determined that its region was non-young. While in the cache, |
748 // the region may have been freed during a cleanup pause, reallocated | 735 // the region may have been freed during a cleanup pause, reallocated |
749 // and tagged as young. | 736 // and tagged as young. |
750 // | 737 // |
751 // We wish to filter out cards for such a region but the current | 738 // We wish to filter out cards for such a region but the current |
752 // thread, if we're running conucrrently, may "see" the young type | 739 // thread, if we're running concurrently, may "see" the young type |
753 // change at any time (so an earlier "is_young" check may pass or | 740 // change at any time (so an earlier "is_young" check may pass or |
754 // fail arbitrarily). We tell the iteration code to perform this | 741 // fail arbitrarily). We tell the iteration code to perform this |
755 // filtering when it has been determined that there has been an actual | 742 // filtering when it has been determined that there has been an actual |
756 // allocation in this region and making it safe to check the young type. | 743 // allocation in this region and making it safe to check the young type. |
757 bool filter_young = true; | 744 bool filter_young = true; |
758 | 745 |
759 HeapWord* stop_point = | 746 HeapWord* stop_point = |
760 r->oops_on_card_seq_iterate_careful(dirtyRegion, | 747 r->oops_on_card_seq_iterate_careful(dirtyRegion, |
761 &filter_then_update_rs_oop_cl, | 748 &filter_then_update_rs_oop_cl, |
762 filter_young); | 749 filter_young, |
750 card_ptr); | |
763 | 751 |
764 // If stop_point is non-null, then we encountered an unallocated region | 752 // If stop_point is non-null, then we encountered an unallocated region |
765 // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the | 753 // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the |
766 // card and re-enqueue: if we put off the card until a GC pause, then the | 754 // card and re-enqueue: if we put off the card until a GC pause, then the |
767 // unallocated portion will be filled in. Alternatively, we might try | 755 // unallocated portion will be filled in. Alternatively, we might try |