comparison src/share/vm/gc_implementation/g1/g1RemSet.cpp @ 616:4f360ec815ba

6720309: G1: don't synchronously update RSet during evacuation pauses
6720334: G1: don't update RSets of collection set regions during an evacuation pause
Summary: Introduced a deferred update mechanism for delaying the RSet updates during the collection pause
Reviewed-by: apetrusenko, tonyp
author iveresov
date Fri, 06 Mar 2009 13:50:14 -0800
parents 58054a18d735
children 87fa6e083d82
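In outline: instead of updating remembered sets synchronously while objects are being evacuated, a write that creates a pointer into the collection set now only marks the covering card as "deferred" and enqueues that card on a dirty card queue for processing after the pause. Below is a minimal standalone sketch of that mark-once/enqueue-once idea in plain C++11; CardTable, DirtyCardQueue and mark_deferred are illustrative stand-ins rather than HotSpot's types, and only the 512-byte card size is carried over from G1.

    // Deferred card marking, sketched with standard-library types only.
    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static const size_t kCardShift = 9;   // 2^9 = 512-byte cards, as in G1
    enum CardState { kClean = 0, kDeferred = 1 };

    struct CardTable {
      std::vector<std::atomic<uint8_t> > cards;
      explicit CardTable(size_t n) : cards(n) {
        for (size_t i = 0; i < n; i++) cards[i].store(kClean);
      }
      size_t index_for(const void* addr, const void* base) const {
        return (size_t)((const char*)addr - (const char*)base) >> kCardShift;
      }
      // True only for the first thread to defer this card, so each card
      // is enqueued at most once no matter how many fields it covers.
      bool mark_deferred(size_t i) {
        uint8_t expected = kClean;
        return cards[i].compare_exchange_strong(expected, kDeferred);
      }
    };

    struct DirtyCardQueue { std::vector<size_t> deferred; };

    // Pause-time path: record the card instead of touching any RSet.
    void defer_rset_update(CardTable& ct, DirtyCardQueue& q,
                           const void* field, const void* heap_base) {
      size_t ci = ct.index_for(field, heap_base);
      if (ct.mark_deferred(ci)) q.deferred.push_back(ci);
    }

    int main() {
      char heap[1 << 12];
      CardTable ct(sizeof(heap) >> kCardShift);
      DirtyCardQueue q;
      defer_rset_update(ct, q, heap + 100, heap);
      defer_rset_update(ct, q, heap + 200, heap);  // same card: no new entry
      std::printf("deferred cards: %zu\n", q.deferred.size());  // prints 1
    }

The queued cards are later drained and turned into remembered-set updates; the new UpdateRSetOopsIntoCSDeferred closure further down applies this pattern with HotSpot's real card table and dirty card queues.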
@@ -175,15 +175,23 @@
     _cg1r(g1->concurrent_g1_refine()),
     _par_traversal_in_progress(false), _new_refs(NULL),
     _cards_scanned(NULL), _total_cards_scanned(0)
 {
   _seq_task = new SubTasksDone(NumSeqTasks);
-  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, ParallelGCThreads);
+  guarantee(n_workers() > 0, "There should be some workers");
+  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, n_workers());
+  for (uint i = 0; i < n_workers(); i++) {
+    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192,true);
+  }
 }

 HRInto_G1RemSet::~HRInto_G1RemSet() {
   delete _seq_task;
+  for (uint i = 0; i < n_workers(); i++) {
+    delete _new_refs[i];
+  }
+  FREE_C_HEAP_ARRAY(GrowableArray<oop*>*, _new_refs);
 }

 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
   if (_g1->is_in_g1_reserved(mr.start())) {
     _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
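This hunk moves ownership of the per-worker `_new_refs` buffers into the RemSet object itself: they used to be allocated at the start of every parallel pause (see the prepare/cleanup hunks below) and sized by ParallelGCThreads, which left the sequential case with no buffer at all. A minimal standard-C++ analogue of the new lifetime, with PerWorkerRefBuffers and OopStar as invented names:

    #include <cassert>
    #include <vector>

    typedef void* OopStar;                     // stand-in for HotSpot's oop*

    // Created once, sized by the worker count; only the contents are
    // cleared at the end of each pause, so no per-pause (re)allocation.
    class PerWorkerRefBuffers {
      std::vector<std::vector<OopStar> > _new_refs;
    public:
      explicit PerWorkerRefBuffers(unsigned n_workers) : _new_refs(n_workers) {
        assert(n_workers > 0 && "There should be some workers");
        for (size_t i = 0; i < _new_refs.size(); i++)
          _new_refs[i].reserve(8192);          // mirrors the 8192 initial size
      }
      void push(unsigned worker_i, OopStar p) {
        _new_refs[worker_i].push_back(p);
      }
      void clear_all() {                       // end-of-pause reset
        for (size_t i = 0; i < _new_refs.size(); i++)
          _new_refs[i].clear();                // capacity is retained
      }
    };

Keeping the buffers alive for the object's whole lifetime also means cleanup can still read them after scanning, which the evacuation-failure replay below depends on.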
@@ -279,12 +287,13 @@
     if (!card_region->in_collection_set()) {
       // If the card is dirty, then we will scan it during updateRS.
       if (!_ct_bs->is_card_claimed(card_index) &&
           !_ct_bs->is_card_dirty(card_index)) {
         assert(_ct_bs->is_card_clean(card_index) ||
-               _ct_bs->is_card_claimed(card_index),
-               "Card is either dirty, clean, or claimed");
+               _ct_bs->is_card_claimed(card_index) ||
+               _ct_bs->is_card_deferred(card_index),
+               "Card is either clean, claimed or deferred");
         if (_ct_bs->claim_card(card_index))
           scanCard(card_index, card_region);
       }
     }
   }
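With deferred updates in play, a card that is neither dirty nor claimed can legitimately be in a third state, so the assertion has to admit it. A small restatement of the scan filter under an invented bit encoding (the real CardTableModRefBS encoding differs, and there the card byte can also change concurrently, which is why the real assert keeps "claimed" as a possibility):

    #include <cassert>

    enum CardBits { kClaimed = 1, kDirty = 2, kDeferredBit = 4 };  // invented

    // Mirrors the guard in the hunk above: dirty cards are left for
    // updateRS, claimed cards for whichever worker claimed them, and
    // everything else (clean or deferred) is fair game to scan.
    inline bool should_scan_card(unsigned bits, bool region_in_cs) {
      if (region_in_cs) return false;     // CS regions are evacuated anyway
      if ((bits & kDirty) || (bits & kClaimed)) return false;
      assert(bits == 0 || (bits & kDeferredBit));
      return true;
    }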
@@ -336,18 +345,16 @@
   assert( _cards_scanned != NULL, "invariant" );
   _cards_scanned[worker_i] = scanRScl.cards_done();

   _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
   _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
-  if (ParallelGCThreads > 0) {
-    // In this case, we called scanNewRefsRS and recorded the corresponding
-    // time.
-    double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
-    if (scan_new_refs_time_ms > 0.0) {
-      closure_app_time_ms += scan_new_refs_time_ms;
-    }
-  }
+
+  double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
+  if (scan_new_refs_time_ms > 0.0) {
+    closure_app_time_ms += scan_new_refs_time_ms;
+  }
+
   _g1p->record_obj_copy_time(worker_i, closure_app_time_ms);
 }

 void HRInto_G1RemSet::updateRS(int worker_i) {
   ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
@@ -467,25 +474,21 @@
 HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc,
                                int worker_i) {
   double scan_new_refs_start_sec = os::elapsedTime();
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
-  while (_new_refs[worker_i]->is_nonempty()) {
-    oop* p = _new_refs[worker_i]->pop();
+  for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
+    oop* p = _new_refs[worker_i]->at(i);
     oop obj = *p;
     // *p was in the collection set when p was pushed on "_new_refs", but
     // another thread may have processed this location from an RS, so it
     // might not point into the CS any longer. If so, it's obviously been
     // processed, and we don't need to do anything further.
     if (g1h->obj_in_cs(obj)) {
       HeapRegion* r = g1h->heap_region_containing(p);

       DEBUG_ONLY(HeapRegion* to = g1h->heap_region_containing(obj));
-      assert(ParallelGCThreads > 1
-             || to->rem_set()->contains_reference(p),
-             "Invariant: pushed after being added."
-             "(Not reliable in parallel code.)");
       oc->set_region(r);
       // If "p" has already been processed concurrently, this is
       // idempotent.
       oc->do_oop(p);
     }
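The switch from a destructive pop() loop to indexed at(i) access is load-bearing: the same buffers must still be fully populated when cleanup_after_oops_into_collection_set_do replays them after an evacuation failure (new code below). A toy illustration of why the first consumer must not drain the array, with a std::vector standing in for GrowableArray<oop*>:

    #include <cstdio>
    #include <vector>

    typedef int Ref;                           // stand-in for oop*

    // First pass (scanNewRefsRS analogue): apply the copy closure, but
    // iterate by index so the entries survive.
    void scan_new_refs(const std::vector<Ref>& new_refs) {
      for (size_t i = 0; i < new_refs.size(); i++)
        std::printf("scan %d\n", new_refs[i]);
    }

    // Second pass (evacuation-failure analogue): the entries are still
    // there to be turned back into remembered-set updates.
    void replay_new_refs(const std::vector<Ref>& new_refs) {
      for (size_t i = 0; i < new_refs.size(); i++)
        std::printf("restore %d\n", new_refs[i]);
    }

    int main() {
      std::vector<Ref> refs;
      refs.push_back(1);
      refs.push_back(2);
      scan_new_refs(refs);     // a pop()-style loop would empty refs here
      replay_new_refs(refs);   // still sees both entries
      refs.clear();            // cleared only once, at end of pause
    }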
@@ -536,12 +539,12 @@
       scanNewRefsRS(oc, worker_i);
       scanRS(oc, worker_i);
     }
   } else {
     assert(worker_i == 0, "invariant");
-
     updateRS(0);
+    scanNewRefsRS(oc, 0);
     scanRS(oc, 0);
   }
 }

 void HRInto_G1RemSet::
@@ -557,15 +560,11 @@
   dcqs.concatenate_logs();

   assert(!_par_traversal_in_progress, "Invariant between iterations.");
   if (ParallelGCThreads > 0) {
     set_par_traversal(true);
-    int n_workers = _g1->workers()->total_workers();
-    _seq_task->set_par_threads(n_workers);
-    for (uint i = 0; i < ParallelGCThreads; i++)
-      _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192,true);
-
+    _seq_task->set_par_threads((int)n_workers());
     if (cg1r->do_traversal()) {
       updateRS(0);
       // Have to do this again after updaters
       cleanupHRRS();
     }
@@ -584,10 +583,57 @@
     HeapRegionRemSet* hrrs = r->rem_set();
     hrrs->init_for_par_iteration();
     return false;
   }
 };
+
+class UpdateRSetOopsIntoCSImmediate : public OopClosure {
+  G1CollectedHeap* _g1;
+public:
+  UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
+  virtual void do_oop(narrowOop* p) {
+    guarantee(false, "NYI");
+  }
+  virtual void do_oop(oop* p) {
+    HeapRegion* to = _g1->heap_region_containing(*p);
+    if (to->in_collection_set()) {
+      if (to->rem_set()->add_reference(p, 0)) {
+        _g1->schedule_popular_region_evac(to);
+      }
+    }
+  }
+};
+
+class UpdateRSetOopsIntoCSDeferred : public OopClosure {
+  G1CollectedHeap* _g1;
+  CardTableModRefBS* _ct_bs;
+  DirtyCardQueue* _dcq;
+public:
+  UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
+    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
+  virtual void do_oop(narrowOop* p) {
+    guarantee(false, "NYI");
+  }
+  virtual void do_oop(oop* p) {
+    oop obj = *p;
+    if (_g1->obj_in_cs(obj)) {
+      size_t card_index = _ct_bs->index_for(p);
+      if (_ct_bs->mark_card_deferred(card_index)) {
+        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
+      }
+    }
+  }
+};
+
+void HRInto_G1RemSet::new_refs_iterate(OopClosure* cl) {
+  for (size_t i = 0; i < n_workers(); i++) {
+    for (int j = 0; j < _new_refs[i]->length(); j++) {
+      oop* p = _new_refs[i]->at(j);
+      cl->do_oop(p);
+    }
+  }
+}

 void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
   for (uint i = 0; i < n_workers(); ++i)
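The two closures just added are the replay machinery that cleanup (next hunk) runs over the buffered references when evacuation fails: the immediate flavor pushes each entry straight into the target region's remembered set, while the deferred flavor marks and enqueues the covering card as sketched near the top. A condensed, compilable analogue of new_refs_iterate dispatching to either flavor; Ref, ImmediateRestore, DeferredRestore and the containers are invented stand-ins, and the narrowOop and popular-region details are left out:

    #include <cstddef>
    #include <set>
    #include <vector>

    typedef int* Ref;                            // stand-in for oop*

    struct OopClosure {
      virtual void do_oop(Ref p) = 0;
      virtual ~OopClosure() {}
    };

    // Immediate flavor: write the entry into the RSet right away.
    struct ImmediateRestore : OopClosure {
      std::set<Ref>* _rset;
      explicit ImmediateRestore(std::set<Ref>* rs) : _rset(rs) {}
      void do_oop(Ref p) { _rset->insert(p); }
    };

    // Deferred flavor: only queue the location; card marking keeps the
    // queue free of duplicates.
    struct DeferredRestore : OopClosure {
      std::vector<Ref>* _dcq;
      explicit DeferredRestore(std::vector<Ref>* q) : _dcq(q) {}
      void do_oop(Ref p) { _dcq->push_back(p); }
    };

    // Analogue of HRInto_G1RemSet::new_refs_iterate: one closure applied
    // to every buffered reference of every worker.
    void new_refs_iterate(std::vector<std::vector<Ref> >& new_refs,
                          OopClosure* cl) {
      for (size_t i = 0; i < new_refs.size(); i++)
        for (size_t j = 0; j < new_refs[i].size(); j++)
          cl->do_oop(new_refs[i][j]);
    }

    int main() {
      std::vector<std::vector<Ref> > bufs(2);    // two workers
      int slot = 0;
      bufs[1].push_back(&slot);
      std::vector<Ref> dcq;
      DeferredRestore restore(&dcq);
      new_refs_iterate(bufs, &restore);          // dcq now has one entry
      return (int)dcq.size() - 1;                // exits 0
    }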
@@ -607,15 +653,29 @@
   if (ParallelGCThreads > 0) {
     ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
     if (cg1r->do_traversal()) {
       cg1r->cg1rThread()->set_do_traversal(false);
     }
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      delete _new_refs[i];
-    }
     set_par_traversal(false);
   }
+
+  if (_g1->evacuation_failed()) {
+    // Restore remembered sets for the regions pointing into
+    // the collection set.
+    if (G1DeferredRSUpdate) {
+      DirtyCardQueue dcq(&_g1->dirty_card_queue_set());
+      UpdateRSetOopsIntoCSDeferred deferred_update(_g1, &dcq);
+      new_refs_iterate(&deferred_update);
+    } else {
+      UpdateRSetOopsIntoCSImmediate immediate_update(_g1);
+      new_refs_iterate(&immediate_update);
+    }
+  }
+  for (uint i = 0; i < n_workers(); i++) {
+    _new_refs[i]->clear();
+  }
+
   assert(!_par_traversal_in_progress, "Invariant between iterations.");
 }

 class UpdateRSObjectClosure: public ObjectClosure {
   UpdateRSOopClosure* _update_rs_oop_cl;
@@ -681,11 +741,12 @@
   {}

   bool doHeapRegion(HeapRegion* r) {
     if (!r->in_collection_set() &&
         !r->continuesHumongous() &&
-        !r->is_young()) {
+        !r->is_young() &&
+        !r->is_survivor()) {
       _update_rs_oop_cl.set_from(r);
       UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl);

       // For each run of dirty card in the region:
       // 1) Clear the cards.
@@ -818,11 +879,12 @@
   // as a result, it is possible for other threads to actually
   // allocate objects in the region (after they acquire the lock)
   // before all the cards on the region are dirtied. This is unlikely,
   // and it doesn't happen often, but it can happen. So, the extra
   // check below filters out those cards.
-  if (r->is_young()) {
+  if (r->is_young() || r->is_survivor()) {
     return;
   }
   // While we are processing RSet buffers during the collection, we
   // actually don't want to scan any cards on the collection set,
   // since we don't want to update remembered sets with entries that