comparison src/share/vm/gc_implementation/g1/g1RemSet.cpp @ 628:7bb995fbd3c0

Merge
author trims
date Thu, 12 Mar 2009 18:16:36 -0700
parents 0fbdb4381b99 87fa6e083d82
children ba50942c8138
comparing 580:ce2272390558 with 628:7bb995fbd3c0
@@ -103,37 +103,10 @@
                                         int worker_i) {
   IntoCSRegionClosure rc(_g1, oc);
   _g1->heap_region_iterate(&rc);
 }
 
-class UpdateRSOopClosure: public OopClosure {
-  HeapRegion* _from;
-  HRInto_G1RemSet* _rs;
-  int _worker_i;
-public:
-  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
-    _from(NULL), _rs(rs), _worker_i(worker_i) {
-    guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
-  }
-
-  void set_from(HeapRegion* from) {
-    assert(from != NULL, "from region must be non-NULL");
-    _from = from;
-  }
-
-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  virtual void do_oop(oop* p) {
-    assert(_from != NULL, "from region must be non-NULL");
-    _rs->par_write_ref(_from, p, _worker_i);
-  }
-  // Override: this closure is idempotent.
-  // bool idempotent() { return true; }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-};
-
 class UpdateRSOutOfRegionClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   ModRefBarrierSet* _mr_bs;
   UpdateRSOopClosure _cl;
   int _worker_i;
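UpdateRSOopClosure is deleted from this file, yet UpdateRSOutOfRegionClosure still embeds one (and UpdateRSObjectClosure further down still takes a pointer to one), so the class has presumably moved to a header rather than gone away. For readers unfamiliar with the closure idiom, here is a minimal standalone model of it; every name carries a Model suffix because none of this is HotSpot code:

    #include <cassert>
    #include <cstdio>

    struct HeapRegionModel { int id; };

    // Model of an OopClosure: do_oop is applied to every reference slot.
    struct OopClosureModel {
      virtual void do_oop(void** p) = 0;
      virtual ~OopClosureModel() {}
    };

    // Model of UpdateRSOopClosure: remember the source region, then
    // forward each slot to the remembered-set updater.
    struct UpdateRSOopClosureModel : OopClosureModel {
      HeapRegionModel* _from;
      int _worker_i;
      explicit UpdateRSOopClosureModel(int worker_i)
        : _from(nullptr), _worker_i(worker_i) {}
      void set_from(HeapRegionModel* from) {
        assert(from != nullptr && "from region must be non-NULL");
        _from = from;
      }
      void do_oop(void** p) override {
        assert(_from != nullptr && "from region must be non-NULL");
        // stands in for _rs->par_write_ref(_from, p, _worker_i)
        printf("record ref %p from region %d (worker %d)\n",
               (void*)p, _from->id, _worker_i);
      }
    };

    int main() {
      HeapRegionModel r = {7};
      UpdateRSOopClosureModel cl(0);
      cl.set_from(&r);     // the driver sets the source region once per region
      void* slot = nullptr;
      cl.do_oop(&slot);    // then applies the closure to every slot in it
    }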
@@ -175,15 +148,23 @@
     _cg1r(g1->concurrent_g1_refine()),
     _par_traversal_in_progress(false), _new_refs(NULL),
     _cards_scanned(NULL), _total_cards_scanned(0)
 {
   _seq_task = new SubTasksDone(NumSeqTasks);
-  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, ParallelGCThreads);
+  guarantee(n_workers() > 0, "There should be some workers");
+  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, n_workers());
+  for (uint i = 0; i < n_workers(); i++) {
+    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192,true);
+  }
 }
 
 HRInto_G1RemSet::~HRInto_G1RemSet() {
   delete _seq_task;
+  for (uint i = 0; i < n_workers(); i++) {
+    delete _new_refs[i];
+  }
+  FREE_C_HEAP_ARRAY(GrowableArray<oop*>*, _new_refs);
 }
 
 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
   if (_g1->is_in_g1_reserved(mr.start())) {
     _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
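The per-worker _new_refs buffers now live for the lifetime of the remset object: the constructor sizes the array by n_workers() (guaranteed positive, so the serial case gets a buffer too) and the destructor frees it, replacing the per-pause allocate/delete that the hunks further down remove. A minimal sketch of that lifecycle, with std::vector standing in for GrowableArray and NEW_C_HEAP_ARRAY:

    #include <cassert>
    #include <vector>

    // Hypothetical model of the _new_refs lifecycle after this change: one
    // growable buffer per worker, owned for the remset's whole lifetime.
    class NewRefsModel {
      std::vector<std::vector<void**> > _new_refs;   // one buffer per worker
    public:
      explicit NewRefsModel(unsigned n_workers) : _new_refs(n_workers) {
        assert(n_workers > 0 && "There should be some workers");
      }
      void record(unsigned worker_i, void** p) {
        _new_refs[worker_i].push_back(p);
      }
      // After a pause the buffers are cleared, not freed (cf. the
      // _new_refs[i]->clear() loop added to the cleanup further down).
      void reset() {
        for (auto& buf : _new_refs) buf.clear();
      }
    };

    int main() {
      NewRefsModel refs(4);    // sized by n_workers(), not ParallelGCThreads
      void* slot = nullptr;
      refs.record(0, &slot);
      refs.reset();            // buffers survive for the next pause
    }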
@@ -279,12 +260,13 @@
       if (!card_region->in_collection_set()) {
         // If the card is dirty, then we will scan it during updateRS.
         if (!_ct_bs->is_card_claimed(card_index) &&
             !_ct_bs->is_card_dirty(card_index)) {
           assert(_ct_bs->is_card_clean(card_index) ||
-                 _ct_bs->is_card_claimed(card_index),
-                 "Card is either dirty, clean, or claimed");
+                 _ct_bs->is_card_claimed(card_index) ||
+                 _ct_bs->is_card_deferred(card_index),
+                 "Card is either clean, claimed or deferred");
           if (_ct_bs->claim_card(card_index))
             scanCard(card_index, card_region);
         }
       }
     }
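The widened assert admits a third non-dirty state: a card can be deferred, i.e. queued for a remembered-set update that will be replayed later instead of applied at write time. A standalone sketch of such a card-state check plus the CAS-style claim; the encodings below are invented for illustration, the real ones belong to CardTableModRefBS:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Invented card encodings, for illustration only.
    const uint8_t kClean    = 0xff;
    const uint8_t kClaimed  = 0x01;
    const uint8_t kDeferred = 0x02;

    // The invariant the widened assert states: a card that is neither
    // claimed nor dirty must be clean, claimed or deferred.
    bool clean_claimed_or_deferred(uint8_t v) {
      return v == kClean || v == kClaimed || v == kDeferred;
    }

    // Claiming is a CAS, so exactly one worker wins the right to scan.
    bool claim_card(std::atomic<uint8_t>& card) {
      uint8_t expected = card.load();
      while (expected != kClaimed) {
        if (card.compare_exchange_weak(expected, kClaimed)) return true;
      }
      return false;
    }

    int main() {
      std::atomic<uint8_t> card(kClean);
      printf("invariant holds: %d\n", clean_claimed_or_deferred(card.load()));
      printf("first claim: %d, second claim: %d\n",
             claim_card(card), claim_card(card));
    }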
@@ -336,18 +318,16 @@
   assert( _cards_scanned != NULL, "invariant" );
   _cards_scanned[worker_i] = scanRScl.cards_done();
 
   _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
   _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
-  if (ParallelGCThreads > 0) {
-    // In this case, we called scanNewRefsRS and recorded the corresponding
-    // time.
-    double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
-    if (scan_new_refs_time_ms > 0.0) {
-      closure_app_time_ms += scan_new_refs_time_ms;
-    }
-  }
+
+  double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
+  if (scan_new_refs_time_ms > 0.0) {
+    closure_app_time_ms += scan_new_refs_time_ms;
+  }
+
   _g1p->record_obj_copy_time(worker_i, closure_app_time_ms);
 }
 
 void HRInto_G1RemSet::updateRS(int worker_i) {
   ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
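The ParallelGCThreads guard around the timing fix-up is gone because the serial path now also runs scanNewRefsRS (see the hunk at old line 536 below), so the scan-new-refs time is folded into the object-copy bucket unconditionally. A toy version of that accounting, with made-up numbers:

    #include <cstdio>

    int main() {
      // Model of the accounting after the change: time spent in
      // scanNewRefsRS (recorded separately, in ms) is folded into the
      // object-copy bucket unconditionally, because the serial path now
      // also runs that phase.
      double closure_app_time_ms   = 12.5;
      double scan_new_refs_time_ms = 3.0;   // 0.0 if the phase did no work
      if (scan_new_refs_time_ms > 0.0) {
        closure_app_time_ms += scan_new_refs_time_ms;
      }
      printf("recorded obj copy time: %.1f ms\n", closure_app_time_ms);
    }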
@@ -467,25 +447,21 @@
 HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc,
                                int worker_i) {
   double scan_new_refs_start_sec = os::elapsedTime();
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
-  while (_new_refs[worker_i]->is_nonempty()) {
-    oop* p = _new_refs[worker_i]->pop();
+  for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
+    oop* p = _new_refs[worker_i]->at(i);
     oop obj = *p;
     // *p was in the collection set when p was pushed on "_new_refs", but
     // another thread may have processed this location from an RS, so it
     // might not point into the CS any longer. If so, it's obviously been
     // processed, and we don't need to do anything further.
     if (g1h->obj_in_cs(obj)) {
       HeapRegion* r = g1h->heap_region_containing(p);
 
       DEBUG_ONLY(HeapRegion* to = g1h->heap_region_containing(obj));
-      assert(ParallelGCThreads > 1
-             || to->rem_set()->contains_reference(p),
-             "Invariant: pushed after being added."
-             "(Not reliable in parallel code.)");
       oc->set_region(r);
       // If "p" has already been processed concurrently, this is
       // idempotent.
       oc->do_oop(p);
     }
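The loop changes from destructively popping _new_refs to an indexed, non-destructive walk. That matters later: cleanup_after_oops_into_collection_set_do (below) re-walks the same buffers via new_refs_iterate when evacuation fails, and only then clears them. A small sketch of the difference, with std::vector standing in for GrowableArray:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int*> new_refs;
      int a = 0, b = 0;
      new_refs.push_back(&a);
      new_refs.push_back(&b);

      // Old shape (destructive): while (!new_refs.empty()) { ...pop_back(); }
      // leaves the buffer empty, so nothing is left to replay.

      // New shape (non-destructive): entries survive this pass and remain
      // available for new_refs_iterate if evacuation fails.
      for (std::size_t i = 0; i < new_refs.size(); i++) {
        printf("process slot %p\n", (void*)new_refs[i]);
      }
      printf("still buffered: %zu entries\n", new_refs.size());
    }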
@@ -536,12 +512,12 @@
       scanNewRefsRS(oc, worker_i);
       scanRS(oc, worker_i);
     }
   } else {
     assert(worker_i == 0, "invariant");
-
     updateRS(0);
+    scanNewRefsRS(oc, 0);
     scanRS(oc, 0);
   }
 }
 
 void HRInto_G1RemSet::
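With scanNewRefsRS(oc, 0) added, the serial path now runs the same three phases, in the same order, as each parallel worker. A trivial model of the shared ordering (stub functions, not HotSpot code):

    #include <cstdio>

    // Stub phases, illustrative only.
    static void updateRS(int w)      { printf("updateRS(%d)\n", w); }
    static void scanNewRefsRS(int w) { printf("scanNewRefsRS(%d)\n", w); }
    static void scanRS(int w)        { printf("scanRS(%d)\n", w); }

    int main() {
      // Serial path after the change; only the worker fan-out differs
      // from the parallel case.
      updateRS(0);
      scanNewRefsRS(0);   // the call this hunk adds to the serial path
      scanRS(0);
    }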
@@ -557,15 +533,11 @@
   dcqs.concatenate_logs();
 
   assert(!_par_traversal_in_progress, "Invariant between iterations.");
   if (ParallelGCThreads > 0) {
     set_par_traversal(true);
-    int n_workers = _g1->workers()->total_workers();
-    _seq_task->set_par_threads(n_workers);
-    for (uint i = 0; i < ParallelGCThreads; i++)
-      _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192,true);
-
+    _seq_task->set_par_threads((int)n_workers());
     if (cg1r->do_traversal()) {
       updateRS(0);
       // Have to do this again after updaters
       cleanupHRRS();
     }
@@ -584,10 +556,57 @@
     HeapRegionRemSet* hrrs = r->rem_set();
     hrrs->init_for_par_iteration();
     return false;
   }
 };
+
+class UpdateRSetOopsIntoCSImmediate : public OopClosure {
+  G1CollectedHeap* _g1;
+public:
+  UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
+  virtual void do_oop(narrowOop* p) {
+    guarantee(false, "NYI");
+  }
+  virtual void do_oop(oop* p) {
+    HeapRegion* to = _g1->heap_region_containing(*p);
+    if (to->in_collection_set()) {
+      if (to->rem_set()->add_reference(p, 0)) {
+        _g1->schedule_popular_region_evac(to);
+      }
+    }
+  }
+};
+
+class UpdateRSetOopsIntoCSDeferred : public OopClosure {
+  G1CollectedHeap* _g1;
+  CardTableModRefBS* _ct_bs;
+  DirtyCardQueue* _dcq;
+public:
+  UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
+    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
+  virtual void do_oop(narrowOop* p) {
+    guarantee(false, "NYI");
+  }
+  virtual void do_oop(oop* p) {
+    oop obj = *p;
+    if (_g1->obj_in_cs(obj)) {
+      size_t card_index = _ct_bs->index_for(p);
+      if (_ct_bs->mark_card_deferred(card_index)) {
+        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
+      }
+    }
+  }
+};
+
+void HRInto_G1RemSet::new_refs_iterate(OopClosure* cl) {
+  for (size_t i = 0; i < n_workers(); i++) {
+    for (int j = 0; j < _new_refs[i]->length(); j++) {
+      oop* p = _new_refs[i]->at(j);
+      cl->do_oop(p);
+    }
+  }
+}
 
 void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
   for (uint i = 0; i < n_workers(); ++i)
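The two new closures are alternative ways to repair remembered sets for references into the collection set: UpdateRSetOopsIntoCSImmediate adds the entry on the spot, while UpdateRSetOopsIntoCSDeferred only marks the card "deferred" and enqueues it so refinement replays it later. A standalone model of the deferred protocol; card size, table size and the deferred bit are all made up for illustration:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <queue>

    // Model of the deferred-update protocol (names and sizes are
    // illustrative, not HotSpot's): instead of updating a remembered set
    // at once, mark the card "deferred" and queue it for later replay.
    const uint8_t kDeferredBit = 0x02;

    struct CardTableModel {
      uint8_t cards[16];
      CardTableModel() { for (int i = 0; i < 16; i++) cards[i] = 0; }
      std::size_t index_for(const void* p) {
        // 512-byte cards, tiny 16-entry table for the demo.
        return (reinterpret_cast<std::uintptr_t>(p) >> 9) % 16;
      }
      // True only for the first caller, so each card is enqueued once.
      bool mark_card_deferred(std::size_t i) {
        if (cards[i] & kDeferredBit) return false;
        cards[i] |= kDeferredBit;
        return true;
      }
    };

    int main() {
      CardTableModel ct;
      std::queue<std::size_t> dcq;     // stands in for DirtyCardQueue
      int slot_a = 0, slot_b = 0;      // two fields, likely on one card
      const void* refs[] = { &slot_a, &slot_b };
      for (int i = 0; i < 2; i++) {
        std::size_t idx = ct.index_for(refs[i]);
        if (ct.mark_card_deferred(idx)) {  // only the first hit enqueues
          dcq.push(idx);
        }
      }
      printf("cards queued: %zu\n", dcq.size());
    }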
@@ -607,15 +626,29 @@
   if (ParallelGCThreads > 0) {
     ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
     if (cg1r->do_traversal()) {
       cg1r->cg1rThread()->set_do_traversal(false);
     }
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      delete _new_refs[i];
-    }
     set_par_traversal(false);
   }
+
+  if (_g1->evacuation_failed()) {
+    // Restore remembered sets for the regions pointing into
+    // the collection set.
+    if (G1DeferredRSUpdate) {
+      DirtyCardQueue dcq(&_g1->dirty_card_queue_set());
+      UpdateRSetOopsIntoCSDeferred deferred_update(_g1, &dcq);
+      new_refs_iterate(&deferred_update);
+    } else {
+      UpdateRSetOopsIntoCSImmediate immediate_update(_g1);
+      new_refs_iterate(&immediate_update);
+    }
+  }
+  for (uint i = 0; i < n_workers(); i++) {
+    _new_refs[i]->clear();
+  }
+
   assert(!_par_traversal_in_progress, "Invariant between iterations.");
 }
 
 class UpdateRSObjectClosure: public ObjectClosure {
   UpdateRSOopClosure* _update_rs_oop_cl;
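The new cleanup logic handles evacuation failure: objects that should have been copied out of the collection set stay where they are, so every reference into the CS recorded in _new_refs must be turned back into a remembered-set entry, either immediately or via deferred cards depending on G1DeferredRSUpdate; afterwards the buffers are cleared for reuse. A compact model of that dispatch (illustrative names only):

    #include <cstdio>
    #include <vector>

    // Model of the cleanup: if evacuation failed, replay every recorded
    // reference through one of two repair closures, then clear buffers.
    struct ClosureModel {
      virtual void do_oop(int* p) = 0;
      virtual ~ClosureModel() {}
    };
    struct ImmediateModel : ClosureModel {
      void do_oop(int* p) override { printf("add_reference(%p) now\n", (void*)p); }
    };
    struct DeferredModel : ClosureModel {
      void do_oop(int* p) override { printf("defer card for %p\n", (void*)p); }
    };

    int main() {
      bool evacuation_failed  = true;
      bool deferred_rs_update = true;   // cf. the G1DeferredRSUpdate flag
      std::vector<std::vector<int*> > new_refs(2);  // one list per worker
      int x = 0;
      new_refs[1].push_back(&x);

      if (evacuation_failed) {
        ImmediateModel imm;
        DeferredModel  def;
        ClosureModel& cl = deferred_rs_update
            ? static_cast<ClosureModel&>(def)
            : static_cast<ClosureModel&>(imm);
        for (auto& buf : new_refs)            // new_refs_iterate
          for (int* p : buf) cl.do_oop(p);
      }
      for (auto& buf : new_refs) buf.clear(); // buffers reused next pause
    }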
@@ -681,11 +714,12 @@
   {}
 
   bool doHeapRegion(HeapRegion* r) {
     if (!r->in_collection_set() &&
         !r->continuesHumongous() &&
-        !r->is_young()) {
+        !r->is_young() &&
+        !r->is_survivor()) {
       _update_rs_oop_cl.set_from(r);
       UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl);
 
       // For each run of dirty card in the region:
       //  1) Clear the cards.
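Survivor regions are now filtered out alongside other young regions. Survivors are collected in the next evacuation pause anyway, so maintaining remembered-set entries for references out of them is wasted work, the same reasoning as the existing is_young() test. The widened predicate as a standalone sketch (model types, not HotSpot's):

    #include <cstdio>

    struct RegionModel {
      bool in_collection_set;
      bool continues_humongous;
      bool is_young;
      bool is_survivor;
    };

    // Only regions that are none of the below have their dirty cards fed
    // into remembered-set updates; survivors are the newly excluded case.
    bool needs_rs_update(const RegionModel& r) {
      return !r.in_collection_set &&
             !r.continues_humongous &&
             !r.is_young &&
             !r.is_survivor;
    }

    int main() {
      RegionModel survivor = { false, false, false, true };
      RegionModel old_gen  = { false, false, false, false };
      printf("survivor scanned: %s\n", needs_rs_update(survivor) ? "yes" : "no");
      printf("old region scanned: %s\n", needs_rs_update(old_gen) ? "yes" : "no");
    }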
@@ -818,11 +852,11 @@
   // as a result, it is possible for other threads to actually
   // allocate objects in the region (after the acquire the lock)
   // before all the cards on the region are dirtied. This is unlikely,
   // and it doesn't happen often, but it can happen. So, the extra
   // check below filters out those cards.
-  if (r->is_young()) {
+  if (r->is_young() || r->is_survivor()) {
     return;
   }
   // While we are processing RSet buffers during the collection, we
   // actually don't want to scan any cards on the collection set,
   // since we don't want to update remebered sets with entries that