Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/g1RemSet.cpp @ 1708:a03ae377b2e8
6930581: G1: assert(ParallelGCThreads > 1 || n_yielded() == _hrrs->occupied(),"Should have yielded all the ..
Summary: During RSet updating, when ParallelGCThreads is zero, references that point into the collection set are added directly to the referenced region's RSet. This can cause the sparse table in the RSet to expand. RSet scanning and the "occupied" routine will then operate on different instances of the sparse table causing the assert to trip. This may also cause some cards added post expansion to be missed during RSet scanning. When ParallelGCThreads is non-zero such references are recorded on the "references to be scanned" queue and the card containing the reference is recorded in a dirty card queue for use in the event of an evacuation failure. Employ the parallel code in the serial case to avoid expanding the RSets of regions in the collection set.
Reviewed-by: iveresov, ysr, tonyp
author | johnc |
---|---|
date | Fri, 06 Aug 2010 10:17:21 -0700 |
parents | 2d160770d2e5 |
children | 8b10f48633dc |
comparison
equal
deleted
inserted
replaced
1707:0ce1569c90e5 | 1708:a03ae377b2e8 |
---|---|
120 }; | 120 }; |
121 | 121 |
122 HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs) | 122 HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs) |
123 : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()), | 123 : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()), |
124 _cg1r(g1->concurrent_g1_refine()), | 124 _cg1r(g1->concurrent_g1_refine()), |
125 _par_traversal_in_progress(false), | 125 _traversal_in_progress(false), |
126 _cset_rs_update_cl(NULL), | 126 _cset_rs_update_cl(NULL), |
127 _cards_scanned(NULL), _total_cards_scanned(0) | 127 _cards_scanned(NULL), _total_cards_scanned(0) |
128 { | 128 { |
129 _seq_task = new SubTasksDone(NumSeqTasks); | 129 _seq_task = new SubTasksDone(NumSeqTasks); |
130 guarantee(n_workers() > 0, "There should be some workers"); | 130 guarantee(n_workers() > 0, "There should be some workers"); |
482 // * scanned for references that point into the collection set | 482 // * scanned for references that point into the collection set |
483 // and the RSet of the corresponding region in the collection set | 483 // and the RSet of the corresponding region in the collection set |
484 // is updated immediately. | 484 // is updated immediately. |
485 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set()); | 485 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set()); |
486 | 486 |
487 if (ParallelGCThreads > 0) { | 487 assert((ParallelGCThreads > 0) || worker_i == 0, "invariant"); |
488 // The two flags below were introduced temporarily to serialize | 488 |
489 // the updating and scanning of remembered sets. There are some | 489 // The two flags below were introduced temporarily to serialize |
490 // race conditions when these two operations are done in parallel | 490 // the updating and scanning of remembered sets. There are some |
491 // and they are causing failures. When we resolve said race | 491 // race conditions when these two operations are done in parallel |
492 // conditions, we'll revert back to parallel remembered set | 492 // and they are causing failures. When we resolve said race |
493 // updating and scanning. See CRs 6677707 and 6677708. | 493 // conditions, we'll revert back to parallel remembered set |
494 if (G1UseParallelRSetUpdating || (worker_i == 0)) { | 494 // updating and scanning. See CRs 6677707 and 6677708. |
495 updateRS(&into_cset_dcq, worker_i); | 495 if (G1UseParallelRSetUpdating || (worker_i == 0)) { |
496 } else { | 496 updateRS(&into_cset_dcq, worker_i); |
497 _g1p->record_update_rs_processed_buffers(worker_i, 0.0); | |
498 _g1p->record_update_rs_time(worker_i, 0.0); | |
499 } | |
500 if (G1UseParallelRSetScanning || (worker_i == 0)) { | |
501 scanRS(oc, worker_i); | |
502 } else { | |
503 _g1p->record_scan_rs_time(worker_i, 0.0); | |
504 } | |
505 } else { | 497 } else { |
506 assert(worker_i == 0, "invariant"); | 498 _g1p->record_update_rs_processed_buffers(worker_i, 0.0); |
507 updateRS(&into_cset_dcq, 0); | 499 _g1p->record_update_rs_time(worker_i, 0.0); |
508 scanRS(oc, 0); | 500 } |
501 if (G1UseParallelRSetScanning || (worker_i == 0)) { | |
502 scanRS(oc, worker_i); | |
503 } else { | |
504 _g1p->record_scan_rs_time(worker_i, 0.0); | |
509 } | 505 } |
510 | 506 |
511 // We now clear the cached values of _cset_rs_update_cl for this worker | 507 // We now clear the cached values of _cset_rs_update_cl for this worker |
512 _cset_rs_update_cl[worker_i] = NULL; | 508 _cset_rs_update_cl[worker_i] = NULL; |
513 } | 509 } |
522 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); | 518 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); |
523 _g1->set_refine_cte_cl_concurrency(false); | 519 _g1->set_refine_cte_cl_concurrency(false); |
524 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | 520 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
525 dcqs.concatenate_logs(); | 521 dcqs.concatenate_logs(); |
526 | 522 |
527 assert(!_par_traversal_in_progress, "Invariant between iterations."); | 523 assert(!_traversal_in_progress, "Invariant between iterations."); |
524 set_traversal(true); | |
528 if (ParallelGCThreads > 0) { | 525 if (ParallelGCThreads > 0) { |
529 set_par_traversal(true); | |
530 _seq_task->set_par_threads((int)n_workers()); | 526 _seq_task->set_par_threads((int)n_workers()); |
531 } | 527 } |
532 guarantee( _cards_scanned == NULL, "invariant" ); | 528 guarantee( _cards_scanned == NULL, "invariant" ); |
533 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers()); | 529 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers()); |
534 for (uint i = 0; i < n_workers(); ++i) { | 530 for (uint i = 0; i < n_workers(); ++i) { |
621 cleanUpIteratorsClosure iterClosure; | 617 cleanUpIteratorsClosure iterClosure; |
622 _g1->collection_set_iterate(&iterClosure); | 618 _g1->collection_set_iterate(&iterClosure); |
623 // Set all cards back to clean. | 619 // Set all cards back to clean. |
624 _g1->cleanUpCardTable(); | 620 _g1->cleanUpCardTable(); |
625 | 621 |
626 if (ParallelGCThreads > 0) { | 622 set_traversal(false); |
627 set_par_traversal(false); | |
628 } | |
629 | 623 |
630 DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set(); | 624 DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set(); |
631 int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num(); | 625 int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num(); |
632 | 626 |
633 if (_g1->evacuation_failed()) { | 627 if (_g1->evacuation_failed()) { |
658 _g1->into_cset_dirty_card_queue_set().clear(); | 652 _g1->into_cset_dirty_card_queue_set().clear(); |
659 assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0, | 653 assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0, |
660 "all buffers should be freed"); | 654 "all buffers should be freed"); |
661 _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers(); | 655 _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers(); |
662 | 656 |
663 assert(!_par_traversal_in_progress, "Invariant between iterations."); | 657 assert(!_traversal_in_progress, "Invariant between iterations."); |
664 } | 658 } |
665 | 659 |
666 class UpdateRSObjectClosure: public ObjectClosure { | 660 class UpdateRSObjectClosure: public ObjectClosure { |
667 UpdateRSOopClosure* _update_rs_oop_cl; | 661 UpdateRSOopClosure* _update_rs_oop_cl; |
668 public: | 662 public: |