src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 4837:eff609af17d7

7127706: G1: re-enable survivors during the initial-mark pause
Summary: Re-enable survivors during the initial-mark pause. Afterwards, the
concurrent marking threads have to scan them and mark everything reachable
from them. The next GC will have to wait for the survivors to be scanned.
Reviewed-by: brutisso, johnc
author tonyp
date Wed, 25 Jan 2012 12:58:23 -0500
parents d30fa85f9994
children 2a0172480595
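In outline, the change adds a three-phase hand-off around the new CMRootRegions abstraction: the initial-mark pause registers the surviving young regions as root regions, the concurrent marking workers scan them before normal marking starts, and the next evacuation pause waits for that scan to complete. A minimal sketch of the lifecycle, using the names added in the hunks below (the surrounding driver code is illustrative, not the actual call sites):

    // Phase 1 -- initial-mark pause (safepoint): register survivors as
    // root regions for the upcoming marking cycle.
    _cm->root_regions()->prepare_for_scan();

    // Phase 2 -- concurrent phase: workers claim and scan root regions,
    // then declare the scan finished.
    HeapRegion* hr;
    while ((hr = _cm->root_regions()->claim_next()) != NULL) {
      _cm->scanRootRegion(hr, worker_id);
    }
    _cm->root_regions()->scan_finished();

    // Phase 3 -- next evacuation pause: block until the scan completes,
    // so survivor regions are never evacuated while still being scanned.
    _cm->root_regions()->wait_until_scan_finished();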
@@ -458,10 +458,88 @@
   return (_g1h->is_obj_ill(obj)
           || (_g1h->is_in_permanent(obj)
               && !nextMarkBitMap()->isMarked((HeapWord*)obj)));
 }
 
+CMRootRegions::CMRootRegions() :
+  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
+  _should_abort(false), _next_survivor(NULL) { }
+
+void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
+  _young_list = g1h->young_list();
+  _cm = cm;
+}
+
+void CMRootRegions::prepare_for_scan() {
+  assert(!scan_in_progress(), "pre-condition");
+
+  // Currently, only survivors can be root regions.
+  assert(_next_survivor == NULL, "pre-condition");
+  _next_survivor = _young_list->first_survivor_region();
+  _scan_in_progress = (_next_survivor != NULL);
+  _should_abort = false;
+}
+
+HeapRegion* CMRootRegions::claim_next() {
+  if (_should_abort) {
+    // If someone has set the should_abort flag, we return NULL to
+    // force the caller to bail out of their loop.
+    return NULL;
+  }
+
+  // Currently, only survivors can be root regions.
+  HeapRegion* res = _next_survivor;
+  if (res != NULL) {
+    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+    // Read it again in case it changed while we were waiting for the lock.
+    res = _next_survivor;
+    if (res != NULL) {
+      if (res == _young_list->last_survivor_region()) {
+        // We just claimed the last survivor so store NULL to indicate
+        // that we're done.
+        _next_survivor = NULL;
+      } else {
+        _next_survivor = res->get_next_young_region();
+      }
+    } else {
+      // Someone else claimed the last survivor while we were trying
+      // to take the lock so nothing else to do.
+    }
+  }
+  assert(res == NULL || res->is_survivor(), "post-condition");
+
+  return res;
+}
+
+void CMRootRegions::scan_finished() {
+  assert(scan_in_progress(), "pre-condition");
+
+  // Currently, only survivors can be root regions.
+  if (!_should_abort) {
+    assert(_next_survivor == NULL, "we should have claimed all survivors");
+  }
+  _next_survivor = NULL;
+
+  {
+    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+    _scan_in_progress = false;
+    RootRegionScan_lock->notify_all();
+  }
+}
+
+bool CMRootRegions::wait_until_scan_finished() {
+  if (!scan_in_progress()) return false;
+
+  {
+    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+    while (scan_in_progress()) {
+      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
+    }
+  }
+  return true;
+}
+
 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
 
 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
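For context, wait_until_scan_finished() above is the hook behind the summary's "The next GC will have to wait for the survivors to be scanned". Its caller sits in the pause code outside this changeset excerpt; a minimal sketch of such a call site, with the wrapper function name here being hypothetical:

    // Hypothetical pause prologue (the real caller is in G1CollectedHeap,
    // not in this excerpt): before evacuating, wait for any in-flight
    // root region scan so survivors are not moved mid-scan.
    void example_wait_for_root_region_scan(ConcurrentMark* cm) {
      // Returns false immediately if no scan is in progress; otherwise
      // blocks on RootRegionScan_lock until scan_finished() notifies.
      if (cm->root_regions()->wait_until_scan_finished()) {
        // The pause had to wait; a timer could record the stall here.
      }
    }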
@@ -545,10 +623,12 @@
   assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
   assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
 
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
+
+  _root_regions.init(_g1h, this);
 
   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
 
   _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_task_num);
@@ -861,10 +941,12 @@
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   // This is the start of the marking cycle; we expect all
   // threads to have SATB queues with active set to false.
   satb_mq_set.set_active_all_threads(true, /* new active value */
                                      false /* expected_active */);
+
+  _root_regions.prepare_for_scan();
 
   // update_g1_committed() will be called at the end of an evac pause
   // when marking is on. So, it's also called at the end of the
   // initial-mark pause to update the heap end, if the heap expands
   // during it. No need to call it here.
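Note the ordering in the hunk above: both calls run inside the initial-mark safepoint. Restated below as an illustrative comment sketch (no new behavior):

    // Inside the initial-mark pause (a safepoint):
    satb_mq_set.set_active_all_threads(true,   // SATB barriers on first,
                                       false); // expecting them inactive
    _root_regions.prepare_for_scan();          // then snapshot survivors
    // Mutators are stopped throughout, so the survivor list read via
    // young_list()->first_survivor_region() cannot change underneath
    // prepare_for_scan().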
@@ -1153,10 +1235,73 @@
   }
   // If we are not running with any parallel GC threads we will not
   // have spawned any marking threads either. Hence the number of
   // concurrent workers should be 0.
   return 0;
+}
+
+void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
+  // Currently, only survivors can be root regions.
+  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
+  G1RootRegionScanClosure cl(_g1h, this, worker_id);
+
+  const uintx interval = PrefetchScanIntervalInBytes;
+  HeapWord* curr = hr->bottom();
+  const HeapWord* end = hr->top();
+  while (curr < end) {
+    Prefetch::read(curr, interval);
+    oop obj = oop(curr);
+    int size = obj->oop_iterate(&cl);
+    assert(size == obj->size(), "sanity");
+    curr += size;
+  }
+}
+
+class CMRootRegionScanTask : public AbstractGangTask {
+private:
+  ConcurrentMark* _cm;
+
+public:
+  CMRootRegionScanTask(ConcurrentMark* cm) :
+    AbstractGangTask("Root Region Scan"), _cm(cm) { }
+
+  void work(uint worker_id) {
+    assert(Thread::current()->is_ConcurrentGC_thread(),
+           "this should only be done by a conc GC thread");
+
+    CMRootRegions* root_regions = _cm->root_regions();
+    HeapRegion* hr = root_regions->claim_next();
+    while (hr != NULL) {
+      _cm->scanRootRegion(hr, worker_id);
+      hr = root_regions->claim_next();
+    }
+  }
+};
+
+void ConcurrentMark::scanRootRegions() {
+  // scan_in_progress() will have been set to true only if there was
+  // at least one root region to scan. So, if it's false, we
+  // should not attempt to do any further work.
+  if (root_regions()->scan_in_progress()) {
+    _parallel_marking_threads = calc_parallel_marking_threads();
+    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
+           "Maximum number of marking threads exceeded");
+    uint active_workers = MAX2(1U, parallel_marking_threads());
+
+    CMRootRegionScanTask task(this);
+    if (parallel_marking_threads() > 0) {
+      _parallel_workers->set_active_workers((int) active_workers);
+      _parallel_workers->run_task(&task);
+    } else {
+      task.work(0);
+    }
+
+    // It's possible that has_aborted() is true here without actually
+    // aborting the survivor scan earlier. This is OK as it's
+    // mainly used for sanity checking.
+    root_regions()->scan_finished();
+  }
 }
 
 void ConcurrentMark::markFromRoots() {
   // we might be tempted to assert that:
   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
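The concurrent mark thread is expected to call scanRootRegions() before markFromRoots(), whose opening lines appear in the trailing context above. A sketch of that ordering, assuming a driver loop like ConcurrentMarkThread's (the wrapper function here is illustrative, not the actual call site):

    // Illustrative cycle ordering (the real loop lives in
    // ConcurrentMarkThread::run(), outside this file):
    void example_concurrent_cycle(ConcurrentMark* cm) {
      cm->scanRootRegions();  // scan survivors first; a pause starting now
                              // blocks in wait_until_scan_finished()
      cm->markFromRoots();    // then the normal concurrent marking phase
    }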