comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 4711:adedfbbf0360

7120038: G1: ParallelGCThreads==0 is broken
Summary: Running G1 with ParallelGCThreads==0 results in various crashes and asserts. Most of these are caused by unguarded references to the worker threads array or an incorrect number of active workers.
Reviewed-by: jmasa, tonyp
author johnc
date Fri, 16 Dec 2011 11:40:00 -0800
parents dc467e8b2c5e
children 441e946dc1af
comparison of 4710:41406797186b with 4711:adedfbbf0360
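
The changeset summary above boils down to one recurring guard: code that references the parallel worker gang must first check G1CollectedHeap::use_parallel_gc_threads(), and otherwise run the work serially on the calling thread. Below is a minimal standalone sketch of that pattern; WorkerGang, MarkTask, run_marking and the global pointer are hypothetical stand-ins, not the HotSpot types.

#include <cstddef>

// Hypothetical stand-ins for the VM's FlexibleWorkGang / AbstractGangTask.
struct MarkTask {
  void work(int worker_id) { (void)worker_id; /* one worker's share of marking */ }
};

struct WorkerGang {
  int _active_workers;
  void set_active_workers(int n) { _active_workers = n; }
  void run_task(MarkTask* t) { for (int i = 0; i < _active_workers; ++i) t->work(i); }
};

// With ParallelGCThreads == 0 no gang is ever created, so this stays NULL.
static WorkerGang* _parallel_workers = NULL;

static bool use_parallel_gc_threads() { return _parallel_workers != NULL; }

void run_marking(MarkTask* task, size_t n_workers) {
  if (use_parallel_gc_threads() && n_workers > 0) {
    _parallel_workers->set_active_workers((int)n_workers);
    _parallel_workers->run_task(task);   // fan the task out to the gang
  } else {
    task->work(0);                       // serial fallback on the calling thread
  }
}

Each hunk below either applies this split (run_task() for the gang, work(0) inline) or fixes the worker count that feeds it.
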
1115 ~CMConcurrentMarkingTask() { } 1115 ~CMConcurrentMarkingTask() { }
1116 }; 1116 };
1117 1117
1118 // Calculates the number of active workers for a concurrent 1118 // Calculates the number of active workers for a concurrent
1119 // phase. 1119 // phase.
1120 int ConcurrentMark::calc_parallel_marking_threads() { 1120 size_t ConcurrentMark::calc_parallel_marking_threads() {
1121 1121 if (G1CollectedHeap::use_parallel_gc_threads()) {
1122 size_t n_conc_workers; 1122 size_t n_conc_workers = 0;
1123 if (!G1CollectedHeap::use_parallel_gc_threads()) {
1124 n_conc_workers = 1;
1125 } else {
1126 if (!UseDynamicNumberOfGCThreads || 1123 if (!UseDynamicNumberOfGCThreads ||
1127 (!FLAG_IS_DEFAULT(ConcGCThreads) && 1124 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1128 !ForceDynamicNumberOfGCThreads)) { 1125 !ForceDynamicNumberOfGCThreads)) {
1129 n_conc_workers = max_parallel_marking_threads(); 1126 n_conc_workers = max_parallel_marking_threads();
1130 } else { 1127 } else {
1135 parallel_marking_threads(), 1132 parallel_marking_threads(),
1136 Threads::number_of_non_daemon_threads()); 1133 Threads::number_of_non_daemon_threads());
1137 // Don't scale down "n_conc_workers" by scale_parallel_threads() because 1134 // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1138 // that scaling has already gone into "_max_parallel_marking_threads". 1135 // that scaling has already gone into "_max_parallel_marking_threads".
1139 } 1136 }
1140 } 1137 assert(n_conc_workers > 0, "Always need at least 1");
1141 assert(n_conc_workers > 0, "Always need at least 1"); 1138 return n_conc_workers;
1142 return (int) MAX2(n_conc_workers, (size_t) 1); 1139 }
1140 // If we are not running with any parallel GC threads we will not
1141 // have spawned any marking threads either. Hence the number of
1142 // concurrent workers should be 0.
1143 return 0;
1143 } 1144 }
1144 1145
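
The rewritten calc_parallel_marking_threads() now returns size_t and reports 0 when the VM runs with no parallel GC threads, since no marking threads were spawned; callers that need at least one worker clamp the value themselves (the MAX2 in markFromRoots() below). A rough standalone sketch of that contract, with simplified stand-in flags and a placeholder for the dynamic-sizing computation elided from the hunk above:

#include <cstddef>
#include <algorithm>

// Simplified stand-ins for the real VM flags and accessors.
static bool   use_parallel_gc_threads_flag  = false;  // ParallelGCThreads > 0
static bool   use_dynamic_number_of_threads = true;   // UseDynamicNumberOfGCThreads
static size_t max_parallel_marking_threads  = 4;

// Placeholder for the dynamic-sizing computation not shown in the hunk.
static size_t dynamically_sized_workers() { return 2; }

size_t calc_parallel_marking_threads() {
  if (use_parallel_gc_threads_flag) {
    size_t n_conc_workers = use_dynamic_number_of_threads
                              ? dynamically_sized_workers()
                              : max_parallel_marking_threads;
    // The parallel case always yields at least one worker.
    return std::max<size_t>(n_conc_workers, 1);
  }
  // No parallel GC threads => no concurrent marking workers either.
  return 0;
}

// Caller side: clamp to 1 where a zero worker count makes no sense,
// e.g. when sizing the parallel task terminator in set_phase().
size_t active_workers_for_phase() {
  return std::max<size_t>(1, calc_parallel_marking_threads());
}
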
1145 void ConcurrentMark::markFromRoots() { 1146 void ConcurrentMark::markFromRoots() {
1146 // we might be tempted to assert that: 1147 // we might be tempted to assert that:
1147 // assert(asynch == !SafepointSynchronize::is_at_safepoint(), 1148 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1149 // However that wouldn't be right, because it's possible that 1150 // However that wouldn't be right, because it's possible that
1150 // a safepoint is indeed in progress as a younger generation 1151 // a safepoint is indeed in progress as a younger generation
1151 // stop-the-world GC happens even as we mark in this generation. 1152 // stop-the-world GC happens even as we mark in this generation.
1152 1153
1153 _restart_for_overflow = false; 1154 _restart_for_overflow = false;
1154
1155 // Parallel task terminator is set in "set_phase()".
1156 force_overflow_conc()->init(); 1155 force_overflow_conc()->init();
1157 1156
1158 // _g1h has _n_par_threads 1157 // _g1h has _n_par_threads
1159
1160 _parallel_marking_threads = calc_parallel_marking_threads(); 1158 _parallel_marking_threads = calc_parallel_marking_threads();
1161 assert(parallel_marking_threads() <= max_parallel_marking_threads(), 1159 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1162 "Maximum number of marking threads exceeded"); 1160 "Maximum number of marking threads exceeded");
1163 _parallel_workers->set_active_workers((int)_parallel_marking_threads); 1161
1164 // Don't set _n_par_threads because it affects MT in proceess_strong_roots() 1162 size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
1165 // and the decisions on that MT processing is made elsewhere. 1163
1166 1164 // Parallel task terminator is set in "set_phase()"
1167 assert( _parallel_workers->active_workers() > 0, "Should have been set"); 1165 set_phase(active_workers, true /* concurrent */);
1168 set_phase(_parallel_workers->active_workers(), true /* concurrent */);
1169 1166
1170 CMConcurrentMarkingTask markingTask(this, cmThread()); 1167 CMConcurrentMarkingTask markingTask(this, cmThread());
1171 if (parallel_marking_threads() > 0) { 1168 if (parallel_marking_threads() > 0) {
1169 _parallel_workers->set_active_workers((int)active_workers);
1170 // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
1171 // and the decisions on that MT processing is made elsewhere.
1172 assert(_parallel_workers->active_workers() > 0, "Should have been set");
1172 _parallel_workers->run_task(&markingTask); 1173 _parallel_workers->run_task(&markingTask);
1173 } else { 1174 } else {
1174 markingTask.work(0); 1175 markingTask.work(0);
1175 } 1176 }
1176 print_stats(); 1177 print_stats();
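
markFromRoots() now uses the worker count in two different ways: set_phase(), which sizes the parallel task terminator, always gets at least one worker via MAX2, while the gang itself is only touched when parallel_marking_threads() > 0. A hedged sketch of that split, again with hypothetical stand-in types rather than the real ConcurrentMark, FlexibleWorkGang and CMConcurrentMarkingTask:

#include <cstddef>
#include <algorithm>

// Hypothetical stand-ins for the VM types named above.
struct Task { void work(int worker_id) { (void)worker_id; } };
struct Gang {
  void set_active_workers(int n) { (void)n; }
  void run_task(Task* t) { t->work(0); /* ...and on the other workers */ }
};

static Gang*  _parallel_workers         = NULL;  // NULL when ParallelGCThreads == 0
static size_t _parallel_marking_threads = 0;

static size_t calc_parallel_marking_threads() { return _parallel_workers ? 2 : 0; }
static void   set_phase(size_t active_workers, bool concurrent) {
  (void)active_workers; (void)concurrent;  // would size the task terminator
}

void mark_from_roots(Task* marking_task) {
  _parallel_marking_threads = calc_parallel_marking_threads();

  // The terminator must be sized for at least one worker, even in serial mode.
  size_t active_workers = std::max<size_t>(1, _parallel_marking_threads);
  set_phase(active_workers, true /* concurrent */);

  if (_parallel_marking_threads > 0) {
    _parallel_workers->set_active_workers((int)active_workers);
    _parallel_workers->run_task(marking_task);
  } else {
    marking_task->work(0);   // no gang: run the single worker inline
  }
}
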
1763 1764
1764 double start = os::elapsedTime(); 1765 double start = os::elapsedTime();
1765 1766
1766 HeapRegionRemSet::reset_for_cleanup_tasks(); 1767 HeapRegionRemSet::reset_for_cleanup_tasks();
1767 1768
1768 g1h->set_par_threads(); 1769 size_t n_workers;
1769 size_t n_workers = g1h->n_par_threads();
1770 1770
1771 // Do counting once more with the world stopped for good measure. 1771 // Do counting once more with the world stopped for good measure.
1772 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), 1772 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
1773 &_region_bm, &_card_bm); 1773 &_region_bm, &_card_bm);
1774 if (G1CollectedHeap::use_parallel_gc_threads()) { 1774 if (G1CollectedHeap::use_parallel_gc_threads()) {
1775 assert(g1h->check_heap_region_claim_values( 1775 assert(g1h->check_heap_region_claim_values(
1776 HeapRegion::InitialClaimValue), 1776 HeapRegion::InitialClaimValue),
1777 "sanity check"); 1777 "sanity check");
1778 1778
1779 g1h->set_par_threads();
1780 n_workers = g1h->n_par_threads();
1779 assert(g1h->n_par_threads() == (int) n_workers, 1781 assert(g1h->n_par_threads() == (int) n_workers,
1780 "Should not have been reset"); 1782 "Should not have been reset");
1781 g1h->workers()->run_task(&g1_par_count_task); 1783 g1h->workers()->run_task(&g1_par_count_task);
1782 // Done with the parallel phase so reset to 0. 1784 // Done with the parallel phase so reset to 0.
1783 g1h->set_par_threads(0); 1785 g1h->set_par_threads(0);
1784 1786
1785 assert(g1h->check_heap_region_claim_values( 1787 assert(g1h->check_heap_region_claim_values(
1786 HeapRegion::FinalCountClaimValue), 1788 HeapRegion::FinalCountClaimValue),
1787 "sanity check"); 1789 "sanity check");
1788 } else { 1790 } else {
1791 n_workers = 1;
1789 g1_par_count_task.work(0); 1792 g1_par_count_task.work(0);
1790 } 1793 }
1791 1794
1792 size_t known_garbage_bytes = 1795 size_t known_garbage_bytes =
1793 g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes(); 1796 g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
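
In the cleanup counting phase, the g1h->set_par_threads() / n_par_threads() bookkeeping now happens only inside the parallel branch, and the serial branch simply uses one worker, so a run with ParallelGCThreads==0 never asks a nonexistent gang for its size. A sketch of that bookkeeping pattern with a hypothetical stand-in heap type:

#include <cstddef>
#include <cassert>

// Hypothetical stand-in for the heap's parallel-thread bookkeeping.
struct Heap {
  int  _n_par_threads;
  int  _gang_size;                       // 0 when ParallelGCThreads == 0
  void set_par_threads()         { _n_par_threads = _gang_size; }
  void set_par_threads(int n)    { _n_par_threads = n; }
  int  n_par_threads() const     { return _n_par_threads; }
  bool use_parallel_gc_threads() const { return _gang_size > 0; }
};

struct CountTask { void work(int worker_id) { (void)worker_id; } };
static void run_on_gang(Heap* heap, CountTask* task) { (void)heap; task->work(0); }

size_t final_count(Heap* g1h, CountTask* task) {
  size_t n_workers;
  if (g1h->use_parallel_gc_threads()) {
    g1h->set_par_threads();              // advertise the gang size to the task
    n_workers = (size_t)g1h->n_par_threads();
    run_on_gang(g1h, task);
    g1h->set_par_threads(0);             // parallel phase done, reset to 0
  } else {
    n_workers = 1;                       // serial: exactly one "worker"
    task->work(0);
  }
  assert(n_workers > 0, "n_workers sizes the per-worker data used later");
  return n_workers;
}
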
1848 double note_end_end = os::elapsedTime(); 1851 double note_end_end = os::elapsedTime();
1849 if (G1PrintParCleanupStats) { 1852 if (G1PrintParCleanupStats) {
1850 gclog_or_tty->print_cr(" note end of marking: %8.3f ms.", 1853 gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
1851 (note_end_end - note_end_start)*1000.0); 1854 (note_end_end - note_end_start)*1000.0);
1852 } 1855 }
1853
1854 1856
1855 // call below, since it affects the metric by which we sort the heap 1857 // call below, since it affects the metric by which we sort the heap
1856 // regions. 1858 // regions.
1857 if (G1ScrubRemSets) { 1859 if (G1ScrubRemSets) {
1858 double rs_scrub_start = os::elapsedTime(); 1860 double rs_scrub_start = os::elapsedTime();
2327 // want to abort remark and do concurrent marking again. 2329 // want to abort remark and do concurrent marking again.
2328 task->record_end_time(); 2330 task->record_end_time();
2329 } 2331 }
2330 } 2332 }
2331 2333
2332 CMRemarkTask(ConcurrentMark* cm) : 2334 CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2333 AbstractGangTask("Par Remark"), _cm(cm) { 2335 AbstractGangTask("Par Remark"), _cm(cm) {
2334 _cm->terminator()->reset_for_reuse(cm->_g1h->workers()->active_workers()); 2336 _cm->terminator()->reset_for_reuse(active_workers);
2335 } 2337 }
2336 }; 2338 };
2337 2339
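
CMRemarkTask's constructor used to ask the work gang for its active worker count, which dereferences a gang that does not exist when ParallelGCThreads==0; it now takes active_workers as a parameter from the caller. A hedged sketch of that constructor-injection change, with stand-in types:

#include <cstddef>

// Hypothetical stand-ins for the VM types involved.
struct Terminator { void reset_for_reuse(int n_threads) { (void)n_threads; } };

struct ConcurrentMarkStub {
  Terminator _terminator;
  Terminator* terminator() { return &_terminator; }
};

// Before: the constructor read the gang's active_workers(), crashing when
// no gang exists.  After: the caller passes the count in explicitly.
class RemarkTask {
 public:
  RemarkTask(ConcurrentMarkStub* cm, int active_workers) : _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
  void work(int worker_id) { (void)worker_id; /* finish marking for this worker */ }
 private:
  ConcurrentMarkStub* _cm;
};

// Serial caller: pick active_workers = 1 and never touch a work gang.
void remark_serial(ConcurrentMarkStub* cm) {
  RemarkTask task(cm, 1 /* active_workers */);
  task.work(0);
}
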
2338 void ConcurrentMark::checkpointRootsFinalWork() { 2340 void ConcurrentMark::checkpointRootsFinalWork() {
2339 ResourceMark rm; 2341 ResourceMark rm;
2355 // Leave _parallel_marking_threads at it's 2357 // Leave _parallel_marking_threads at it's
2356 // value originally calculated in the ConcurrentMark 2358 // value originally calculated in the ConcurrentMark
2357 // constructor and pass values of the active workers 2359 // constructor and pass values of the active workers
2358 // through the gang in the task. 2360 // through the gang in the task.
2359 2361
2360 CMRemarkTask remarkTask(this); 2362 CMRemarkTask remarkTask(this, active_workers);
2361 g1h->set_par_threads(active_workers); 2363 g1h->set_par_threads(active_workers);
2362 g1h->workers()->run_task(&remarkTask); 2364 g1h->workers()->run_task(&remarkTask);
2363 g1h->set_par_threads(0); 2365 g1h->set_par_threads(0);
2364 } else { 2366 } else {
2365 G1CollectedHeap::StrongRootsScope srs(g1h); 2367 G1CollectedHeap::StrongRootsScope srs(g1h);
2366 // this is remark, so we'll use up all available threads 2368 // this is remark, so we'll use up all available threads
2367 int active_workers = 1; 2369 int active_workers = 1;
2368 set_phase(active_workers, false /* concurrent */); 2370 set_phase(active_workers, false /* concurrent */);
2369 2371
2370 CMRemarkTask remarkTask(this); 2372 CMRemarkTask remarkTask(this, active_workers);
2371 // We will start all available threads, even if we decide that the 2373 // We will start all available threads, even if we decide that the
2372 // active_workers will be fewer. The extra ones will just bail out 2374 // active_workers will be fewer. The extra ones will just bail out
2373 // immediately. 2375 // immediately.
2374 remarkTask.work(0); 2376 remarkTask.work(0);
2375 } 2377 }
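
checkpointRootsFinalWork() leaves _parallel_marking_threads at its constructor-time value; the serial branch calls set_phase(1, false) and runs work(0) directly, while the parallel branch passes the current active worker count into the task and into set_par_threads(). A brief hedged sketch of that parallel branch (active_workers is assumed to have been taken from the work gang earlier, which this hunk does not show):

#include <cstddef>

// Hypothetical stand-ins, as in the earlier sketches.
struct RemarkTaskStub {
  int _active_workers;
  explicit RemarkTaskStub(int active_workers) : _active_workers(active_workers) {}
  void work(int worker_id) { (void)worker_id; }
};
struct Gang { void run_task(RemarkTaskStub* t) { t->work(0); /* ...and others */ } };
struct Heap {
  Gang  _gang;
  Gang* workers() { return &_gang; }
  void  set_par_threads(int n) { (void)n; }
};

void remark_parallel(Heap* g1h, int active_workers) {
  // _parallel_marking_threads keeps its constructor-time value; the current
  // active worker count flows only through the task and the heap bookkeeping.
  RemarkTaskStub remark_task(active_workers);
  g1h->set_par_threads(active_workers);
  g1h->workers()->run_task(&remark_task);
  g1h->set_par_threads(0);
}
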
3121 g1h->g1_policy()->record_mark_closure_time(0.0); 3123 g1h->g1_policy()->record_mark_closure_time(0.0);
3122 return; 3124 return;
3123 } 3125 }
3124 3126
3125 double start = os::elapsedTime(); 3127 double start = os::elapsedTime();
3126 int n_workers = g1h->workers()->total_workers();
3127
3128 G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this); 3128 G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
3129 3129
3130 assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity"); 3130 assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
3131 3131
3132 if (G1CollectedHeap::use_parallel_gc_threads()) { 3132 if (G1CollectedHeap::use_parallel_gc_threads()) {
3133 int n_workers = g1h->workers()->active_workers();
3133 g1h->set_par_threads(n_workers); 3134 g1h->set_par_threads(n_workers);
3134 g1h->workers()->run_task(&complete_mark_task); 3135 g1h->workers()->run_task(&complete_mark_task);
3135 g1h->set_par_threads(0); 3136 g1h->set_par_threads(0);
3136 } else { 3137 } else {
3137 complete_mark_task.work(0); 3138 complete_mark_task.work(0);