comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1833:8b10f48633dc

6984287: Regularize how GC parallel workers are specified.
Summary: Associate number of GC workers with the workgang as opposed to the task.
Reviewed-by: johnc, ysr
author jmasa
date Mon, 20 Sep 2010 14:38:38 -0700
parents 8e5955ddf8e4
children 4e0094bc41fa
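The hunks that follow replace direct tests of the ParallelGCThreads flag with the use_parallel_gc_threads() predicate that this changeset introduces on CollectedHeap (and which G1CollectedHeap inherits). The predicate itself is declared outside this file; as a rough, hedged sketch of what the call sites below rely on, it presumably amounts to:

// Sketch only: the real declaration is added elsewhere in this changeset
// (collectedHeap.hpp) and may differ in detail.
class CollectedHeap {
 public:
  // True iff a gang of parallel GC worker threads is configured.
  static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
  // ... remainder of CollectedHeap ...
};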
--- src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  (1781:97fbf5beff7b)
+++ src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  (1833:8b10f48633dc)
@@ -959,11 +959,12 @@
     _cg1r->clear_and_record_card_counts();
     _cg1r->clear_hot_cache();
   }

   // Rebuild remembered sets of all regions.
-  if (ParallelGCThreads > 0) {
+
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     ParRebuildRSTask rebuild_rs_task(this);
     assert(check_heap_region_claim_values(
              HeapRegion::InitialClaimValue), "sanity check");
     set_par_threads(workers()->total_workers());
     workers()->run_task(&rebuild_rs_task);
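The rebuild-remembered-sets hunk above also shows the dispatch shape the changeset is regularizing: the caller sizes the parallel phase from the workgang itself (set_par_threads(workers()->total_workers())) and then hands the task to the gang, rather than wiring a worker count into the task. A minimal sketch of that shape, using hypothetical names (PhaseTask, run_phase) and assuming AbstractGangTask/WorkGang behave as in the surrounding HotSpot sources:

// Sketch only: PhaseTask and run_phase are placeholders, not part of this diff.
class PhaseTask : public AbstractGangTask {
 public:
  PhaseTask() : AbstractGangTask("phase task") { }  // note: no worker count stored here
  void work(int worker_i) {
    // per-worker share of the phase; worker_i identifies the gang worker
  }
};

void run_phase(SharedHeap* heap, WorkGang* gang) {
  PhaseTask task;
  heap->set_par_threads(gang->total_workers());  // count comes from the gang...
  gang->run_task(&task);                         // ...which runs work(i) on each worker
  heap->set_par_threads(0);                      // back to serial afterwards
}

Keeping the count with the gang means the same task type can be reused no matter how many workers the gang currently provides.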
@@ -1958,11 +1959,11 @@
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                  int worker,
                                                  jint claim_value) {
   const size_t regions = n_regions();
-  const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
+  const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
   // try to spread out the starting points of the workers
   const size_t start_index = regions / worker_num * (size_t) worker;

   // each worker will actually look at all regions
   for (size_t count = 0; count < regions; ++count) {
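In heap_region_par_iterate_chunked, each worker starts at a different offset (regions / worker_num * worker) and still walks all regions; the claim value passed in is what keeps two workers from processing the same region twice. A small standalone illustration of the start-index arithmetic, with made-up numbers rather than anything taken from the VM:

// Standalone illustration (not HotSpot code) of how the starting points spread out.
#include <cstddef>
#include <iostream>

int main() {
  const std::size_t regions    = 1000;  // pretend n_regions() returned 1000
  const std::size_t worker_num = 4;     // pretend ParallelGCThreads == 4
  for (std::size_t worker = 0; worker < worker_num; ++worker) {
    const std::size_t start_index = regions / worker_num * worker;
    // workers start at regions 0, 250, 500 and 750, then each wraps around all 1000
    std::cout << "worker " << worker << " starts at region " << start_index << "\n";
  }
  return 0;
}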
@@ -2525,11 +2526,11 @@
   PrintRegionClosure blk(st);
   _hrs->iterate(&blk);
 }

 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->print_worker_threads_on(st);
   }

   _cmThread->print_on(st);
   st->cr();
@@ -2541,11 +2542,11 @@
   _czft->print_on(st);
   st->cr();
 }

 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->threads_do(tc);
   }
   tc->do_thread(_cmThread);
   _cg1r->threads_do(tc);
   tc->do_thread(_czft);
@@ -3081,11 +3082,11 @@
   // So record it now and use it later.
   size_t r_used = 0;
   if (r != NULL) {
     r_used = r->used();

-    if (ParallelGCThreads > 0) {
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
       // need to take the lock to guard against two threads calling
       // get_gc_alloc_region concurrently (very unlikely but...)
       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
       r->save_marks();
     }
@@ -4180,10 +4181,12 @@
   }
 };

 // *** Common G1 Evacuation Stuff

+// This method is run in a GC worker.
+
 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopClosure* scan_non_heap_roots,
@@ -4257,11 +4260,11 @@
     return false;
   }
 };

 void G1CollectedHeap::save_marks() {
-  if (ParallelGCThreads == 0) {
+  if (!CollectedHeap::use_parallel_gc_threads()) {
     SaveMarksClosure sm;
     heap_region_iterate(&sm);
   }
   // We do this even in the parallel case
   perm_gen()->save_marks();
@@ -4282,11 +4285,11 @@

   rem_set()->prepare_for_younger_refs_iterate(true);

   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   double start_par = os::elapsedTime();
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     // The individual threads will set their evac-failure closures.
     StrongRootsScope srs(this);
     if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
     workers()->run_task(&g1_par_task);
   } else {