comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 6819:2e6857353b2c

8000311: G1: ParallelGCThreads==0 broken
Summary: Divide by zero error, if ParallelGCThreads is 0, when adjusting the PLAB size.
Reviewed-by: jmasa, jcoomes
author johnc
date Thu, 04 Oct 2012 10:04:13 -0700
parents b86575d092a2
children 04155d9c8c76
comparing 6818:22b8d3d181d9 with 6819:2e6857353b2c
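The failure mode behind this change, as a minimal sketch: PLAB resizing divides the space used during a pause by a GC-thread count, and with -XX:ParallelGCThreads=0 that divisor is zero. The names below are illustrative, not the actual HotSpot code.

    #include <cstddef>

    // Illustrative sketch of the bug (hypothetical names): sizing a
    // per-worker PLAB by dividing total used space across workers.
    size_t desired_plab_sz(size_t used_words, unsigned gc_threads) {
      const size_t target_refills = 8;  // illustrative constant
      // With -XX:ParallelGCThreads=0 the divisor is 0: integer
      // division by zero, which kills the VM (SIGFPE).
      return used_words / (gc_threads * target_refills);
    }

The fix threads the worker count actually used for the pause (at least 1, even in the serial case) into release_gc_alloc_regions() and the reference-processing phases shown in the hunks below.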
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4149,22 +4149,22 @@
     _old_gc_alloc_region.set(retained_region);
     _hr_printer.reuse(retained_region);
   }
 }
 
-void G1CollectedHeap::release_gc_alloc_regions() {
+void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
   _survivor_gc_alloc_region.release();
   // If we have an old GC alloc region to release, we'll save it in
   // _retained_old_gc_alloc_region. If we don't
   // _retained_old_gc_alloc_region will become NULL. This is what we
   // want either way so no reason to check explicitly for either
   // condition.
   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
 
   if (ResizePLAB) {
-    _survivor_plab_stats.adjust_desired_plab_sz();
-    _old_plab_stats.adjust_desired_plab_sz();
+    _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+    _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
   }
 }
 
 void G1CollectedHeap::abandon_gc_alloc_regions() {
   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
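For context, a hedged sketch of what adjust_desired_plab_sz(no_of_gc_workers) can now do with the caller-supplied count; the fields and constants are illustrative, not the exact PLABStats code.

    #include <cstddef>

    // Hedged sketch of a PLAB-stats resize driven by the caller's
    // worker count instead of the ParallelGCThreads flag.
    class PLABStatsSketch {
      size_t _allocated = 0;        // words allocated in PLABs this pause
      size_t _unused = 0;           // words left unused when PLABs retired
      size_t _desired_plab_sz = 0;  // per-worker target for the next pause
    public:
      void adjust_desired_plab_sz(unsigned no_of_gc_workers) {
        if (no_of_gc_workers == 0 || _allocated == 0) return;  // guard
        const size_t target_refills = 8;  // illustrative constant
        size_t used = _allocated - _unused;
        // Safe: the caller passes the count of workers actually used
        // for the pause, which is >= 1 even when ParallelGCThreads == 0.
        _desired_plab_sz = used / (no_of_gc_workers * target_refills);
        _allocated = _unused = 0;  // reset accumulators for the next pause
      }
    };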
@@ -5425,11 +5425,11 @@
     assert(pss.refs()->is_empty(), "should be");
   }
 };
 
 // Weak Reference processing during an evacuation pause (part 1).
-void G1CollectedHeap::process_discovered_references() {
+void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
   double ref_proc_start = os::elapsedTime();
 
   ReferenceProcessor* rp = _ref_processor_stw;
   assert(rp->discovery_enabled(), "should have been enabled");
 
@@ -5452,19 +5452,18 @@
   // We also need to do this copying before we process the reference
   // objects discovered by the STW ref processor in case one of these
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.
 
-  uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                         workers()->active_workers() : 1);
-
   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
-         active_workers == workers()->active_workers(),
-         "Need to reset active_workers");
+         no_of_gc_workers == workers()->active_workers(),
+         "Need to reset active GC workers");
 
-  set_par_threads(active_workers);
-  G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
+  set_par_threads(no_of_gc_workers);
+  G1ParPreserveCMReferentsTask keep_cm_referents(this,
+                                                 no_of_gc_workers,
+                                                 _task_queues);
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->run_task(&keep_cm_referents);
   } else {
     keep_cm_referents.work(0);
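The hunk above also shows the serial-fallback idiom G1 uses throughout this file: the same task body runs either across the work gang or inline as pseudo-worker 0. A self-contained sketch with simplified stand-in types (std::thread in place of the VM's work gang):

    #include <thread>
    #include <vector>

    // Stand-in for AbstractGangTask: work() is the per-worker body.
    struct TaskSketch {
      virtual ~TaskSketch() {}
      virtual void work(unsigned worker_id) = 0;
    };

    void run_task_sketch(TaskSketch& task, unsigned n_workers, bool parallel) {
      if (parallel) {
        std::vector<std::thread> gang;
        for (unsigned i = 0; i < n_workers; i++) {
          gang.emplace_back([&task, i] { task.work(i); });
        }
        for (std::thread& t : gang) t.join();
      } else {
        task.work(0);  // serial path, like keep_cm_referents.work(0) above
      }
    }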
@@ -5526,14 +5525,14 @@
                                       &keep_alive,
                                       &drain_queue,
                                       NULL);
   } else {
     // Parallel reference processing
-    assert(rp->num_q() == active_workers, "sanity");
-    assert(active_workers <= rp->max_num_q(), "sanity");
+    assert(rp->num_q() == no_of_gc_workers, "sanity");
+    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
 
-    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
   }
 
   // We have completed copying any necessary live referent objects
   // (that were not copied during the actual pause) so we can
@@ -5544,11 +5543,11 @@
   double ref_proc_time = os::elapsedTime() - ref_proc_start;
   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
 }
 
 // Weak Reference processing during an evacuation pause (part 2).
-void G1CollectedHeap::enqueue_discovered_references() {
+void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
   double ref_enq_start = os::elapsedTime();
 
   ReferenceProcessor* rp = _ref_processor_stw;
   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
 
@@ -5558,17 +5557,16 @@
     // Serial reference processing...
     rp->enqueue_discovered_references();
   } else {
     // Parallel reference enqueuing
 
-    uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
-    assert(active_workers == workers()->active_workers(),
-           "Need to reset active_workers");
-    assert(rp->num_q() == active_workers, "sanity");
-    assert(active_workers <= rp->max_num_q(), "sanity");
+    assert(no_of_gc_workers == workers()->active_workers(),
+           "Need to reset active workers");
+    assert(rp->num_q() == no_of_gc_workers, "sanity");
+    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
 
-    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
     rp->enqueue_discovered_references(&par_task_executor);
   }
 
   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
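Note how the deleted local recomputation (ParallelGCThreads > 0 ? ... : 1) becomes an assert against the passed-in count. The queue-count asserts matter because the parallel executor hands each worker its own discovered-reference queue; a sketch of that invariant, with illustrative types only:

    #include <cassert>
    #include <vector>

    struct RefQueueSketch { /* discovered references for one worker */ };

    struct RefProcessorSketch {
      std::vector<RefQueueSketch> _discovered;  // sized at pause start
      unsigned num_q() const { return (unsigned)_discovered.size(); }
    };

    void enqueue_parallel_sketch(RefProcessorSketch& rp, unsigned n_workers) {
      // Mirrors the asserts above: one queue per worker, no more workers
      // than queues.
      assert(rp.num_q() == n_workers && "one queue per worker");
      for (unsigned i = 0; i < n_workers; i++) {
        // worker i drains rp._discovered[i] onto the pending list
      }
    }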
@@ -5656,11 +5654,11 @@
   // Process any discovered reference objects - we have
   // to do this _before_ we retire the GC alloc regions
   // as we may have to copy some 'reachable' referent
   // objects (and their reachable sub-graphs) that were
   // not copied during the pause.
-  process_discovered_references();
+  process_discovered_references(n_workers);
 
   // Weak root processing.
   // Note: when JSR 292 is enabled and code blobs can contain
   // non-perm oops then we will need to process the code blobs
   // here too.
@@ -5668,11 +5666,11 @@
     G1STWIsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
 
-  release_gc_alloc_regions();
+  release_gc_alloc_regions(n_workers);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   concurrent_g1_refine()->clear_hot_cache();
   concurrent_g1_refine()->set_use_cache(true);
 
@@ -5692,11 +5690,11 @@
   // this after the card table is cleaned (and verified) as
   // the act of enqueuing entries on to the pending list
   // will log these updates (and dirty their associated
   // cards). We need these updates logged to update any
   // RSets.
-  enqueue_discovered_references();
+  enqueue_discovered_references(n_workers);
 
   if (G1DeferredRSUpdate) {
     RedirtyLoggedCardTableEntryFastClosure redirty;
     dirty_card_queue_set().set_closure(&redirty);
     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
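Taken together, the call-site hunks show the shape of the fix: n_workers is computed once per pause and handed to each phase, instead of each phase deriving its own count from ParallelGCThreads. A condensed caller sketch with hypothetical stand-ins for the three phases changed above:

    #include <cassert>

    static void process_discovered_references_sk(unsigned n) { assert(n > 0); }
    static void release_gc_alloc_regions_sk(unsigned n)      { assert(n > 0); }
    static void enqueue_discovered_references_sk(unsigned n) { assert(n > 0); }

    void evacuation_pause_sketch(bool parallel, unsigned active_workers) {
      // Fixed once per pause; never 0, even with ParallelGCThreads == 0.
      unsigned n_workers = parallel ? active_workers : 1;
      process_discovered_references_sk(n_workers);
      release_gc_alloc_regions_sk(n_workers);   // PLABs resized by n_workers
      enqueue_discovered_references_sk(n_workers);
    }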