comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 4728:441e946dc1af

7121618: Change type of number of GC workers to unsigned int.
Summary: Change variables representing the number of GC workers to uint from int and size_t. Change the parameter in work(int i) to work(uint worker_id).
Reviewed-by: brutisso, tonyp
author jmasa
date Wed, 14 Dec 2011 13:34:57 -0800
parents adedfbbf0360
children bacb651cf5bf
comparing 4727:67fdcb391461 with 4728:441e946dc1af
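Note: the change is mechanical but worth seeing in isolation. Worker ids and worker counts move to a single unsigned type, so comparisons such as worker_id >= _n_workers no longer mix signedness. A minimal standalone sketch of the before/after pattern (the class and its members are hypothetical, not HotSpot code; only the work(uint worker_id) signature mirrors AbstractGangTask after this change):

  typedef unsigned int uint;  // stands in for HotSpot's uint

  class HypotheticalGangTask {
    uint _n_workers;                      // was: int _n_workers;
  public:
    explicit HypotheticalGangTask(uint n) : _n_workers(n) { }
    virtual void work(uint worker_id) {   // was: virtual void work(int i)
      // Same-type comparison: no signed/unsigned mismatch warning, and no
      // surprise if a negative int were ever passed as a worker id.
      if (worker_id >= _n_workers) return;
      // ... per-worker body, indexed by worker_id ...
    }
    virtual ~HypotheticalGangTask() { }
  };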
@@ -1163,13 +1163,13 @@
   ParRebuildRSTask(G1CollectedHeap* g1)
     : AbstractGangTask("ParRebuildRSTask"),
       _g1(g1)
   { }

-  void work(int i) {
-    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
-    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
+  void work(uint worker_id) {
+    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
+    _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
                                          _g1->workers()->active_workers(),
                                          HeapRegion::RebuildRSClaimValue);
   }
 };
@@ -1372,11 +1372,11 @@
     _cg1r->clear_hot_cache();
   }

   // Rebuild remembered sets of all regions.
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    int n_workers =
+    uint n_workers =
       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                               workers()->active_workers(),
                                               Threads::number_of_non_daemon_threads());
     assert(UseDynamicNumberOfGCThreads ||
            n_workers == workers()->total_workers(),
@@ -2517,15 +2517,15 @@
   _hrs.iterate_from(r, cl);
 }

 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
-                                                 int worker,
-                                                 int no_of_par_workers,
+                                                 uint worker,
+                                                 uint no_of_par_workers,
                                                  jint claim_value) {
   const size_t regions = n_regions();
-  const size_t max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                              no_of_par_workers :
                              1);
   assert(UseDynamicNumberOfGCThreads ||
          no_of_par_workers == workers()->total_workers(),
          "Non dynamic should use fixed number of workers");
@@ -2737,11 +2737,11 @@
   // Then thread t will start at region floor ((t * n) / p)

   result = g1_policy()->collection_set();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     size_t cs_size = g1_policy()->cset_region_length();
-    int active_workers = workers()->active_workers();
+    uint active_workers = workers()->active_workers();
     assert(UseDynamicNumberOfGCThreads ||
            active_workers == workers()->total_workers(),
            "Unless dynamic should use total workers");

     size_t end_ind = (cs_size * worker_i) / active_workers;
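The comment above records the partitioning rule: with n collection-set regions and p active workers, worker t's boundary falls at region floor((t * n) / p), and end_ind applies exactly that formula for worker_i. A quick self-contained check of the formula (the values and printing harness are illustrative, not HotSpot code):

  #include <cstdio>

  // Worker t of p owns regions [floor(t*n/p), floor((t+1)*n/p)).
  // For n = 10 regions and p = 4 workers the boundaries are
  // 0, 2, 5, 7, 10, so every region is covered exactly once.
  int main() {
    const unsigned int n = 10, p = 4;
    for (unsigned int t = 0; t < p; t++) {
      unsigned int start = (n * t) / p;
      unsigned int end   = (n * (t + 1)) / p;
      std::printf("worker %u: regions [%u, %u)\n", t, start, end);
    }
    return 0;
  }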
@@ -3073,14 +3073,14 @@

   bool failures() {
     return _failures;
   }

-  void work(int worker_i) {
+  void work(uint worker_id) {
     HandleMark hm;
     VerifyRegionClosure blk(_allow_dirty, true, _vo);
-    _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
+    _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
                                           _g1h->workers()->active_workers(),
                                           HeapRegion::ParVerifyClaimValue);
     if (blk.failures()) {
       _failures = true;
     }
@@ -4723,11 +4723,11 @@
 class G1ParTask : public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
   RefToScanQueueSet *_queues;
   ParallelTaskTerminator _terminator;
-  int _n_workers;
+  uint _n_workers;

   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }

   size_t getNCards() {
@@ -4763,22 +4763,22 @@
     _g1h->set_n_termination(active_workers);
     terminator()->reset_for_reuse(active_workers);
     _n_workers = active_workers;
   }

-  void work(int i) {
-    if (i >= _n_workers) return;  // no work needed this round
+  void work(uint worker_id) {
+    if (worker_id >= _n_workers) return;  // no work needed this round

     double start_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
+    _g1h->g1_policy()->record_gc_worker_start_time(worker_id, start_time_ms);

     ResourceMark rm;
     HandleMark hm;

     ReferenceProcessor* rp = _g1h->ref_processor_stw();

-    G1ParScanThreadState pss(_g1h, i);
+    G1ParScanThreadState pss(_g1h, worker_id);
     G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
     G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp);

     pss.set_evac_closure(&scan_evac_cl);
@@ -4806,36 +4806,36 @@
     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
                                   SharedHeap::SO_AllClasses,
                                   scan_root_cl,
                                   &push_heap_rs_cl,
                                   scan_perm_cl,
-                                  i);
+                                  worker_id);
     pss.end_strong_roots();

     {
       double start = os::elapsedTime();
       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
       evac.do_void();
       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
       double term_ms = pss.term_time()*1000.0;
-      _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
-      _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
+      _g1h->g1_policy()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
+      _g1h->g1_policy()->record_termination(worker_id, term_ms, pss.term_attempts());
     }
     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

     // Clean up any par-expanded rem sets.
     HeapRegionRemSet::par_cleanup();

     if (ParallelGCVerbose) {
       MutexLocker x(stats_lock());
-      pss.print_termination_stats(i);
+      pss.print_termination_stats(worker_id);
     }

     assert(pss.refs()->is_empty(), "should be empty");
     double end_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
+    _g1h->g1_policy()->record_gc_worker_end_time(worker_id, end_time_ms);
   }
 };

 // *** Common G1 Evacuation Stuff

@@ -5089,18 +5089,18 @@
     _g1h(g1h),
     _task_queues(task_queues),
     _terminator(terminator)
   {}

-  virtual void work(int i) {
+  virtual void work(uint worker_id) {
     // The reference processing task executed by a single worker.
     ResourceMark rm;
     HandleMark hm;

     G1STWIsAliveClosure is_alive(_g1h);

-    G1ParScanThreadState pss(_g1h, i);
+    G1ParScanThreadState pss(_g1h, worker_id);

     G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
     G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);

@@ -5128,11 +5128,11 @@

     // Complete GC closure
     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);

     // Call the reference processing task's work routine.
-    _proc_task.work(i, is_alive, keep_alive, drain_queue);
+    _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);

     // Note we cannot assert that the refs array is empty here as not all
     // of the processing tasks (specifically phase2 - pp2_work) execute
     // the complete_gc closure (which ordinarily would drain the queue) so
     // the queue may not be empty.
@@ -5163,12 +5163,12 @@
   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
     AbstractGangTask("Enqueue reference objects in parallel"),
     _enq_task(enq_task)
   { }

-  virtual void work(int i) {
-    _enq_task.work(i);
+  virtual void work(uint worker_id) {
+    _enq_task.work(worker_id);
   }
 };

 // Driver routine for parallel reference enqueing.
 // Creates an instance of the ref enqueueing gang
@@ -5193,26 +5193,26 @@
 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
   RefToScanQueueSet *_queues;
   ParallelTaskTerminator _terminator;
-  int _n_workers;
+  uint _n_workers;

 public:
   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
     AbstractGangTask("ParPreserveCMReferents"),
     _g1h(g1h),
     _queues(task_queues),
     _terminator(workers, _queues),
     _n_workers(workers)
   { }

-  void work(int i) {
+  void work(uint worker_id) {
     ResourceMark rm;
     HandleMark hm;

-    G1ParScanThreadState pss(_g1h, i);
+    G1ParScanThreadState pss(_g1h, worker_id);
     G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
     G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);

     pss.set_evac_closure(&scan_evac_cl);
@@ -5244,21 +5244,21 @@
     // to be copied.
     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);

     ReferenceProcessor* rp = _g1h->ref_processor_cm();

-    int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
-    int stride = MIN2(MAX2(_n_workers, 1), limit);
+    uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
+    uint stride = MIN2(MAX2(_n_workers, 1U), limit);

     // limit is set using max_num_q() - which was set using ParallelGCThreads.
     // So this must be true - but assert just in case someone decides to
     // change the worker ids.
-    assert(0 <= i && i < limit, "sanity");
+    assert(0 <= worker_id && worker_id < limit, "sanity");
     assert(!rp->discovery_is_atomic(), "check this code");

     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
-    for (int idx = i; idx < limit; idx += stride) {
+    for (uint idx = worker_id; idx < limit; idx += stride) {
       DiscoveredList& ref_list = rp->discovered_refs()[idx];

       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
       while (iter.has_next()) {
         // Since discovery is not atomic for the CM ref processor, we
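Two details in this hunk deserve a note. First, the literal becomes 1U because HotSpot's MIN2/MAX2 are templates over a single type: once _n_workers is uint, MAX2(_n_workers, 1) would deduce T as both uint and int and fail to compile. Second, each worker takes every stride-th discovered list starting at its own id, which partitions the limit lists with no overlap. A compact sketch of both points (the template and function names below are local stand-ins, not the HotSpot definitions):

  template <class T> static T max2(T a, T b) { return a > b ? a : b; }
  template <class T> static T min2(T a, T b) { return a < b ? a : b; }

  static void visit_my_lists(unsigned int worker_id,
                             unsigned int n_workers,
                             unsigned int limit) {
    // max2(n_workers, 1) would not compile: T deduces to unsigned int
    // from the first argument and int from the second. Hence the 1U.
    unsigned int stride = min2(max2(n_workers, 1U), limit);
    // Lists worker_id, worker_id + stride, ... are this worker's share;
    // distinct worker ids below `limit` never visit the same list.
    for (unsigned int idx = worker_id; idx < limit; idx += stride) {
      // ... process discovered list [idx] ...
    }
  }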
@@ -5308,11 +5308,11 @@
   // We also need to do this copying before we process the reference
   // objects discovered by the STW ref processor in case one of these
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.

-  int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+  uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                         workers()->active_workers() : 1);

   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
          active_workers == workers()->active_workers(),
          "Need to reset active_workers");
@@ -5414,11 +5414,11 @@
     // Serial reference processing...
     rp->enqueue_discovered_references();
   } else {
     // Parallel reference enqueuing

-    int active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
+    uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
     assert(active_workers == workers()->active_workers(),
            "Need to reset active_workers");
     assert(rp->num_q() == active_workers, "sanity");
     assert(active_workers <= rp->max_num_q(), "sanity");

@@ -5443,11 +5443,11 @@

   g1_rem_set()->prepare_for_oops_into_collection_set_do();
   concurrent_g1_refine()->set_use_cache(false);
   concurrent_g1_refine()->clear_hot_cache_claimed_index();

-  int n_workers;
+  uint n_workers;
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     n_workers =
       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                               workers()->active_workers(),
                                               Threads::number_of_non_daemon_threads());
@@ -5656,11 +5656,11 @@
   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
                      G1CollectedHeap* g1h) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs), _g1h(g1h) { }

-  void work(int i) {
+  void work(uint worker_id) {
     HeapRegion* r;
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
     }
   }
@@ -6139,11 +6139,11 @@

 void G1CollectedHeap::set_par_threads() {
   // Don't change the number of workers. Use the value previously set
   // in the workgroup.
   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
-  int n_workers = workers()->active_workers();
+  uint n_workers = workers()->active_workers();
   assert(UseDynamicNumberOfGCThreads ||
          n_workers == workers()->total_workers(),
          "Otherwise should be using the total number of workers");
   if (n_workers == 0) {
     assert(false, "Should have been set in prior evacuation pause.");