src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 4728:441e946dc1af

7121618: Change type of number of GC workers to unsigned int.

Summary: Change variables representing the number of GC workers to uint
from int and size_t. Change the parameter in work(int i) to
work(uint worker_id).

Reviewed-by: brutisso, tonyp

author    jmasa
date      Wed, 14 Dec 2011 13:34:57 -0800
parents   adedfbbf0360
children  776173fc2df9
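At its core the change is a single, widely repeated substitution: every per-worker entry point takes an unsigned worker id instead of a signed (or size_t) index. A minimal before/after sketch, using hypothetical stand-ins for HotSpot's AbstractGangTask rather than the real class:

typedef unsigned int uint;

// Hypothetical stand-ins for AbstractGangTask; illustration only.
class GangTaskBefore {
public:
  virtual ~GangTaskBefore() {}
  // Old shape: a signed index forced casts at the use sites, e.g.
  //   assert((size_t) worker_i < _cm->active_tasks(), "invariant");
  virtual void work(int worker_i) = 0;
};

class GangTaskAfter {
public:
  virtual ~GangTaskAfter() {}
  // New shape: worker ids are never negative, so uint matches the
  // domain and the casts at the use sites disappear.
  virtual void work(uint worker_id) = 0;
};
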
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -456,12 +456,12 @@
 
 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
 
-size_t ConcurrentMark::scale_parallel_threads(size_t n_par_threads) {
-  return MAX2((n_par_threads + 2) / 4, (size_t)1);
+uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
+  return MAX2((n_par_threads + 2) / 4, 1U);
 }
 
 ConcurrentMark::ConcurrentMark(ReservedSpace rs,
                                int max_regions) :
   _markBitMap1(rs, MinObjAlignment - 1),
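The switch from (size_t)1 to 1U is not cosmetic: HotSpot's MAX2 is a template over a single type, so both arguments must deduce to the same T. With n_par_threads now a uint, a size_t literal would no longer deduce; the same reasoning explains the (uint)ParallelGCThreads casts elsewhere in the change. A compilable sketch with a stand-in MAX2 (the real one lives in HotSpot's utilities code):

typedef unsigned int uint;

// Stand-in for HotSpot's single-type MAX2 template; both arguments
// must agree on T for deduction to succeed.
template <class T> T MAX2(T a, T b) { return (a > b) ? a : b; }

uint scale_parallel_threads(uint n_par_threads) {
  // MAX2((n_par_threads + 2) / 4, (size_t)1) would fail to deduce T:
  // one argument is uint, the other size_t. 1U keeps both uint.
  return MAX2((n_par_threads + 2) / 4, 1U);
}
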
@@ -484,11 +484,11 @@
 
   _markStack(this),
   _regionStack(),
   // _finger set in set_non_marking_state
 
-  _max_task_num(MAX2(ParallelGCThreads, (size_t)1)),
+  _max_task_num(MAX2((uint)ParallelGCThreads, 1U)),
   // _active_tasks set in set_non_marking_state
   // _tasks set inside the constructor
   _task_queues(new CMTaskQueueSet((int) _max_task_num)),
   _terminator(ParallelTaskTerminator((int) _max_task_num, _task_queues)),
 
@@ -504,11 +504,10 @@
   _init_times(),
   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
   _cleanup_times(),
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
-
   _parallel_workers(NULL) {
   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
   if (verbose_level < no_verbose) {
     verbose_level = no_verbose;
   }
@@ -566,11 +565,11 @@
   } else {
     if (ConcGCThreads > 0) {
       // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
       // if both are set
 
-      _parallel_marking_threads = ConcGCThreads;
+      _parallel_marking_threads = (uint) ConcGCThreads;
       _max_parallel_marking_threads = _parallel_marking_threads;
       _sleep_factor = 0.0;
       _marking_task_overhead = 1.0;
     } else if (G1MarkingOverheadPercent > 0) {
       // we will calculate the number of parallel marking threads
@@ -587,16 +586,16 @@
         overall_cm_overhead / marking_thread_num *
         (double) os::processor_count();
       double sleep_factor =
         (1.0 - marking_task_overhead) / marking_task_overhead;
 
-      _parallel_marking_threads = (size_t) marking_thread_num;
+      _parallel_marking_threads = (uint) marking_thread_num;
       _max_parallel_marking_threads = _parallel_marking_threads;
       _sleep_factor = sleep_factor;
       _marking_task_overhead = marking_task_overhead;
     } else {
-      _parallel_marking_threads = scale_parallel_threads(ParallelGCThreads);
+      _parallel_marking_threads = scale_parallel_threads((uint)ParallelGCThreads);
       _max_parallel_marking_threads = _parallel_marking_threads;
       _sleep_factor = 0.0;
       _marking_task_overhead = 1.0;
     }
 
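The sleep factor in the G1MarkingOverheadPercent branch is plain arithmetic: if each marking thread should consume only a fraction f of a CPU, it has to sleep (1.0 - f)/f seconds for every second of virtual time it runs. A worked example with an illustrative (not default) overhead value:

#include <cstdio>

int main() {
  // Illustrative value: suppose the per-thread marking overhead
  // target works out to 25% of a CPU.
  double marking_task_overhead = 0.25;

  // Same formula as in the change: sleep three seconds for every
  // second of marking work to hold the thread at 25% utilization.
  double sleep_factor =
    (1.0 - marking_task_overhead) / marking_task_overhead;

  printf("sleep factor = %1.4lf\n", sleep_factor);  // prints 3.0000
  return 0;
}
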
@@ -616,11 +615,11 @@
   gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
 #endif
 
   guarantee(parallel_marking_threads() > 0, "peace of mind");
   _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
-       (int) _max_parallel_marking_threads, false, true);
+       _max_parallel_marking_threads, false, true);
   if (_parallel_workers == NULL) {
     vm_exit_during_initialization("Failed necessary allocation.");
   } else {
     _parallel_workers->initialize_workers();
   }
@@ -689,11 +688,11 @@
   // we need this to make sure that the flag is on during the evac
   // pause with initial mark piggy-backed
   set_concurrent_marking_in_progress();
 }
 
-void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) {
+void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
   assert(active_tasks <= _max_task_num, "we should not have more");
 
   _active_tasks = active_tasks;
   // Need to update the three data structures below according to the
   // number of active threads for this phase.
@@ -1046,21 +1045,21 @@
 private:
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmt;
 
 public:
-  void work(int worker_i) {
+  void work(uint worker_id) {
     assert(Thread::current()->is_ConcurrentGC_thread(),
            "this should only be done by a conc GC thread");
     ResourceMark rm;
 
     double start_vtime = os::elapsedVTime();
 
     ConcurrentGCThread::stsJoin();
 
-    assert((size_t) worker_i < _cm->active_tasks(), "invariant");
-    CMTask* the_task = _cm->task(worker_i);
+    assert(worker_id < _cm->active_tasks(), "invariant");
+    CMTask* the_task = _cm->task(worker_id);
     the_task->record_start_time();
     if (!_cm->has_aborted()) {
       do {
         double start_vtime_sec = os::elapsedVTime();
         double start_time_sec = os::elapsedTime();
@@ -1074,11 +1073,11 @@
         double end_vtime_sec = os::elapsedVTime();
         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
         double elapsed_time_sec = end_time_sec - start_time_sec;
         _cm->clear_has_overflown();
 
-        bool ret = _cm->do_yield_check(worker_i);
+        bool ret = _cm->do_yield_check(worker_id);
 
         jlong sleep_time_ms;
         if (!_cm->has_aborted() && the_task->has_aborted()) {
           sleep_time_ms =
             (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
@@ -1103,11 +1102,11 @@
     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
 
     ConcurrentGCThread::stsLeave();
 
     double end_vtime = os::elapsedVTime();
-    _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
+    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
   }
 
   CMConcurrentMarkingTask(ConcurrentMark* cm,
                           ConcurrentMarkThread* cmt) :
     AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
@@ -1115,13 +1114,13 @@
   ~CMConcurrentMarkingTask() { }
 };
 
 // Calculates the number of active workers for a concurrent
 // phase.
-size_t ConcurrentMark::calc_parallel_marking_threads() {
+uint ConcurrentMark::calc_parallel_marking_threads() {
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    size_t n_conc_workers = 0;
+    uint n_conc_workers = 0;
     if (!UseDynamicNumberOfGCThreads ||
         (!FLAG_IS_DEFAULT(ConcGCThreads) &&
          !ForceDynamicNumberOfGCThreads)) {
       n_conc_workers = max_parallel_marking_threads();
     } else {
@@ -1157,11 +1156,11 @@
   // _g1h has _n_par_threads
   _parallel_marking_threads = calc_parallel_marking_threads();
   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
          "Maximum number of marking threads exceeded");
 
-  size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
+  uint active_workers = MAX2(1U, parallel_marking_threads());
 
   // Parallel task terminator is set in "set_phase()"
   set_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
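The dispatch contract that makes worker_id usable as an index is worth spelling out: a work gang runs one invocation of work() per active worker, passing dense ids 0 .. active_workers-1. A self-contained sketch of that fan-out, with std::thread standing in for HotSpot's FlexibleWorkGang, whose internals are not shown in this change:

#include <thread>
#include <vector>

typedef unsigned int uint;

struct GangTask {
  virtual ~GangTask() {}
  virtual void work(uint worker_id) = 0;
};

// Stand-in for a work gang's run_task(): each worker gets a dense id
// in [0, active_workers), which is why task bodies can use it to
// index per-worker arrays without synchronization.
void run_task(GangTask* task, uint active_workers) {
  std::vector<std::thread> workers;
  for (uint worker_id = 0; worker_id < active_workers; ++worker_id) {
    workers.emplace_back([task, worker_id] { task->work(worker_id); });
  }
  for (std::thread& t : workers) {
    t.join();
  }
}
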
@@ -1501,11 +1500,11 @@
 
 class G1ParFinalCountTask: public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
   CMBitMap* _bm;
-  size_t _n_workers;
+  uint _n_workers;
   size_t *_live_bytes;
   size_t *_used_bytes;
   BitMap* _region_bm;
   BitMap* _card_bm;
 public:
@@ -1533,37 +1532,37 @@
   ~G1ParFinalCountTask() {
     FREE_C_HEAP_ARRAY(size_t, _live_bytes);
     FREE_C_HEAP_ARRAY(size_t, _used_bytes);
   }
 
-  void work(int i) {
+  void work(uint worker_id) {
     CalcLiveObjectsClosure calccl(true /*final*/,
                                   _bm, _g1h->concurrent_mark(),
                                   _region_bm, _card_bm);
     calccl.no_yield();
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&calccl, i,
+      _g1h->heap_region_par_iterate_chunked(&calccl, worker_id,
                                             (int) _n_workers,
                                             HeapRegion::FinalCountClaimValue);
     } else {
       _g1h->heap_region_iterate(&calccl);
     }
     assert(calccl.complete(), "Shouldn't have yielded!");
 
-    assert((size_t) i < _n_workers, "invariant");
-    _live_bytes[i] = calccl.tot_live();
-    _used_bytes[i] = calccl.tot_used();
+    assert(worker_id < _n_workers, "invariant");
+    _live_bytes[worker_id] = calccl.tot_live();
+    _used_bytes[worker_id] = calccl.tot_used();
   }
   size_t live_bytes() {
     size_t live_bytes = 0;
-    for (size_t i = 0; i < _n_workers; ++i)
+    for (uint i = 0; i < _n_workers; ++i)
       live_bytes += _live_bytes[i];
     return live_bytes;
   }
   size_t used_bytes() {
     size_t used_bytes = 0;
-    for (size_t i = 0; i < _n_workers; ++i)
+    for (uint i = 0; i < _n_workers; ++i)
       used_bytes += _used_bytes[i];
     return used_bytes;
   }
 };
 
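G1ParFinalCountTask shows the standard per-worker accumulation idiom: one slot per worker id, written without locking because ids are unique, then summed single-threaded after the gang finishes. A reduced sketch of the same idiom, using plain new[]/delete[] in place of NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY:

#include <cstddef>

typedef unsigned int uint;

class FinalCountSketch {
  uint _n_workers;
  size_t* _live_bytes;  // one slot per worker id; no locking needed
public:
  FinalCountSketch(uint n_workers)
    : _n_workers(n_workers),
      _live_bytes(new size_t[n_workers]()) {}  // zero-initialized
  ~FinalCountSketch() { delete[] _live_bytes; }

  // Called concurrently, but each worker touches only its own slot.
  void work(uint worker_id, size_t counted) {
    _live_bytes[worker_id] = counted;
  }

  // Called after all workers have joined.
  size_t live_bytes() const {
    size_t total = 0;
    for (uint i = 0; i < _n_workers; ++i) {
      total += _live_bytes[i];
    }
    return total;
  }
};
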
@@ -1644,22 +1643,22 @@
   G1ParNoteEndTask(G1CollectedHeap* g1h,
                    FreeRegionList* cleanup_list) :
     AbstractGangTask("G1 note end"), _g1h(g1h),
     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
 
-  void work(int i) {
+  void work(uint worker_id) {
     double start = os::elapsedTime();
     FreeRegionList local_cleanup_list("Local Cleanup List");
     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
     HRRSCleanupTask hrrs_cleanup_task;
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list,
+    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
                                            &old_proxy_set,
                                            &humongous_proxy_set,
                                            &hrrs_cleanup_task);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
+      _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
                                             _g1h->workers()->active_workers(),
                                             HeapRegion::NoteEndClaimValue);
     } else {
       _g1h->heap_region_iterate(&g1_note_end);
     }
@@ -1699,12 +1698,12 @@
       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
     }
     double end = os::elapsedTime();
     if (G1PrintParCleanupStats) {
       gclog_or_tty->print("  Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
-                          "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n",
-                          i, start, end, (end-start)*1000.0,
+                          "claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
+                          worker_id, start, end, (end-start)*1000.0,
                           g1_note_end.regions_claimed(),
                           g1_note_end.claimed_region_time_sec()*1000.0,
                           g1_note_end.max_region_time_sec()*1000.0);
     }
   }
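The format-string edit in the G1PrintParCleanupStats block goes hand in hand with the type change: once a value is unsigned, %u is the matching printf conversion, while %d is only well-defined for values that fit in an int. A minimal illustration:

#include <cstdio>

typedef unsigned int uint;

int main() {
  uint regions_claimed = 42;
  // %u matches an unsigned argument; %d happens to work for small
  // values but is the wrong conversion for a uint and is undefined
  // behavior once the value exceeds INT_MAX.
  printf("claimed %u regions\n", regions_claimed);
  return 0;
}
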
@@ -1722,13 +1721,13 @@
                  BitMap* region_bm, BitMap* card_bm) :
     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
     _region_bm(region_bm), _card_bm(card_bm)
   {}
 
-  void work(int i) {
+  void work(uint worker_id) {
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1rs->scrub_par(_region_bm, _card_bm, i,
+      _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
                        HeapRegion::ScrubRemSetClaimValue);
     } else {
       _g1rs->scrub(_region_bm, _card_bm);
     }
   }
@@ -1764,11 +1763,11 @@
 
   double start = os::elapsedTime();
 
   HeapRegionRemSet::reset_for_cleanup_tasks();
 
-  size_t n_workers;
+  uint n_workers;
 
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
                                         &_region_bm, &_card_bm);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -1776,11 +1775,11 @@
                   HeapRegion::InitialClaimValue),
            "sanity check");
 
     g1h->set_par_threads();
     n_workers = g1h->n_par_threads();
-    assert(g1h->n_par_threads() == (int) n_workers,
+    assert(g1h->n_par_threads() == n_workers,
            "Should not have been reset");
     g1h->workers()->run_task(&g1_par_count_task);
     // Done with the parallel phase so reset to 0.
     g1h->set_par_threads(0);
 
@@ -2167,17 +2166,17 @@
                           G1CollectedHeap* g1h,
                           ConcurrentMark* cm) :
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task), _g1h(g1h), _cm(cm) { }
 
-  virtual void work(int i) {
-    CMTask* marking_task = _cm->task(i);
+  virtual void work(uint worker_id) {
+    CMTask* marking_task = _cm->task(worker_id);
     G1CMIsAliveClosure g1_is_alive(_g1h);
     G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
     G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
 
-    _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
+    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
   }
 };
 
 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
@@ -2199,12 +2198,12 @@
 public:
   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
     AbstractGangTask("Enqueue reference objects in parallel"),
     _enq_task(enq_task) { }
 
-  virtual void work(int i) {
-    _enq_task.work(i);
+  virtual void work(uint worker_id) {
+    _enq_task.work(worker_id);
   }
 };
 
 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
@@ -2247,12 +2246,12 @@
   G1CMDrainMarkingStackClosure
     g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
 
   // We use the work gang from the G1CollectedHeap and we utilize all
   // the worker threads.
-  int active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1;
-  active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
+  uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U;
+  active_workers = MAX2(MIN2(active_workers, _max_task_num), 1U);
 
   G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                             g1h->workers(), active_workers);
 
   if (rp->processing_is_mt()) {
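With _max_task_num now a uint, the active-worker clamp can be written without casts: MIN2 bounds it above by the number of marking tasks, MAX2 bounds it below by one. A compilable sketch of the same clamp with stand-in single-type MIN2/MAX2 templates:

typedef unsigned int uint;

// Stand-ins for HotSpot's single-type MIN2/MAX2 templates.
template <class T> T MIN2(T a, T b) { return (a < b) ? a : b; }
template <class T> T MAX2(T a, T b) { return (a > b) ? a : b; }

uint clamp_active_workers(uint active_workers, uint max_task_num) {
  // At most one worker per marking task, and never fewer than one:
  // all three operands are uint, so no (int) casts are needed.
  return MAX2(MIN2(active_workers, max_task_num), 1U);
}
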
@@ -2312,15 +2311,15 @@
 class CMRemarkTask: public AbstractGangTask {
 private:
   ConcurrentMark *_cm;
 
 public:
-  void work(int worker_i) {
+  void work(uint worker_id) {
     // Since all available tasks are actually started, we should
     // only proceed if we're supposed to be actived.
-    if ((size_t)worker_i < _cm->active_tasks()) {
-      CMTask* task = _cm->task(worker_i);
+    if (worker_id < _cm->active_tasks()) {
+      CMTask* task = _cm->task(worker_id);
       task->record_start_time();
       do {
         task->do_marking_step(1000000000.0 /* something very large */,
                               true /* do_stealing */,
                               true /* do_termination */);
@@ -2345,14 +2344,14 @@
   g1h->ensure_parsability(false);
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all active threads
-    int active_workers = g1h->workers()->active_workers();
+    uint active_workers = g1h->workers()->active_workers();
     if (active_workers == 0) {
       assert(active_workers > 0, "Should have been set earlier");
-      active_workers = ParallelGCThreads;
+      active_workers = (uint) ParallelGCThreads;
       g1h->workers()->set_active_workers(active_workers);
     }
     set_phase(active_workers, false /* concurrent */);
     // Leave _parallel_marking_threads at it's
     // value originally calculated in the ConcurrentMark
@@ -2364,11 +2363,11 @@
     g1h->workers()->run_task(&remarkTask);
     g1h->set_par_threads(0);
   } else {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
-    int active_workers = 1;
+    uint active_workers = 1;
     set_phase(active_workers, false /* concurrent */);
 
     CMRemarkTask remarkTask(this, active_workers);
     // We will start all available threads, even if we decide that the
     // active_workers will be fewer. The extra ones will just bail out
@@ -2919,11 +2918,11 @@
   oop* _ms;
   jint* _array_ind_stack;
   int _ms_size;
   int _ms_ind;
   int _array_increment;
-  int _worker_i;
+  uint _worker_id;
 
   bool push(oop obj, int arr_ind = 0) {
     if (_ms_ind == _ms_size) {
       gclog_or_tty->print_cr("Mark stack is full.");
       return false;
@@ -2969,19 +2968,19 @@
     }
     return true;
   }
 
 public:
-  CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
+  CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, uint worker_id) :
     _g1h(G1CollectedHeap::heap()),
     _cm(cm),
     _bm(cm->nextMarkBitMap()),
     _ms_size(ms_size), _ms_ind(0),
     _ms(NEW_C_HEAP_ARRAY(oop, ms_size)),
     _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)),
     _array_increment(MAX2(ms_size/8, 16)),
-    _worker_i(worker_i) { }
+    _worker_id(worker_id) { }
 
   ~CSetMarkOopClosure() {
     FREE_C_HEAP_ARRAY(oop, _ms);
     FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
   }
@@ -3022,18 +3021,18 @@
 class CSetMarkBitMapClosure: public BitMapClosure {
   G1CollectedHeap* _g1h;
   CMBitMap* _bitMap;
   ConcurrentMark* _cm;
   CSetMarkOopClosure _oop_cl;
-  int _worker_i;
+  uint _worker_id;
 
 public:
-  CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
+  CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_id) :
     _g1h(G1CollectedHeap::heap()),
     _bitMap(cm->nextMarkBitMap()),
-    _oop_cl(cm, ms_size, worker_i),
-    _worker_i(worker_i) { }
+    _oop_cl(cm, ms_size, worker_id),
+    _worker_id(worker_id) { }
 
   bool do_bit(size_t offset) {
     // convert offset into a HeapWord*
     HeapWord* addr = _bitMap->offsetToHeapWord(offset);
     assert(_bitMap->endWord() && addr < _bitMap->endWord(),
@@ -3054,21 +3053,21 @@
 };
 
 class CompleteMarkingInCSetHRClosure: public HeapRegionClosure {
   CMBitMap* _bm;
   CSetMarkBitMapClosure _bit_cl;
-  int _worker_i;
+  uint _worker_id;
 
   enum SomePrivateConstants {
     MSSize = 1000
   };
 
 public:
-  CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_i) :
+  CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_id) :
     _bm(cm->nextMarkBitMap()),
-    _bit_cl(cm, MSSize, worker_i),
-    _worker_i(worker_i) { }
+    _bit_cl(cm, MSSize, worker_id),
+    _worker_id(worker_id) { }
 
   bool doHeapRegion(HeapRegion* hr) {
     if (hr->claimHeapRegion(HeapRegion::CompleteMarkCSetClaimValue)) {
       // The current worker has successfully claimed the region.
       if (!hr->evacuation_failed()) {
@@ -3107,13 +3106,13 @@
   G1ParCompleteMarkInCSetTask(G1CollectedHeap* g1h,
                               ConcurrentMark* cm) :
     AbstractGangTask("Complete Mark in CSet"),
     _g1h(g1h), _cm(cm) { }
 
-  void work(int worker_i) {
-    CompleteMarkingInCSetHRClosure cmplt(_cm, worker_i);
-    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_i);
+  void work(uint worker_id) {
+    CompleteMarkingInCSetHRClosure cmplt(_cm, worker_id);
+    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &cmplt);
   }
 };
 
 void ConcurrentMark::complete_marking_in_collection_set() {
@@ -3305,17 +3304,17 @@
 
 // This closure is used to mark refs into the CMS generation in
 // the CMS bit map. Called at the first checkpoint.
 
 // We take a break if someone is trying to stop the world.
-bool ConcurrentMark::do_yield_check(int worker_i) {
+bool ConcurrentMark::do_yield_check(uint worker_id) {
   if (should_yield()) {
-    if (worker_i == 0) {
+    if (worker_id == 0) {
       _g1h->g1_policy()->record_concurrent_pause();
     }
     cmThread()->yield();
-    if (worker_i == 0) {
+    if (worker_id == 0) {
       _g1h->g1_policy()->record_concurrent_pause_end();
    }
     return true;
   } else {
     return false;
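The yield check closes the loop on the worker-id change: the id doubles as a role selector, with worker 0 designated to do the one-time bookkeeping around a pause. A compilable sketch of that idiom, with a hypothetical PauseRecorder standing in for the g1_policy() calls in the change:

typedef unsigned int uint;

// Hypothetical stand-in for g1_policy(); the real calls are
// record_concurrent_pause() and record_concurrent_pause_end().
struct PauseRecorder {
  void record_concurrent_pause() {}
  void record_concurrent_pause_end() {}
};

// Sketch of the designated-worker idiom in do_yield_check(): every
// worker yields, but only worker 0 records the pause, so the policy
// sees one event per pause instead of one per thread.
bool do_yield_check_sketch(uint worker_id, bool should_yield,
                           PauseRecorder* policy) {
  if (!should_yield) {
    return false;
  }
  if (worker_id == 0) {
    policy->record_concurrent_pause();
  }
  // ... the actual yield (cmThread()->yield() above) happens here ...
  if (worker_id == 0) {
    policy->record_concurrent_pause_end();
  }
  return true;
}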