Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 1833:8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
Summary: Associate number of GC workers with the workgang as opposed to the task.
Reviewed-by: johnc, ysr
author: jmasa
date: Mon, 20 Sep 2010 14:38:38 -0700
parents | b63010841f78 |
children | 4805b9f4779e |
comparison legend: equal | deleted | inserted | replaced
comparing revisions: 1781:97fbf5beff7b (parent) vs. 1833:8b10f48633dc (this changeset)
581 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead()); | 581 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead()); |
582 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor()); | 582 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor()); |
583 #endif | 583 #endif |
584 | 584 |
585 guarantee(parallel_marking_threads() > 0, "peace of mind"); | 585 guarantee(parallel_marking_threads() > 0, "peace of mind"); |
586 _parallel_workers = new WorkGang("G1 Parallel Marking Threads", | 586 _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads", |
587 (int) parallel_marking_threads(), false, true); | 587 (int) _parallel_marking_threads, false, true); |
588 if (_parallel_workers == NULL) | 588 if (_parallel_workers == NULL) { |
589 vm_exit_during_initialization("Failed necessary allocation."); | 589 vm_exit_during_initialization("Failed necessary allocation."); |
590 } else { | |
591 _parallel_workers->initialize_workers(); | |
592 } | |
590 } | 593 } |
591 | 594 |
592 // so that the call below can read a sensible value | 595 // so that the call below can read a sensible value |
593 _heap_start = (HeapWord*) rs.base(); | 596 _heap_start = (HeapWord*) rs.base(); |
594 set_non_marking_state(); | 597 set_non_marking_state(); |
1449 void work(int i) { | 1452 void work(int i) { |
1450 CalcLiveObjectsClosure calccl(true /*final*/, | 1453 CalcLiveObjectsClosure calccl(true /*final*/, |
1451 _bm, _g1h->concurrent_mark(), | 1454 _bm, _g1h->concurrent_mark(), |
1452 _region_bm, _card_bm); | 1455 _region_bm, _card_bm); |
1453 calccl.no_yield(); | 1456 calccl.no_yield(); |
1454 if (ParallelGCThreads > 0) { | 1457 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1455 _g1h->heap_region_par_iterate_chunked(&calccl, i, | 1458 _g1h->heap_region_par_iterate_chunked(&calccl, i, |
1456 HeapRegion::FinalCountClaimValue); | 1459 HeapRegion::FinalCountClaimValue); |
1457 } else { | 1460 } else { |
1458 _g1h->heap_region_iterate(&calccl); | 1461 _g1h->heap_region_iterate(&calccl); |
1459 } | 1462 } |
1529 void work(int i) { | 1532 void work(int i) { |
1530 double start = os::elapsedTime(); | 1533 double start = os::elapsedTime(); |
1531 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, | 1534 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, |
1532 &_par_cleanup_thread_state[i]->list, | 1535 &_par_cleanup_thread_state[i]->list, |
1533 i); | 1536 i); |
1534 if (ParallelGCThreads > 0) { | 1537 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1535 _g1h->heap_region_par_iterate_chunked(&g1_note_end, i, | 1538 _g1h->heap_region_par_iterate_chunked(&g1_note_end, i, |
1536 HeapRegion::NoteEndClaimValue); | 1539 HeapRegion::NoteEndClaimValue); |
1537 } else { | 1540 } else { |
1538 _g1h->heap_region_iterate(&g1_note_end); | 1541 _g1h->heap_region_iterate(&g1_note_end); |
1539 } | 1542 } |
1573 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), | 1576 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), |
1574 _region_bm(region_bm), _card_bm(card_bm) | 1577 _region_bm(region_bm), _card_bm(card_bm) |
1575 {} | 1578 {} |
1576 | 1579 |
1577 void work(int i) { | 1580 void work(int i) { |
1578 if (ParallelGCThreads > 0) { | 1581 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1579 _g1rs->scrub_par(_region_bm, _card_bm, i, | 1582 _g1rs->scrub_par(_region_bm, _card_bm, i, |
1580 HeapRegion::ScrubRemSetClaimValue); | 1583 HeapRegion::ScrubRemSetClaimValue); |
1581 } else { | 1584 } else { |
1582 _g1rs->scrub(_region_bm, _card_bm); | 1585 _g1rs->scrub(_region_bm, _card_bm); |
1583 } | 1586 } |
1645 double start = os::elapsedTime(); | 1648 double start = os::elapsedTime(); |
1646 | 1649 |
1647 // Do counting once more with the world stopped for good measure. | 1650 // Do counting once more with the world stopped for good measure. |
1648 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), | 1651 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), |
1649 &_region_bm, &_card_bm); | 1652 &_region_bm, &_card_bm); |
1650 if (ParallelGCThreads > 0) { | 1653 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1651 assert(g1h->check_heap_region_claim_values( | 1654 assert(g1h->check_heap_region_claim_values( |
1652 HeapRegion::InitialClaimValue), | 1655 HeapRegion::InitialClaimValue), |
1653 "sanity check"); | 1656 "sanity check"); |
1654 | 1657 |
1655 int n_workers = g1h->workers()->total_workers(); | 1658 int n_workers = g1h->workers()->total_workers(); |
1693 g1h->reset_gc_time_stamp(); | 1696 g1h->reset_gc_time_stamp(); |
1694 | 1697 |
1695 // Note end of marking in all heap regions. | 1698 // Note end of marking in all heap regions. |
1696 double note_end_start = os::elapsedTime(); | 1699 double note_end_start = os::elapsedTime(); |
1697 G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state); | 1700 G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state); |
1698 if (ParallelGCThreads > 0) { | 1701 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1699 int n_workers = g1h->workers()->total_workers(); | 1702 int n_workers = g1h->workers()->total_workers(); |
1700 g1h->set_par_threads(n_workers); | 1703 g1h->set_par_threads(n_workers); |
1701 g1h->workers()->run_task(&g1_par_note_end_task); | 1704 g1h->workers()->run_task(&g1_par_note_end_task); |
1702 g1h->set_par_threads(0); | 1705 g1h->set_par_threads(0); |
1703 | 1706 |
1718 // call below, since it affects the metric by which we sort the heap | 1721 // call below, since it affects the metric by which we sort the heap |
1719 // regions. | 1722 // regions. |
1720 if (G1ScrubRemSets) { | 1723 if (G1ScrubRemSets) { |
1721 double rs_scrub_start = os::elapsedTime(); | 1724 double rs_scrub_start = os::elapsedTime(); |
1722 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); | 1725 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); |
1723 if (ParallelGCThreads > 0) { | 1726 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1724 int n_workers = g1h->workers()->total_workers(); | 1727 int n_workers = g1h->workers()->total_workers(); |
1725 g1h->set_par_threads(n_workers); | 1728 g1h->set_par_threads(n_workers); |
1726 g1h->workers()->run_task(&g1_par_scrub_rs_task); | 1729 g1h->workers()->run_task(&g1_par_scrub_rs_task); |
1727 g1h->set_par_threads(0); | 1730 g1h->set_par_threads(0); |
1728 | 1731 |
1932 HandleMark hm; | 1935 HandleMark hm; |
1933 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | 1936 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
1934 | 1937 |
1935 g1h->ensure_parsability(false); | 1938 g1h->ensure_parsability(false); |
1936 | 1939 |
1937 if (ParallelGCThreads > 0) { | 1940 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1938 G1CollectedHeap::StrongRootsScope srs(g1h); | 1941 G1CollectedHeap::StrongRootsScope srs(g1h); |
1939 // this is remark, so we'll use up all available threads | 1942 // this is remark, so we'll use up all available threads |
1940 int active_workers = ParallelGCThreads; | 1943 int active_workers = ParallelGCThreads; |
1941 set_phase(active_workers, false); | 1944 set_phase(active_workers, false); |
1942 | 1945 |
3367 // very counter productive if it did that. :-) | 3370 // very counter productive if it did that. :-) |
3368 _draining_satb_buffers = true; | 3371 _draining_satb_buffers = true; |
3369 | 3372 |
3370 CMObjectClosure oc(this); | 3373 CMObjectClosure oc(this); |
3371 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | 3374 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
3372 if (ParallelGCThreads > 0) | 3375 if (G1CollectedHeap::use_parallel_gc_threads()) |
3373 satb_mq_set.set_par_closure(_task_id, &oc); | 3376 satb_mq_set.set_par_closure(_task_id, &oc); |
3374 else | 3377 else |
3375 satb_mq_set.set_closure(&oc); | 3378 satb_mq_set.set_closure(&oc); |
3376 | 3379 |
3377 // This keeps claiming and applying the closure to completed buffers | 3380 // This keeps claiming and applying the closure to completed buffers |
3378 // until we run out of buffers or we need to abort. | 3381 // until we run out of buffers or we need to abort. |
3379 if (ParallelGCThreads > 0) { | 3382 if (G1CollectedHeap::use_parallel_gc_threads()) { |
3380 while (!has_aborted() && | 3383 while (!has_aborted() && |
3381 satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) { | 3384 satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) { |
3382 if (_cm->verbose_medium()) | 3385 if (_cm->verbose_medium()) |
3383 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); | 3386 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); |
3384 statsOnly( ++_satb_buffers_processed ); | 3387 statsOnly( ++_satb_buffers_processed ); |
3394 } | 3397 } |
3395 } | 3398 } |
3396 | 3399 |
3397 if (!concurrent() && !has_aborted()) { | 3400 if (!concurrent() && !has_aborted()) { |
3398 // We should only do this during remark. | 3401 // We should only do this during remark. |
3399 if (ParallelGCThreads > 0) | 3402 if (G1CollectedHeap::use_parallel_gc_threads()) |
3400 satb_mq_set.par_iterate_closure_all_threads(_task_id); | 3403 satb_mq_set.par_iterate_closure_all_threads(_task_id); |
3401 else | 3404 else |
3402 satb_mq_set.iterate_closure_all_threads(); | 3405 satb_mq_set.iterate_closure_all_threads(); |
3403 } | 3406 } |
3404 | 3407 |
3406 | 3409 |
3407 assert(has_aborted() || | 3410 assert(has_aborted() || |
3408 concurrent() || | 3411 concurrent() || |
3409 satb_mq_set.completed_buffers_num() == 0, "invariant"); | 3412 satb_mq_set.completed_buffers_num() == 0, "invariant"); |
3410 | 3413 |
3411 if (ParallelGCThreads > 0) | 3414 if (G1CollectedHeap::use_parallel_gc_threads()) |
3412 satb_mq_set.set_par_closure(_task_id, NULL); | 3415 satb_mq_set.set_par_closure(_task_id, NULL); |
3413 else | 3416 else |
3414 satb_mq_set.set_closure(NULL); | 3417 satb_mq_set.set_closure(NULL); |
3415 | 3418 |
3416 // again, this was a potentially expensive operation, decrease the | 3419 // again, this was a potentially expensive operation, decrease the |