comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 4097:dc467e8b2c5e

7112743: G1: Reduce overhead of marking closure during evacuation pauses Summary: Parallelize the serial code that was used to mark objects reachable from survivor objects in the collection set. Some minor improvements in the timers used to track the freeing of the collection set along with some tweaks to PrintGCDetails. Reviewed-by: tonyp, brutisso
author johnc
date Thu, 17 Nov 2011 12:40:15 -0800
parents bca17e38de00
children 3c648b9ad052
comparison
equal deleted inserted replaced
4096:00dd86e542eb 4097:dc467e8b2c5e
2615 public: 2615 public:
2616 CheckClaimValuesClosure(jint claim_value) : 2616 CheckClaimValuesClosure(jint claim_value) :
2617 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } 2617 _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
2618 bool doHeapRegion(HeapRegion* r) { 2618 bool doHeapRegion(HeapRegion* r) {
2619 if (r->claim_value() != _claim_value) { 2619 if (r->claim_value() != _claim_value) {
2620 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 2620 gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2621 "claim value = %d, should be %d", 2621 "claim value = %d, should be %d",
2622 r->bottom(), r->end(), r->claim_value(), 2622 HR_FORMAT_PARAMS(r),
2623 _claim_value); 2623 r->claim_value(), _claim_value);
2624 ++_failures; 2624 ++_failures;
2625 } 2625 }
2626 if (!r->isHumongous()) { 2626 if (!r->isHumongous()) {
2627 _sh_region = NULL; 2627 _sh_region = NULL;
2628 } else if (r->startsHumongous()) { 2628 } else if (r->startsHumongous()) {
2629 _sh_region = r; 2629 _sh_region = r;
2630 } else if (r->continuesHumongous()) { 2630 } else if (r->continuesHumongous()) {
2631 if (r->humongous_start_region() != _sh_region) { 2631 if (r->humongous_start_region() != _sh_region) {
2632 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 2632 gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2633 "HS = "PTR_FORMAT", should be "PTR_FORMAT, 2633 "HS = "PTR_FORMAT", should be "PTR_FORMAT,
2634 r->bottom(), r->end(), 2634 HR_FORMAT_PARAMS(r),
2635 r->humongous_start_region(), 2635 r->humongous_start_region(),
2636 _sh_region); 2636 _sh_region);
2637 ++_failures; 2637 ++_failures;
2638 } 2638 }
2639 } 2639 }
2647 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { 2647 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
2648 CheckClaimValuesClosure cl(claim_value); 2648 CheckClaimValuesClosure cl(claim_value);
2649 heap_region_iterate(&cl); 2649 heap_region_iterate(&cl);
2650 return cl.failures() == 0; 2650 return cl.failures() == 0;
2651 } 2651 }
2652
2653 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
2654 jint _claim_value;
2655 size_t _failures;
2656
2657 public:
2658 CheckClaimValuesInCSetHRClosure(jint claim_value) :
2659 _claim_value(claim_value),
2660 _failures(0) { }
2661
2662 size_t failures() {
2663 return _failures;
2664 }
2665
2666 bool doHeapRegion(HeapRegion* hr) {
2667 assert(hr->in_collection_set(), "how?");
2668 assert(!hr->isHumongous(), "H-region in CSet");
2669 if (hr->claim_value() != _claim_value) {
2670 gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
2671 "claim value = %d, should be %d",
2672 HR_FORMAT_PARAMS(hr),
2673 hr->claim_value(), _claim_value);
2674 _failures += 1;
2675 }
2676 return false;
2677 }
2678 };
2679
2680 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
2681 CheckClaimValuesInCSetHRClosure cl(claim_value);
2682 collection_set_iterate(&cl);
2683 return cl.failures() == 0;
2684 }
2652 #endif // ASSERT 2685 #endif // ASSERT
2686
2687 // We want the parallel threads to start their collection
2688 // set iteration at different collection set regions to
2689 // avoid contention.
2690 // If we have:
2691 // n collection set regions
2692 // p threads
2693 // Then thread t will start at region t * floor (n/p)
2694
2695 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
2696 HeapRegion* result = g1_policy()->collection_set();
2697 if (G1CollectedHeap::use_parallel_gc_threads()) {
2698 size_t cs_size = g1_policy()->cset_region_length();
2699 int n_workers = workers()->total_workers();
2700 size_t cs_spans = cs_size / n_workers;
2701 size_t ind = cs_spans * worker_i;
2702 for (size_t i = 0; i < ind; i++) {
2703 result = result->next_in_collection_set();
2704 }
2705 }
2706 return result;
2707 }
2653 2708
2654 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { 2709 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2655 HeapRegion* r = g1_policy()->collection_set(); 2710 HeapRegion* r = g1_policy()->collection_set();
2656 while (r != NULL) { 2711 while (r != NULL) {
2657 HeapRegion* next = r->next_in_collection_set(); 2712 HeapRegion* next = r->next_in_collection_set();
5391 concurrent_g1_refine()->clear_hot_cache(); 5446 concurrent_g1_refine()->clear_hot_cache();
5392 concurrent_g1_refine()->set_use_cache(true); 5447 concurrent_g1_refine()->set_use_cache(true);
5393 5448
5394 finalize_for_evac_failure(); 5449 finalize_for_evac_failure();
5395 5450
5396 // Must do this before removing self-forwarding pointers, which clears 5451 // Must do this before clearing the per-region evac-failure flags
5397 // the per-region evac-failure flags. 5452 // (which is currently done when we free the collection set).
5453 // We also only do this if marking is actually in progress and so
5454 // have to do this before we set the mark_in_progress flag at the
5455 // end of an initial mark pause.
5398 concurrent_mark()->complete_marking_in_collection_set(); 5456 concurrent_mark()->complete_marking_in_collection_set();
5399 5457
5400 if (evacuation_failed()) { 5458 if (evacuation_failed()) {
5401 remove_self_forwarding_pointers(); 5459 remove_self_forwarding_pointers();
5402 if (PrintGCDetails) { 5460 if (PrintGCDetails) {
5654 int age_bound = -1; 5712 int age_bound = -1;
5655 size_t rs_lengths = 0; 5713 size_t rs_lengths = 0;
5656 5714
5657 while (cur != NULL) { 5715 while (cur != NULL) {
5658 assert(!is_on_master_free_list(cur), "sanity"); 5716 assert(!is_on_master_free_list(cur), "sanity");
5659
5660 if (non_young) { 5717 if (non_young) {
5661 if (cur->is_young()) { 5718 if (cur->is_young()) {
5662 double end_sec = os::elapsedTime(); 5719 double end_sec = os::elapsedTime();
5663 double elapsed_ms = (end_sec - start_sec) * 1000.0; 5720 double elapsed_ms = (end_sec - start_sec) * 1000.0;
5664 non_young_time_ms += elapsed_ms; 5721 non_young_time_ms += elapsed_ms;
5665 5722
5666 start_sec = os::elapsedTime(); 5723 start_sec = os::elapsedTime();
5667 non_young = false; 5724 non_young = false;
5668 } 5725 }
5669 } else { 5726 } else {
5670 double end_sec = os::elapsedTime(); 5727 if (!cur->is_young()) {
5671 double elapsed_ms = (end_sec - start_sec) * 1000.0; 5728 double end_sec = os::elapsedTime();
5672 young_time_ms += elapsed_ms; 5729 double elapsed_ms = (end_sec - start_sec) * 1000.0;
5673 5730 young_time_ms += elapsed_ms;
5674 start_sec = os::elapsedTime(); 5731
5675 non_young = true; 5732 start_sec = os::elapsedTime();
5733 non_young = true;
5734 }
5676 } 5735 }
5677 5736
5678 rs_lengths += cur->rem_set()->occupied(); 5737 rs_lengths += cur->rem_set()->occupied();
5679 5738
5680 HeapRegion* next = cur->next_in_collection_set(); 5739 HeapRegion* next = cur->next_in_collection_set();
5702 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || 5761 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
5703 (!cur->is_young() && cur->young_index_in_cset() == -1), 5762 (!cur->is_young() && cur->young_index_in_cset() == -1),
5704 "invariant" ); 5763 "invariant" );
5705 5764
5706 if (!cur->evacuation_failed()) { 5765 if (!cur->evacuation_failed()) {
5766 MemRegion used_mr = cur->used_region();
5767
5707 // And the region is empty. 5768 // And the region is empty.
5708 assert(!cur->is_empty(), "Should not have empty regions in a CS."); 5769 assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
5770
5771 // If marking is in progress then clear any objects marked in
5772 // the current region. Note mark_in_progress() returns false,
5773 // even during an initial mark pause, until the set_marking_started()
5774 // call which takes place later in the pause.
5775 if (mark_in_progress()) {
5776 assert(!g1_policy()->during_initial_mark_pause(), "sanity");
5777 _cm->nextMarkBitMap()->clearRange(used_mr);
5778 }
5779
5709 free_region(cur, &pre_used, &local_free_list, false /* par */); 5780 free_region(cur, &pre_used, &local_free_list, false /* par */);
5710 } else { 5781 } else {
5711 cur->uninstall_surv_rate_group(); 5782 cur->uninstall_surv_rate_group();
5712 if (cur->is_young()) { 5783 if (cur->is_young()) {
5713 cur->set_young_index_in_cset(-1); 5784 cur->set_young_index_in_cset(-1);
5723 policy->record_max_rs_lengths(rs_lengths); 5794 policy->record_max_rs_lengths(rs_lengths);
5724 policy->cset_regions_freed(); 5795 policy->cset_regions_freed();
5725 5796
5726 double end_sec = os::elapsedTime(); 5797 double end_sec = os::elapsedTime();
5727 double elapsed_ms = (end_sec - start_sec) * 1000.0; 5798 double elapsed_ms = (end_sec - start_sec) * 1000.0;
5728 if (non_young) 5799
5800 if (non_young) {
5729 non_young_time_ms += elapsed_ms; 5801 non_young_time_ms += elapsed_ms;
5730 else 5802 } else {
5731 young_time_ms += elapsed_ms; 5803 young_time_ms += elapsed_ms;
5804 }
5732 5805
5733 update_sets_after_freeing_regions(pre_used, &local_free_list, 5806 update_sets_after_freeing_regions(pre_used, &local_free_list,
5734 NULL /* old_proxy_set */, 5807 NULL /* old_proxy_set */,
5735 NULL /* humongous_proxy_set */, 5808 NULL /* humongous_proxy_set */,
5736 false /* par */); 5809 false /* par */);