Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @ 6010:720b6a76dd9d
7157073: G1: type change size_t -> uint for region counts / indexes
Summary: Change the type of fields / variables / etc. that represent region counts and indices from size_t to uint.
Reviewed-by: iveresov, brutisso, jmasa, jwilhelm
author | tonyp |
---|---|
date | Wed, 18 Apr 2012 07:21:15 -0400 |
parents | 5c86f8211d1e |
children | f7a8920427a6 |
comparison
equal
deleted
inserted
replaced
6009:dde53abda3d6 | 6010:720b6a76dd9d |
---|---|
429 return; | 429 return; |
430 } | 430 } |
431 } | 431 } |
432 | 432 |
433 if (FLAG_IS_CMDLINE(NewSize)) { | 433 if (FLAG_IS_CMDLINE(NewSize)) { |
434 _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes); | 434 _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), |
435 1U); | |
435 if (FLAG_IS_CMDLINE(MaxNewSize)) { | 436 if (FLAG_IS_CMDLINE(MaxNewSize)) { |
436 _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes); | 437 _max_desired_young_length = |
438 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), | |
439 1U); | |
437 _sizer_kind = SizerMaxAndNewSize; | 440 _sizer_kind = SizerMaxAndNewSize; |
438 _adaptive_size = _min_desired_young_length == _max_desired_young_length; | 441 _adaptive_size = _min_desired_young_length == _max_desired_young_length; |
439 } else { | 442 } else { |
440 _sizer_kind = SizerNewSizeOnly; | 443 _sizer_kind = SizerNewSizeOnly; |
441 } | 444 } |
442 } else if (FLAG_IS_CMDLINE(MaxNewSize)) { | 445 } else if (FLAG_IS_CMDLINE(MaxNewSize)) { |
443 _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes); | 446 _max_desired_young_length = |
447 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), | |
448 1U); | |
444 _sizer_kind = SizerMaxNewSizeOnly; | 449 _sizer_kind = SizerMaxNewSizeOnly; |
445 } | 450 } |
446 } | 451 } |
447 | 452 |
448 size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) { | 453 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) { |
449 size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100; | 454 uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100; |
450 return MAX2((size_t)1, default_value); | 455 return MAX2(1U, default_value); |
451 } | 456 } |
452 | 457 |
453 size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) { | 458 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) { |
454 size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100; | 459 uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100; |
455 return MAX2((size_t)1, default_value); | 460 return MAX2(1U, default_value); |
456 } | 461 } |
457 | 462 |
458 void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) { | 463 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { |
459 assert(new_number_of_heap_regions > 0, "Heap must be initialized"); | 464 assert(new_number_of_heap_regions > 0, "Heap must be initialized"); |
460 | 465 |
461 switch (_sizer_kind) { | 466 switch (_sizer_kind) { |
462 case SizerDefaults: | 467 case SizerDefaults: |
463 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions); | 468 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions); |
510 // Create the jstat counters for the policy. | 515 // Create the jstat counters for the policy. |
511 void G1CollectorPolicy::initialize_gc_policy_counters() { | 516 void G1CollectorPolicy::initialize_gc_policy_counters() { |
512 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); | 517 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); |
513 } | 518 } |
514 | 519 |
515 bool G1CollectorPolicy::predict_will_fit(size_t young_length, | 520 bool G1CollectorPolicy::predict_will_fit(uint young_length, |
516 double base_time_ms, | 521 double base_time_ms, |
517 size_t base_free_regions, | 522 uint base_free_regions, |
518 double target_pause_time_ms) { | 523 double target_pause_time_ms) { |
519 if (young_length >= base_free_regions) { | 524 if (young_length >= base_free_regions) { |
520 // end condition 1: not enough space for the young regions | 525 // end condition 1: not enough space for the young regions |
521 return false; | 526 return false; |
522 } | 527 } |
523 | 528 |
524 double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1)); | 529 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1); |
525 size_t bytes_to_copy = | 530 size_t bytes_to_copy = |
526 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); | 531 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); |
527 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); | 532 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); |
528 double young_other_time_ms = predict_young_other_time_ms(young_length); | 533 double young_other_time_ms = predict_young_other_time_ms(young_length); |
529 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms; | 534 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms; |
531 // end condition 2: prediction is over the target pause time | 536 // end condition 2: prediction is over the target pause time |
532 return false; | 537 return false; |
533 } | 538 } |
534 | 539 |
535 size_t free_bytes = | 540 size_t free_bytes = |
536 (base_free_regions - young_length) * HeapRegion::GrainBytes; | 541 (base_free_regions - young_length) * HeapRegion::GrainBytes; |
537 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) { | 542 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) { |
538 // end condition 3: out-of-space (conservatively!) | 543 // end condition 3: out-of-space (conservatively!) |
539 return false; | 544 return false; |
540 } | 545 } |
541 | 546 |
542 // success! | 547 // success! |
543 return true; | 548 return true; |
544 } | 549 } |
545 | 550 |
546 void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) { | 551 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) { |
547 // re-calculate the necessary reserve | 552 // re-calculate the necessary reserve |
548 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; | 553 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; |
549 // We use ceiling so that if reserve_regions_d is > 0.0 (but | 554 // We use ceiling so that if reserve_regions_d is > 0.0 (but |
550 // smaller than 1.0) we'll get 1. | 555 // smaller than 1.0) we'll get 1. |
551 _reserve_regions = (size_t) ceil(reserve_regions_d); | 556 _reserve_regions = (uint) ceil(reserve_regions_d); |
552 | 557 |
553 _young_gen_sizer->heap_size_changed(new_number_of_regions); | 558 _young_gen_sizer->heap_size_changed(new_number_of_regions); |
554 } | 559 } |
555 | 560 |
556 size_t G1CollectorPolicy::calculate_young_list_desired_min_length( | 561 uint G1CollectorPolicy::calculate_young_list_desired_min_length( |
557 size_t base_min_length) { | 562 uint base_min_length) { |
558 size_t desired_min_length = 0; | 563 uint desired_min_length = 0; |
559 if (adaptive_young_list_length()) { | 564 if (adaptive_young_list_length()) { |
560 if (_alloc_rate_ms_seq->num() > 3) { | 565 if (_alloc_rate_ms_seq->num() > 3) { |
561 double now_sec = os::elapsedTime(); | 566 double now_sec = os::elapsedTime(); |
562 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; | 567 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; |
563 double alloc_rate_ms = predict_alloc_rate_ms(); | 568 double alloc_rate_ms = predict_alloc_rate_ms(); |
564 desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms); | 569 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); |
565 } else { | 570 } else { |
566 // otherwise we don't have enough info to make the prediction | 571 // otherwise we don't have enough info to make the prediction |
567 } | 572 } |
568 } | 573 } |
569 desired_min_length += base_min_length; | 574 desired_min_length += base_min_length; |
570 // make sure we don't go below any user-defined minimum bound | 575 // make sure we don't go below any user-defined minimum bound |
571 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); | 576 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); |
572 } | 577 } |
573 | 578 |
574 size_t G1CollectorPolicy::calculate_young_list_desired_max_length() { | 579 uint G1CollectorPolicy::calculate_young_list_desired_max_length() { |
575 // Here, we might want to also take into account any additional | 580 // Here, we might want to also take into account any additional |
576 // constraints (i.e., user-defined minimum bound). Currently, we | 581 // constraints (i.e., user-defined minimum bound). Currently, we |
577 // effectively don't set this bound. | 582 // effectively don't set this bound. |
578 return _young_gen_sizer->max_desired_young_length(); | 583 return _young_gen_sizer->max_desired_young_length(); |
579 } | 584 } |
586 } | 591 } |
587 | 592 |
588 // Calculate the absolute and desired min bounds. | 593 // Calculate the absolute and desired min bounds. |
589 | 594 |
590 // This is how many young regions we already have (currently: the survivors). | 595 // This is how many young regions we already have (currently: the survivors). |
591 size_t base_min_length = recorded_survivor_regions(); | 596 uint base_min_length = recorded_survivor_regions(); |
592 // This is the absolute minimum young length, which ensures that we | 597 // This is the absolute minimum young length, which ensures that we |
593 // can allocate one eden region in the worst-case. | 598 // can allocate one eden region in the worst-case. |
594 size_t absolute_min_length = base_min_length + 1; | 599 uint absolute_min_length = base_min_length + 1; |
595 size_t desired_min_length = | 600 uint desired_min_length = |
596 calculate_young_list_desired_min_length(base_min_length); | 601 calculate_young_list_desired_min_length(base_min_length); |
597 if (desired_min_length < absolute_min_length) { | 602 if (desired_min_length < absolute_min_length) { |
598 desired_min_length = absolute_min_length; | 603 desired_min_length = absolute_min_length; |
599 } | 604 } |
600 | 605 |
601 // Calculate the absolute and desired max bounds. | 606 // Calculate the absolute and desired max bounds. |
602 | 607 |
603 // We will try our best not to "eat" into the reserve. | 608 // We will try our best not to "eat" into the reserve. |
604 size_t absolute_max_length = 0; | 609 uint absolute_max_length = 0; |
605 if (_free_regions_at_end_of_collection > _reserve_regions) { | 610 if (_free_regions_at_end_of_collection > _reserve_regions) { |
606 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; | 611 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; |
607 } | 612 } |
608 size_t desired_max_length = calculate_young_list_desired_max_length(); | 613 uint desired_max_length = calculate_young_list_desired_max_length(); |
609 if (desired_max_length > absolute_max_length) { | 614 if (desired_max_length > absolute_max_length) { |
610 desired_max_length = absolute_max_length; | 615 desired_max_length = absolute_max_length; |
611 } | 616 } |
612 | 617 |
613 size_t young_list_target_length = 0; | 618 uint young_list_target_length = 0; |
614 if (adaptive_young_list_length()) { | 619 if (adaptive_young_list_length()) { |
615 if (gcs_are_young()) { | 620 if (gcs_are_young()) { |
616 young_list_target_length = | 621 young_list_target_length = |
617 calculate_young_list_target_length(rs_lengths, | 622 calculate_young_list_target_length(rs_lengths, |
618 base_min_length, | 623 base_min_length, |
646 _young_list_target_length = young_list_target_length; | 651 _young_list_target_length = young_list_target_length; |
647 | 652 |
648 update_max_gc_locker_expansion(); | 653 update_max_gc_locker_expansion(); |
649 } | 654 } |
650 | 655 |
651 size_t | 656 uint |
652 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, | 657 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, |
653 size_t base_min_length, | 658 uint base_min_length, |
654 size_t desired_min_length, | 659 uint desired_min_length, |
655 size_t desired_max_length) { | 660 uint desired_max_length) { |
656 assert(adaptive_young_list_length(), "pre-condition"); | 661 assert(adaptive_young_list_length(), "pre-condition"); |
657 assert(gcs_are_young(), "only call this for young GCs"); | 662 assert(gcs_are_young(), "only call this for young GCs"); |
658 | 663 |
659 // In case some edge-condition makes the desired max length too small... | 664 // In case some edge-condition makes the desired max length too small... |
660 if (desired_max_length <= desired_min_length) { | 665 if (desired_max_length <= desired_min_length) { |
665 // the already allocated young regions (i.e., so they reflect the | 670 // the already allocated young regions (i.e., so they reflect the |
666 // min and max eden regions we'll allocate). The base_min_length | 671 // min and max eden regions we'll allocate). The base_min_length |
667 // will be reflected in the predictions by the | 672 // will be reflected in the predictions by the |
668 // survivor_regions_evac_time prediction. | 673 // survivor_regions_evac_time prediction. |
669 assert(desired_min_length > base_min_length, "invariant"); | 674 assert(desired_min_length > base_min_length, "invariant"); |
670 size_t min_young_length = desired_min_length - base_min_length; | 675 uint min_young_length = desired_min_length - base_min_length; |
671 assert(desired_max_length > base_min_length, "invariant"); | 676 assert(desired_max_length > base_min_length, "invariant"); |
672 size_t max_young_length = desired_max_length - base_min_length; | 677 uint max_young_length = desired_max_length - base_min_length; |
673 | 678 |
674 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; | 679 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; |
675 double survivor_regions_evac_time = predict_survivor_regions_evac_time(); | 680 double survivor_regions_evac_time = predict_survivor_regions_evac_time(); |
676 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); | 681 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); |
677 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); | 682 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); |
678 size_t scanned_cards = predict_young_card_num(adj_rs_lengths); | 683 size_t scanned_cards = predict_young_card_num(adj_rs_lengths); |
679 double base_time_ms = | 684 double base_time_ms = |
680 predict_base_elapsed_time_ms(pending_cards, scanned_cards) + | 685 predict_base_elapsed_time_ms(pending_cards, scanned_cards) + |
681 survivor_regions_evac_time; | 686 survivor_regions_evac_time; |
682 size_t available_free_regions = _free_regions_at_end_of_collection; | 687 uint available_free_regions = _free_regions_at_end_of_collection; |
683 size_t base_free_regions = 0; | 688 uint base_free_regions = 0; |
684 if (available_free_regions > _reserve_regions) { | 689 if (available_free_regions > _reserve_regions) { |
685 base_free_regions = available_free_regions - _reserve_regions; | 690 base_free_regions = available_free_regions - _reserve_regions; |
686 } | 691 } |
687 | 692 |
688 // Here, we will make sure that the shortest young length that | 693 // Here, we will make sure that the shortest young length that |
715 // max_young_length fits into the target pause time. If it | 720 // max_young_length fits into the target pause time. If it |
716 // does, it becomes the new min. If it doesn't, it becomes | 721 // does, it becomes the new min. If it doesn't, it becomes |
717 // the new max. This way we maintain the loop invariants. | 722 // the new max. This way we maintain the loop invariants. |
718 | 723 |
719 assert(min_young_length < max_young_length, "invariant"); | 724 assert(min_young_length < max_young_length, "invariant"); |
720 size_t diff = (max_young_length - min_young_length) / 2; | 725 uint diff = (max_young_length - min_young_length) / 2; |
721 while (diff > 0) { | 726 while (diff > 0) { |
722 size_t young_length = min_young_length + diff; | 727 uint young_length = min_young_length + diff; |
723 if (predict_will_fit(young_length, base_time_ms, | 728 if (predict_will_fit(young_length, base_time_ms, |
724 base_free_regions, target_pause_time_ms)) { | 729 base_free_regions, target_pause_time_ms)) { |
725 min_young_length = young_length; | 730 min_young_length = young_length; |
726 } else { | 731 } else { |
727 max_young_length = young_length; | 732 max_young_length = young_length; |
1320 // calculate the application's allocate rate. The only exception | 1325 // calculate the application's allocate rate. The only exception |
1321 // to that is humongous objects that are allocated separately. But | 1326 // to that is humongous objects that are allocated separately. But |
1322 // given that humongous object allocations do not really affect | 1327 // given that humongous object allocations do not really affect |
1323 // either the pause's duration nor when the next pause will take | 1328 // either the pause's duration nor when the next pause will take |
1324 // place we can safely ignore them here. | 1329 // place we can safely ignore them here. |
1325 size_t regions_allocated = eden_cset_region_length(); | 1330 uint regions_allocated = eden_cset_region_length(); |
1326 double alloc_rate_ms = (double) regions_allocated / app_time_ms; | 1331 double alloc_rate_ms = (double) regions_allocated / app_time_ms; |
1327 _alloc_rate_ms_seq->add(alloc_rate_ms); | 1332 _alloc_rate_ms_seq->add(alloc_rate_ms); |
1328 | 1333 |
1329 double interval_ms = | 1334 double interval_ms = |
1330 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; | 1335 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; |
1504 | 1509 |
1505 if (update_stats) { | 1510 if (update_stats) { |
1506 double pause_time_ms = elapsed_ms; | 1511 double pause_time_ms = elapsed_ms; |
1507 | 1512 |
1508 size_t diff = 0; | 1513 size_t diff = 0; |
1509 if (_max_pending_cards >= _pending_cards) | 1514 if (_max_pending_cards >= _pending_cards) { |
1510 diff = _max_pending_cards - _pending_cards; | 1515 diff = _max_pending_cards - _pending_cards; |
1516 } | |
1511 _pending_card_diff_seq->add((double) diff); | 1517 _pending_card_diff_seq->add((double) diff); |
1512 | 1518 |
1513 double cost_per_card_ms = 0.0; | 1519 double cost_per_card_ms = 0.0; |
1514 if (_pending_cards > 0) { | 1520 if (_pending_cards > 0) { |
1515 cost_per_card_ms = update_rs_time / (double) _pending_cards; | 1521 cost_per_card_ms = update_rs_time / (double) _pending_cards; |
1739 region_elapsed_time_ms += predict_non_young_other_time_ms(1); | 1745 region_elapsed_time_ms += predict_non_young_other_time_ms(1); |
1740 | 1746 |
1741 return region_elapsed_time_ms; | 1747 return region_elapsed_time_ms; |
1742 } | 1748 } |
1743 | 1749 |
1744 size_t | 1750 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) { |
1745 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) { | |
1746 size_t bytes_to_copy; | 1751 size_t bytes_to_copy; |
1747 if (hr->is_marked()) | 1752 if (hr->is_marked()) |
1748 bytes_to_copy = hr->max_live_bytes(); | 1753 bytes_to_copy = hr->max_live_bytes(); |
1749 else { | 1754 else { |
1750 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); | 1755 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); |
1754 } | 1759 } |
1755 return bytes_to_copy; | 1760 return bytes_to_copy; |
1756 } | 1761 } |
1757 | 1762 |
1758 void | 1763 void |
1759 G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length, | 1764 G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length, |
1760 size_t survivor_cset_region_length) { | 1765 uint survivor_cset_region_length) { |
1761 _eden_cset_region_length = eden_cset_region_length; | 1766 _eden_cset_region_length = eden_cset_region_length; |
1762 _survivor_cset_region_length = survivor_cset_region_length; | 1767 _survivor_cset_region_length = survivor_cset_region_length; |
1763 _old_cset_region_length = 0; | 1768 _old_cset_region_length = 0; |
1764 } | 1769 } |
1765 | 1770 |
2019 sprintf(buffer, "%7.2lfMB", mbs); | 2024 sprintf(buffer, "%7.2lfMB", mbs); |
2020 return buffer; | 2025 return buffer; |
2021 } | 2026 } |
2022 #endif // PRODUCT | 2027 #endif // PRODUCT |
2023 | 2028 |
2024 size_t G1CollectorPolicy::max_regions(int purpose) { | 2029 uint G1CollectorPolicy::max_regions(int purpose) { |
2025 switch (purpose) { | 2030 switch (purpose) { |
2026 case GCAllocForSurvived: | 2031 case GCAllocForSurvived: |
2027 return _max_survivor_regions; | 2032 return _max_survivor_regions; |
2028 case GCAllocForTenured: | 2033 case GCAllocForTenured: |
2029 return REGIONS_UNLIMITED; | 2034 return REGIONS_UNLIMITED; |
2032 return REGIONS_UNLIMITED; | 2037 return REGIONS_UNLIMITED; |
2033 }; | 2038 }; |
2034 } | 2039 } |
2035 | 2040 |
2036 void G1CollectorPolicy::update_max_gc_locker_expansion() { | 2041 void G1CollectorPolicy::update_max_gc_locker_expansion() { |
2037 size_t expansion_region_num = 0; | 2042 uint expansion_region_num = 0; |
2038 if (GCLockerEdenExpansionPercent > 0) { | 2043 if (GCLockerEdenExpansionPercent > 0) { |
2039 double perc = (double) GCLockerEdenExpansionPercent / 100.0; | 2044 double perc = (double) GCLockerEdenExpansionPercent / 100.0; |
2040 double expansion_region_num_d = perc * (double) _young_list_target_length; | 2045 double expansion_region_num_d = perc * (double) _young_list_target_length; |
2041 // We use ceiling so that if expansion_region_num_d is > 0.0 (but | 2046 // We use ceiling so that if expansion_region_num_d is > 0.0 (but |
2042 // less than 1.0) we'll get 1. | 2047 // less than 1.0) we'll get 1. |
2043 expansion_region_num = (size_t) ceil(expansion_region_num_d); | 2048 expansion_region_num = (uint) ceil(expansion_region_num_d); |
2044 } else { | 2049 } else { |
2045 assert(expansion_region_num == 0, "sanity"); | 2050 assert(expansion_region_num == 0, "sanity"); |
2046 } | 2051 } |
2047 _young_list_max_length = _young_list_target_length + expansion_region_num; | 2052 _young_list_max_length = _young_list_target_length + expansion_region_num; |
2048 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); | 2053 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); |
2052 void G1CollectorPolicy::update_survivors_policy() { | 2057 void G1CollectorPolicy::update_survivors_policy() { |
2053 double max_survivor_regions_d = | 2058 double max_survivor_regions_d = |
2054 (double) _young_list_target_length / (double) SurvivorRatio; | 2059 (double) _young_list_target_length / (double) SurvivorRatio; |
2055 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but | 2060 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but |
2056 // smaller than 1.0) we'll get 1. | 2061 // smaller than 1.0) we'll get 1. |
2057 _max_survivor_regions = (size_t) ceil(max_survivor_regions_d); | 2062 _max_survivor_regions = (uint) ceil(max_survivor_regions_d); |
2058 | 2063 |
2059 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( | 2064 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( |
2060 HeapRegion::GrainWords * _max_survivor_regions); | 2065 HeapRegion::GrainWords * _max_survivor_regions); |
2061 } | 2066 } |
2062 | 2067 |
2286 clear_marked_end_sec = os::elapsedTime(); | 2291 clear_marked_end_sec = os::elapsedTime(); |
2287 gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.", | 2292 gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.", |
2288 (clear_marked_end_sec - start_sec) * 1000.0); | 2293 (clear_marked_end_sec - start_sec) * 1000.0); |
2289 } | 2294 } |
2290 | 2295 |
2296 uint region_num = _g1->n_regions(); | |
2291 if (G1CollectedHeap::use_parallel_gc_threads()) { | 2297 if (G1CollectedHeap::use_parallel_gc_threads()) { |
2292 const size_t OverpartitionFactor = 4; | 2298 const uint OverpartitionFactor = 4; |
2293 size_t WorkUnit; | 2299 uint WorkUnit; |
2294 // The use of MinChunkSize = 8 in the original code | 2300 // The use of MinChunkSize = 8 in the original code |
2295 // causes some assertion failures when the total number of | 2301 // causes some assertion failures when the total number of |
2296 // region is less than 8. The code here tries to fix that. | 2302 // region is less than 8. The code here tries to fix that. |
2297 // Should the original code also be fixed? | 2303 // Should the original code also be fixed? |
2298 if (no_of_gc_threads > 0) { | 2304 if (no_of_gc_threads > 0) { |
2299 const size_t MinWorkUnit = | 2305 const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U); |
2300 MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U); | 2306 WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor), |
2301 WorkUnit = | 2307 MinWorkUnit); |
2302 MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor), | |
2303 MinWorkUnit); | |
2304 } else { | 2308 } else { |
2305 assert(no_of_gc_threads > 0, | 2309 assert(no_of_gc_threads > 0, |
2306 "The active gc workers should be greater than 0"); | 2310 "The active gc workers should be greater than 0"); |
2307 // In a product build do something reasonable to avoid a crash. | 2311 // In a product build do something reasonable to avoid a crash. |
2308 const size_t MinWorkUnit = | 2312 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U); |
2309 MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U); | |
2310 WorkUnit = | 2313 WorkUnit = |
2311 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor), | 2314 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor), |
2312 MinWorkUnit); | 2315 MinWorkUnit); |
2313 } | 2316 } |
2314 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(), | 2317 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(), |
2315 WorkUnit); | 2318 WorkUnit); |
2316 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, | 2319 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, |
2622 | 2625 |
2623 // The young list is laid with the survivor regions from the previous | 2626 // The young list is laid with the survivor regions from the previous |
2624 // pause are appended to the RHS of the young list, i.e. | 2627 // pause are appended to the RHS of the young list, i.e. |
2625 // [Newly Young Regions ++ Survivors from last pause]. | 2628 // [Newly Young Regions ++ Survivors from last pause]. |
2626 | 2629 |
2627 size_t survivor_region_length = young_list->survivor_length(); | 2630 uint survivor_region_length = young_list->survivor_length(); |
2628 size_t eden_region_length = young_list->length() - survivor_region_length; | 2631 uint eden_region_length = young_list->length() - survivor_region_length; |
2629 init_cset_region_lengths(eden_region_length, survivor_region_length); | 2632 init_cset_region_lengths(eden_region_length, survivor_region_length); |
2630 hr = young_list->first_survivor_region(); | 2633 hr = young_list->first_survivor_region(); |
2631 while (hr != NULL) { | 2634 while (hr != NULL) { |
2632 assert(hr->is_survivor(), "badly formed young list"); | 2635 assert(hr->is_survivor(), "badly formed young list"); |
2633 hr->set_young(); | 2636 hr->set_young(); |
2662 non_young_start_time_sec = young_end_time_sec; | 2665 non_young_start_time_sec = young_end_time_sec; |
2663 | 2666 |
2664 if (!gcs_are_young()) { | 2667 if (!gcs_are_young()) { |
2665 CollectionSetChooser* cset_chooser = _collectionSetChooser; | 2668 CollectionSetChooser* cset_chooser = _collectionSetChooser; |
2666 assert(cset_chooser->verify(), "CSet Chooser verification - pre"); | 2669 assert(cset_chooser->verify(), "CSet Chooser verification - pre"); |
2667 const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength(); | 2670 const uint min_old_cset_length = cset_chooser->calcMinOldCSetLength(); |
2668 const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength(); | 2671 const uint max_old_cset_length = cset_chooser->calcMaxOldCSetLength(); |
2669 | 2672 |
2670 size_t expensive_region_num = 0; | 2673 uint expensive_region_num = 0; |
2671 bool check_time_remaining = adaptive_young_list_length(); | 2674 bool check_time_remaining = adaptive_young_list_length(); |
2672 HeapRegion* hr = cset_chooser->peek(); | 2675 HeapRegion* hr = cset_chooser->peek(); |
2673 while (hr != NULL) { | 2676 while (hr != NULL) { |
2674 if (old_cset_region_length() >= max_old_cset_length) { | 2677 if (old_cset_region_length() >= max_old_cset_length) { |
2675 // Added maximum number of old regions to the CSet. | 2678 // Added maximum number of old regions to the CSet. |