src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @ 20336:6701abbc4441

8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
Summary: Let HeapRegionSeq manage the heap region and auxiliary data to decrease the amount of responsibilities of G1CollectedHeap, and encapsulate this work from other code.
Reviewed-by: jwilhelm, jmasa, mgerdin, brutisso
author tschatzl
date Tue, 19 Aug 2014 10:50:27 +0200
parents 5d7a63aee595
children c02ec279b062
comparing 20335:eec72fa4b108 with 20336:6701abbc4441
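
Most of the change visible in this file is mechanical: call sites move from G1CollectedHeap's free_regions(), n_regions(), and used_regions() to the num_free_regions(), num_regions(), and num_used_regions() spellings backed by HeapRegionSeq. A minimal sketch of the delegation the summary describes, with invented member names (the real HotSpot declarations differ):

    // Sketch only: members and bodies are illustrative, not the actual
    // HotSpot declarations; it shows the heap forwarding its region
    // bookkeeping to the HeapRegionSeq that now owns it.
    class HeapRegionSeq {
      unsigned _committed_length;  // regions backed by committed memory
      unsigned _num_free;          // committed regions not handed out
    public:
      HeapRegionSeq() : _committed_length(0), _num_free(0) {}
      unsigned length() const           { return _committed_length; }
      unsigned num_free_regions() const { return _num_free; }
    };

    class G1CollectedHeap {
      HeapRegionSeq _hrs;
    public:
      // The renamed accessors simply delegate; callers below no longer
      // reach into the region bookkeeping directly.
      unsigned num_regions() const      { return _hrs.length(); }
      unsigned num_free_regions() const { return _hrs.num_free_regions(); }
      unsigned num_used_regions() const {
        return num_regions() - num_free_regions();
      }
    };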
@@ -453,11 +453,11 @@
   if (adaptive_young_list_length()) {
     _young_list_fixed_length = 0;
   } else {
     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
   start_incremental_cset_building();
@@ -826,11 +826,11 @@
   _short_lived_surv_rate_group->start_adding_regions();
   // also call this on any additional surv rate groups
 
   record_survivor_regions(0, NULL, NULL);
 
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
   _collectionSetChooser->clear();
 }
@@ -1178,11 +1178,11 @@
     _rs_lengths_seq->add((double) _max_rs_lengths);
   }
 
   _in_marking_window = new_in_marking_window;
   _in_marking_window_im = new_in_marking_window_im;
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
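
The hunk above only renames the accessor, but the goal computation at its end deserves a worked example. Assuming a pause target of -XX:MaxGCPauseMillis=200 (so _mmu_tracker->max_gc_time() yields 0.2 s) and the default G1RSetUpdatingPauseTimePercent of 10, the remembered-set update phase is budgeted 20 ms per pause:

    #include <cstdio>

    int main() {
      const double MILLIUNITS = 1000.0; // seconds -> milliseconds
      const double max_gc_time = 0.2;   // assumes -XX:MaxGCPauseMillis=200
      const double G1RSetUpdatingPauseTimePercent = 10.0; // default value
      double update_rs_time_goal_ms =
          max_gc_time * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
      printf("update_rs_time_goal_ms = %.1f\n", update_rs_time_goal_ms); // 20.0
      return 0;
    }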
@@ -1200,11 +1200,11 @@
   YoungList* young_list = _g1->young_list();
   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
   _heap_capacity_bytes_before_gc = _g1->capacity();
   _heap_used_bytes_before_gc = _g1->used();
-  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
+  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
 
   _eden_capacity_bytes_before_gc =
     (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
 
   if (full) {
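
Context for the eden-capacity line in this hunk: the value is derived, not measured. The young target length is converted to bytes via the region size and the survivor portion is subtracted. With illustrative numbers (100 target regions, 1 MB GrainBytes, 8 MB of survivors) eden's capacity comes out at 92 MB:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t GrainBytes = 1024 * 1024;        // assume 1 MB regions
      const size_t young_list_target_length = 100;  // illustrative target
      const size_t survivor_used_bytes = 8 * GrainBytes;
      size_t eden_capacity_bytes =
          (young_list_target_length * GrainBytes) - survivor_used_bytes;
      printf("eden capacity = %zu MB\n",
             eden_capacity_bytes / (1024 * 1024));  // prints 92
      return 0;
    }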
@@ -1615,11 +1615,11 @@
 
 void
 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
   _collectionSetChooser->clear();
 
-  uint region_num = _g1->n_regions();
+  uint region_num = _g1->num_regions();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     const uint OverpartitionFactor = 4;
     uint WorkUnit;
     // The use of MinChunkSize = 8 in the original code
     // causes some assertion failures when the total number of
@@ -1636,11 +1636,11 @@
       const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
       WorkUnit =
         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
              MinWorkUnit);
     }
-    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
+    _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
                                                            WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                             (int) WorkUnit);
     _g1->workers()->run_task(&parKnownGarbageTask);
 
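
Only the region-count accessors change in the two hunks above; the partitioning arithmetic is untouched. Plugging illustrative numbers into the branch shown (2048 regions, ParallelGCThreads = 8, OverpartitionFactor = 4) makes the behaviour concrete: since region_num / (ParallelGCThreads * OverpartitionFactor) can never exceed region_num / ParallelGCThreads, the MinWorkUnit floor decides the result in this branch:

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Illustrative inputs; region_num comes from _g1->num_regions() and
      // ParallelGCThreads is a VM flag in the real code.
      const unsigned region_num = 2048;
      const unsigned ParallelGCThreads = 8;
      const unsigned OverpartitionFactor = 4;

      const unsigned MinWorkUnit = std::max(region_num / ParallelGCThreads, 1u);
      const unsigned WorkUnit =
          std::max(region_num / (ParallelGCThreads * OverpartitionFactor),
                   MinWorkUnit);
      // 2048/8 = 256 and 2048/32 = 64, so WorkUnit = max(64, 256) = 256.
      printf("MinWorkUnit = %u, WorkUnit = %u\n", MinWorkUnit, WorkUnit);
      return 0;
    }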
@@ -1933,11 +1933,11 @@
   // as a percentage of the heap size. I.e., it should bound the
   // number of old regions added to the CSet irrespective of how many
   // of them are available.
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  const size_t region_num = g1h->n_regions();
+  const size_t region_num = g1h->num_regions();
   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
   size_t result = region_num * perc / 100;
   // emulate ceiling
   if (100 * result < region_num * perc) {
     result += 1;
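
The two lines after the "emulate ceiling" comment compute ceil(region_num * perc / 100) in pure integer arithmetic: the quotient is bumped by one exactly when the division discarded a remainder. A self-contained check (the helper name is invented; perc = 10 matches the G1OldCSetRegionThresholdPercent default):

    #include <cassert>
    #include <cstddef>

    // ceil(region_num * perc / 100) without floating point, as in the hunk.
    static size_t max_old_cset_regions(size_t region_num, size_t perc) {
      size_t result = region_num * perc / 100;
      if (100 * result < region_num * perc) { // a remainder was discarded
        result += 1;
      }
      return result;
    }

    int main() {
      assert(max_old_cset_regions(2048, 10) == 205); // 204.8 rounds up
      assert(max_old_cset_regions(2000, 10) == 200); // exact, no bump
      assert((2048 * 10 + 99) / 100 == 205);         // equivalent idiom
      return 0;
    }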