Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @ 6011:f7a8920427a6
7145441: G1: collection set chooser-related cleanup
Summary: Cleanup of the CSet chooser class: standardize on uints for region num and indexes (instead of int, jint, etc.), make the method / field naming style more consistent, remove a lot of dead code.
Reviewed-by: johnc, brutisso
author | tonyp |
---|---|
date | Wed, 18 Apr 2012 13:39:55 -0400 |
parents | 720b6a76dd9d |
children | 8a2e5a6a19a4 |
comparison
equal
deleted
inserted
replaced
6010:720b6a76dd9d | 6011:f7a8920427a6 |
---|---|
189 _mixed_pause_num(0), | 189 _mixed_pause_num(0), |
190 | 190 |
191 _during_marking(false), | 191 _during_marking(false), |
192 _in_marking_window(false), | 192 _in_marking_window(false), |
193 _in_marking_window_im(false), | 193 _in_marking_window_im(false), |
194 | |
195 _known_garbage_ratio(0.0), | |
196 _known_garbage_bytes(0), | |
197 | |
198 _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)), | |
199 | 194 |
200 _recent_prev_end_times_for_all_gcs_sec( | 195 _recent_prev_end_times_for_all_gcs_sec( |
201 new TruncatedSeq(NumPrevPausesForHeuristics)), | 196 new TruncatedSeq(NumPrevPausesForHeuristics)), |
202 | 197 |
203 _recent_avg_pause_time_ratio(0.0), | 198 _recent_avg_pause_time_ratio(0.0), |
866 // transitions and make sure we start with young GCs after the Full GC. | 861 // transitions and make sure we start with young GCs after the Full GC. |
867 set_gcs_are_young(true); | 862 set_gcs_are_young(true); |
868 _last_young_gc = false; | 863 _last_young_gc = false; |
869 clear_initiate_conc_mark_if_possible(); | 864 clear_initiate_conc_mark_if_possible(); |
870 clear_during_initial_mark_pause(); | 865 clear_during_initial_mark_pause(); |
871 _known_garbage_bytes = 0; | |
872 _known_garbage_ratio = 0.0; | |
873 _in_marking_window = false; | 866 _in_marking_window = false; |
874 _in_marking_window_im = false; | 867 _in_marking_window_im = false; |
875 | 868 |
876 _short_lived_surv_rate_group->start_adding_regions(); | 869 _short_lived_surv_rate_group->start_adding_regions(); |
877 // also call this on any additional surv rate groups | 870 // also call this on any additional surv rate groups |
880 | 873 |
881 _free_regions_at_end_of_collection = _g1->free_regions(); | 874 _free_regions_at_end_of_collection = _g1->free_regions(); |
882 // Reset survivors SurvRateGroup. | 875 // Reset survivors SurvRateGroup. |
883 _survivor_surv_rate_group->reset(); | 876 _survivor_surv_rate_group->reset(); |
884 update_young_list_target_length(); | 877 update_young_list_target_length(); |
885 _collectionSetChooser->clearMarkedHeapRegions(); | 878 _collectionSetChooser->clear(); |
886 } | 879 } |
887 | 880 |
888 void G1CollectorPolicy::record_stop_world_start() { | 881 void G1CollectorPolicy::record_stop_world_start() { |
889 _stop_world_start = os::elapsedTime(); | 882 _stop_world_start = os::elapsedTime(); |
890 } | 883 } |
1454 print_stats(1, buffer, _cur_aux_times_ms[i]); | 1447 print_stats(1, buffer, _cur_aux_times_ms[i]); |
1455 } | 1448 } |
1456 } | 1449 } |
1457 } | 1450 } |
1458 | 1451 |
1459 // Update the efficiency-since-mark vars. | |
1460 double proc_ms = elapsed_ms * (double) _parallel_gc_threads; | |
1461 if (elapsed_ms < MIN_TIMER_GRANULARITY) { | |
1462 // This usually happens due to the timer not having the required | |
1463 // granularity. Some Linuxes are the usual culprits. | |
1464 // We'll just set it to something (arbitrarily) small. | |
1465 proc_ms = 1.0; | |
1466 } | |
1467 double cur_efficiency = (double) freed_bytes / proc_ms; | |
1468 | |
1469 bool new_in_marking_window = _in_marking_window; | 1452 bool new_in_marking_window = _in_marking_window; |
1470 bool new_in_marking_window_im = false; | 1453 bool new_in_marking_window_im = false; |
1471 if (during_initial_mark_pause()) { | 1454 if (during_initial_mark_pause()) { |
1472 new_in_marking_window = true; | 1455 new_in_marking_window = true; |
1473 new_in_marking_window_im = true; | 1456 new_in_marking_window_im = true; |
1496 | 1479 |
1497 if (!next_gc_should_be_mixed("continue mixed GCs", | 1480 if (!next_gc_should_be_mixed("continue mixed GCs", |
1498 "do not continue mixed GCs")) { | 1481 "do not continue mixed GCs")) { |
1499 set_gcs_are_young(true); | 1482 set_gcs_are_young(true); |
1500 } | 1483 } |
1501 } | |
1502 | |
1503 if (_last_gc_was_young && !_during_marking) { | |
1504 _young_gc_eff_seq->add(cur_efficiency); | |
1505 } | 1484 } |
1506 | 1485 |
1507 _short_lived_surv_rate_group->start_adding_regions(); | 1486 _short_lived_surv_rate_group->start_adding_regions(); |
1508 // do that for any other surv rate groups | 1487 // do that for any other surv rate groups |
1509 | 1488 |
1616 | 1595 |
1617 // Note that _mmu_tracker->max_gc_time() returns the time in seconds. | 1596 // Note that _mmu_tracker->max_gc_time() returns the time in seconds. |
1618 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; | 1597 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; |
1619 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms); | 1598 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms); |
1620 | 1599 |
1621 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end."); | 1600 _collectionSetChooser->verify(); |
1622 } | 1601 } |
1623 | 1602 |
1624 #define EXT_SIZE_FORMAT "%d%s" | 1603 #define EXT_SIZE_FORMAT "%d%s" |
1625 #define EXT_SIZE_PARAMS(bytes) \ | 1604 #define EXT_SIZE_PARAMS(bytes) \ |
1626 byte_size_in_proper_unit((bytes)), \ | 1605 byte_size_in_proper_unit((bytes)), \ |
2063 | 2042 |
2064 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( | 2043 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( |
2065 HeapRegion::GrainWords * _max_survivor_regions); | 2044 HeapRegion::GrainWords * _max_survivor_regions); |
2066 } | 2045 } |
2067 | 2046 |
2068 #ifndef PRODUCT | |
2069 class HRSortIndexIsOKClosure: public HeapRegionClosure { | |
2070 CollectionSetChooser* _chooser; | |
2071 public: | |
2072 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) : | |
2073 _chooser(chooser) {} | |
2074 | |
2075 bool doHeapRegion(HeapRegion* r) { | |
2076 if (!r->continuesHumongous()) { | |
2077 assert(_chooser->regionProperlyOrdered(r), "Ought to be."); | |
2078 } | |
2079 return false; | |
2080 } | |
2081 }; | |
2082 | |
2083 bool G1CollectorPolicy::assertMarkedBytesDataOK() { | |
2084 HRSortIndexIsOKClosure cl(_collectionSetChooser); | |
2085 _g1->heap_region_iterate(&cl); | |
2086 return true; | |
2087 } | |
2088 #endif | |
2089 | |
2090 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle( | 2047 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle( |
2091 GCCause::Cause gc_cause) { | 2048 GCCause::Cause gc_cause) { |
2092 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); | 2049 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); |
2093 if (!during_cycle) { | 2050 if (!during_cycle) { |
2094 ergo_verbose1(ErgoConcCycles, | 2051 ergo_verbose1(ErgoConcCycles, |
2182 // Do we have any marking information for this region? | 2139 // Do we have any marking information for this region? |
2183 if (r->is_marked()) { | 2140 if (r->is_marked()) { |
2184 // We will skip any region that's currently used as an old GC | 2141 // We will skip any region that's currently used as an old GC |
2185 // alloc region (we should not consider those for collection | 2142 // alloc region (we should not consider those for collection |
2186 // before we fill them up). | 2143 // before we fill them up). |
2187 if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) { | 2144 if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { |
2188 _hrSorted->addMarkedHeapRegion(r); | 2145 _hrSorted->add_region(r); |
2189 } | 2146 } |
2190 } | 2147 } |
2191 return false; | 2148 return false; |
2192 } | 2149 } |
2193 }; | 2150 }; |
2194 | 2151 |
2195 class ParKnownGarbageHRClosure: public HeapRegionClosure { | 2152 class ParKnownGarbageHRClosure: public HeapRegionClosure { |
2196 G1CollectedHeap* _g1h; | 2153 G1CollectedHeap* _g1h; |
2197 CollectionSetChooser* _hrSorted; | 2154 CollectionSetChooser* _hrSorted; |
2198 jint _marked_regions_added; | 2155 uint _marked_regions_added; |
2199 size_t _reclaimable_bytes_added; | 2156 size_t _reclaimable_bytes_added; |
2200 jint _chunk_size; | 2157 uint _chunk_size; |
2201 jint _cur_chunk_idx; | 2158 uint _cur_chunk_idx; |
2202 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end) | 2159 uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end) |
2203 int _worker; | |
2204 int _invokes; | |
2205 | 2160 |
2206 void get_new_chunk() { | 2161 void get_new_chunk() { |
2207 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size); | 2162 _cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size); |
2208 _cur_chunk_end = _cur_chunk_idx + _chunk_size; | 2163 _cur_chunk_end = _cur_chunk_idx + _chunk_size; |
2209 } | 2164 } |
2210 void add_region(HeapRegion* r) { | 2165 void add_region(HeapRegion* r) { |
2211 if (_cur_chunk_idx == _cur_chunk_end) { | 2166 if (_cur_chunk_idx == _cur_chunk_end) { |
2212 get_new_chunk(); | 2167 get_new_chunk(); |
2213 } | 2168 } |
2214 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition"); | 2169 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition"); |
2215 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r); | 2170 _hrSorted->set_region(_cur_chunk_idx, r); |
2216 _marked_regions_added++; | 2171 _marked_regions_added++; |
2217 _reclaimable_bytes_added += r->reclaimable_bytes(); | 2172 _reclaimable_bytes_added += r->reclaimable_bytes(); |
2218 _cur_chunk_idx++; | 2173 _cur_chunk_idx++; |
2219 } | 2174 } |
2220 | 2175 |
2221 public: | 2176 public: |
2222 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, | 2177 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, |
2223 jint chunk_size, | 2178 uint chunk_size) : |
2224 int worker) : | |
2225 _g1h(G1CollectedHeap::heap()), | 2179 _g1h(G1CollectedHeap::heap()), |
2226 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker), | 2180 _hrSorted(hrSorted), _chunk_size(chunk_size), |
2227 _marked_regions_added(0), _reclaimable_bytes_added(0), | 2181 _marked_regions_added(0), _reclaimable_bytes_added(0), |
2228 _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { } | 2182 _cur_chunk_idx(0), _cur_chunk_end(0) { } |
2229 | 2183 |
2230 bool doHeapRegion(HeapRegion* r) { | 2184 bool doHeapRegion(HeapRegion* r) { |
2231 // We only include humongous regions in collection | |
2232 // sets when concurrent mark shows that their contained object is | |
2233 // unreachable. | |
2234 _invokes++; | |
2235 | |
2236 // Do we have any marking information for this region? | 2185 // Do we have any marking information for this region? |
2237 if (r->is_marked()) { | 2186 if (r->is_marked()) { |
2238 // We will skip any region that's currently used as an old GC | 2187 // We will skip any region that's currently used as an old GC |
2239 // alloc region (we should not consider those for collection | 2188 // alloc region (we should not consider those for collection |
2240 // before we fill them up). | 2189 // before we fill them up). |
2241 if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) { | 2190 if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { |
2242 add_region(r); | 2191 add_region(r); |
2243 } | 2192 } |
2244 } | 2193 } |
2245 return false; | 2194 return false; |
2246 } | 2195 } |
2247 jint marked_regions_added() { return _marked_regions_added; } | 2196 uint marked_regions_added() { return _marked_regions_added; } |
2248 size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; } | 2197 size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; } |
2249 int invokes() { return _invokes; } | |
2250 }; | 2198 }; |
2251 | 2199 |
2252 class ParKnownGarbageTask: public AbstractGangTask { | 2200 class ParKnownGarbageTask: public AbstractGangTask { |
2253 CollectionSetChooser* _hrSorted; | 2201 CollectionSetChooser* _hrSorted; |
2254 jint _chunk_size; | 2202 uint _chunk_size; |
2255 G1CollectedHeap* _g1; | 2203 G1CollectedHeap* _g1; |
2256 public: | 2204 public: |
2257 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) : | 2205 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) : |
2258 AbstractGangTask("ParKnownGarbageTask"), | 2206 AbstractGangTask("ParKnownGarbageTask"), |
2259 _hrSorted(hrSorted), _chunk_size(chunk_size), | 2207 _hrSorted(hrSorted), _chunk_size(chunk_size), |
2260 _g1(G1CollectedHeap::heap()) { } | 2208 _g1(G1CollectedHeap::heap()) { } |
2261 | 2209 |
2262 void work(uint worker_id) { | 2210 void work(uint worker_id) { |
2263 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, | 2211 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); |
2264 _chunk_size, | 2212 |
2265 worker_id); | |
2266 // Back to zero for the claim value. | 2213 // Back to zero for the claim value. |
2267 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id, | 2214 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id, |
2268 _g1->workers()->active_workers(), | 2215 _g1->workers()->active_workers(), |
2269 HeapRegion::InitialClaimValue); | 2216 HeapRegion::InitialClaimValue); |
2270 jint regions_added = parKnownGarbageCl.marked_regions_added(); | 2217 uint regions_added = parKnownGarbageCl.marked_regions_added(); |
2271 size_t reclaimable_bytes_added = | 2218 size_t reclaimable_bytes_added = |
2272 parKnownGarbageCl.reclaimable_bytes_added(); | 2219 parKnownGarbageCl.reclaimable_bytes_added(); |
2273 _hrSorted->updateTotals(regions_added, reclaimable_bytes_added); | 2220 _hrSorted->update_totals(regions_added, reclaimable_bytes_added); |
2274 if (G1PrintParCleanupStats) { | |
2275 gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.", | |
2276 worker_id, parKnownGarbageCl.invokes(), regions_added); | |
2277 } | |
2278 } | 2221 } |
2279 }; | 2222 }; |
2280 | 2223 |
2281 void | 2224 void |
2282 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { | 2225 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { |
2283 double start_sec; | 2226 _collectionSetChooser->clear(); |
2284 if (G1PrintParCleanupStats) { | |
2285 start_sec = os::elapsedTime(); | |
2286 } | |
2287 | |
2288 _collectionSetChooser->clearMarkedHeapRegions(); | |
2289 double clear_marked_end_sec; | |
2290 if (G1PrintParCleanupStats) { | |
2291 clear_marked_end_sec = os::elapsedTime(); | |
2292 gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.", | |
2293 (clear_marked_end_sec - start_sec) * 1000.0); | |
2294 } | |
2295 | 2227 |
2296 uint region_num = _g1->n_regions(); | 2228 uint region_num = _g1->n_regions(); |
2297 if (G1CollectedHeap::use_parallel_gc_threads()) { | 2229 if (G1CollectedHeap::use_parallel_gc_threads()) { |
2298 const uint OverpartitionFactor = 4; | 2230 const uint OverpartitionFactor = 4; |
2299 uint WorkUnit; | 2231 uint WorkUnit; |
2312 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U); | 2244 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U); |
2313 WorkUnit = | 2245 WorkUnit = |
2314 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor), | 2246 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor), |
2315 MinWorkUnit); | 2247 MinWorkUnit); |
2316 } | 2248 } |
2317 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(), | 2249 _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(), |
2318 WorkUnit); | 2250 WorkUnit); |
2319 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, | 2251 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, |
2320 (int) WorkUnit); | 2252 (int) WorkUnit); |
2321 _g1->workers()->run_task(&parKnownGarbageTask); | 2253 _g1->workers()->run_task(&parKnownGarbageTask); |
2322 | 2254 |
2323 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue), | 2255 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue), |
2324 "sanity check"); | 2256 "sanity check"); |
2325 } else { | 2257 } else { |
2326 KnownGarbageClosure knownGarbagecl(_collectionSetChooser); | 2258 KnownGarbageClosure knownGarbagecl(_collectionSetChooser); |
2327 _g1->heap_region_iterate(&knownGarbagecl); | 2259 _g1->heap_region_iterate(&knownGarbagecl); |
2328 } | 2260 } |
2329 double known_garbage_end_sec; | 2261 |
2330 if (G1PrintParCleanupStats) { | 2262 _collectionSetChooser->sort_regions(); |
2331 known_garbage_end_sec = os::elapsedTime(); | 2263 |
2332 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.", | |
2333 (known_garbage_end_sec - clear_marked_end_sec) * 1000.0); | |
2334 } | |
2335 | |
2336 _collectionSetChooser->sortMarkedHeapRegions(); | |
2337 double end_sec = os::elapsedTime(); | 2264 double end_sec = os::elapsedTime(); |
2338 if (G1PrintParCleanupStats) { | |
2339 gclog_or_tty->print_cr(" sorting: %8.3f ms.", | |
2340 (end_sec - known_garbage_end_sec) * 1000.0); | |
2341 } | |
2342 | |
2343 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; | 2265 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; |
2344 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); | 2266 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); |
2345 _cur_mark_stop_world_time_ms += elapsed_time_ms; | 2267 _cur_mark_stop_world_time_ms += elapsed_time_ms; |
2346 _prev_collection_pause_end_ms += elapsed_time_ms; | 2268 _prev_collection_pause_end_ms += elapsed_time_ms; |
2347 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true); | 2269 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true); |
2553 #endif // !PRODUCT | 2475 #endif // !PRODUCT |
2554 | 2476 |
2555 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, | 2477 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, |
2556 const char* false_action_str) { | 2478 const char* false_action_str) { |
2557 CollectionSetChooser* cset_chooser = _collectionSetChooser; | 2479 CollectionSetChooser* cset_chooser = _collectionSetChooser; |
2558 if (cset_chooser->isEmpty()) { | 2480 if (cset_chooser->is_empty()) { |
2559 ergo_verbose0(ErgoMixedGCs, | 2481 ergo_verbose0(ErgoMixedGCs, |
2560 false_action_str, | 2482 false_action_str, |
2561 ergo_format_reason("candidate old regions not available")); | 2483 ergo_format_reason("candidate old regions not available")); |
2562 return false; | 2484 return false; |
2563 } | 2485 } |
2564 size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes(); | 2486 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes(); |
2565 size_t capacity_bytes = _g1->capacity(); | 2487 size_t capacity_bytes = _g1->capacity(); |
2566 double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; | 2488 double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; |
2567 double threshold = (double) G1HeapWastePercent; | 2489 double threshold = (double) G1HeapWastePercent; |
2568 if (perc < threshold) { | 2490 if (perc < threshold) { |
2569 ergo_verbose4(ErgoMixedGCs, | 2491 ergo_verbose4(ErgoMixedGCs, |
2570 false_action_str, | 2492 false_action_str, |
2571 ergo_format_reason("reclaimable percentage lower than threshold") | 2493 ergo_format_reason("reclaimable percentage lower than threshold") |
2572 ergo_format_region("candidate old regions") | 2494 ergo_format_region("candidate old regions") |
2573 ergo_format_byte_perc("reclaimable") | 2495 ergo_format_byte_perc("reclaimable") |
2574 ergo_format_perc("threshold"), | 2496 ergo_format_perc("threshold"), |
2575 cset_chooser->remainingRegions(), | 2497 cset_chooser->remaining_regions(), |
2576 reclaimable_bytes, perc, threshold); | 2498 reclaimable_bytes, perc, threshold); |
2577 return false; | 2499 return false; |
2578 } | 2500 } |
2579 | 2501 |
2580 ergo_verbose4(ErgoMixedGCs, | 2502 ergo_verbose4(ErgoMixedGCs, |
2581 true_action_str, | 2503 true_action_str, |
2582 ergo_format_reason("candidate old regions available") | 2504 ergo_format_reason("candidate old regions available") |
2583 ergo_format_region("candidate old regions") | 2505 ergo_format_region("candidate old regions") |
2584 ergo_format_byte_perc("reclaimable") | 2506 ergo_format_byte_perc("reclaimable") |
2585 ergo_format_perc("threshold"), | 2507 ergo_format_perc("threshold"), |
2586 cset_chooser->remainingRegions(), | 2508 cset_chooser->remaining_regions(), |
2587 reclaimable_bytes, perc, threshold); | 2509 reclaimable_bytes, perc, threshold); |
2588 return true; | 2510 return true; |
2589 } | 2511 } |
2590 | 2512 |
2591 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) { | 2513 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) { |
2664 // We are doing young collections so reset this. | 2586 // We are doing young collections so reset this. |
2665 non_young_start_time_sec = young_end_time_sec; | 2587 non_young_start_time_sec = young_end_time_sec; |
2666 | 2588 |
2667 if (!gcs_are_young()) { | 2589 if (!gcs_are_young()) { |
2668 CollectionSetChooser* cset_chooser = _collectionSetChooser; | 2590 CollectionSetChooser* cset_chooser = _collectionSetChooser; |
2669 assert(cset_chooser->verify(), "CSet Chooser verification - pre"); | 2591 cset_chooser->verify(); |
2670 const uint min_old_cset_length = cset_chooser->calcMinOldCSetLength(); | 2592 const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length(); |
2671 const uint max_old_cset_length = cset_chooser->calcMaxOldCSetLength(); | 2593 const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length(); |
2672 | 2594 |
2673 uint expensive_region_num = 0; | 2595 uint expensive_region_num = 0; |
2674 bool check_time_remaining = adaptive_young_list_length(); | 2596 bool check_time_remaining = adaptive_young_list_length(); |
2675 HeapRegion* hr = cset_chooser->peek(); | 2597 HeapRegion* hr = cset_chooser->peek(); |
2676 while (hr != NULL) { | 2598 while (hr != NULL) { |
2753 expensive_region_num, | 2675 expensive_region_num, |
2754 min_old_cset_length, | 2676 min_old_cset_length, |
2755 time_remaining_ms); | 2677 time_remaining_ms); |
2756 } | 2678 } |
2757 | 2679 |
2758 assert(cset_chooser->verify(), "CSet Chooser verification - post"); | 2680 cset_chooser->verify(); |
2759 } | 2681 } |
2760 | 2682 |
2761 stop_incremental_cset_building(); | 2683 stop_incremental_cset_building(); |
2762 | 2684 |
2763 count_CS_bytes_used(); | 2685 count_CS_bytes_used(); |