comparison src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @ 342:37f87013dfd8

6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr
author ysr
date Thu, 05 Jun 2008 15:57:56 -0700
parents
children 0edda524b58c
1 /*
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 #include "incls/_precompiled.incl"
26 #include "incls/_g1CollectorPolicy.cpp.incl"
27
28 #define PREDICTIONS_VERBOSE 0
29
30 // <NEW PREDICTION>
31
32 // Different defaults for different number of GC threads
33 // They were chosen by running GCOld and SPECjbb on debris with different
34 // numbers of GC threads and choosing them based on the results
35
36 // all the same
37 static double rs_length_diff_defaults[] = {
38 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
39 };
40
41 static double cost_per_card_ms_defaults[] = {
42 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
43 };
44
45 static double cost_per_scan_only_region_ms_defaults[] = {
46 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
47 };
48
49 // all the same
50 static double fully_young_cards_per_entry_ratio_defaults[] = {
51 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
52 };
53
54 static double cost_per_entry_ms_defaults[] = {
55 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
56 };
57
58 static double cost_per_byte_ms_defaults[] = {
59 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
60 };
61
62 // these should be pretty consistent
63 static double constant_other_time_ms_defaults[] = {
64 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
65 };
66
67
68 static double young_other_cost_per_region_ms_defaults[] = {
69 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
70 };
71
72 static double non_young_other_cost_per_region_ms_defaults[] = {
73 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
74 };
75
76 // </NEW PREDICTION>
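// --- Editor's note: illustrative sketch, not part of this changeset. ---
// The constructor below seeds each prediction sequence from one entry of the
// 8-element tables above, clamping the GC thread count into the valid index
// range (0 or 1 threads share entry 0, anything above 8 uses entry 7).  A
// minimal standalone version of that selection (defaults_index_for is a
// hypothetical name):
static int defaults_index_for(int parallel_gc_threads) {
  if (parallel_gc_threads == 0) return 0;   // serial case: use the first entry
  if (parallel_gc_threads > 8)  return 7;   // clamp to the last entry
  return parallel_gc_threads - 1;           // 1..8 threads map to indices 0..7
}
// e.g. defaults_index_for(0) == 0, defaults_index_for(4) == 3,
//      defaults_index_for(16) == 7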
77
78 G1CollectorPolicy::G1CollectorPolicy() :
79 _parallel_gc_threads((ParallelGCThreads > 0) ? ParallelGCThreads : 1),
80 _n_pauses(0),
81 _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
82 _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
83 _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
84 _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
85 _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
86 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
87 _all_pause_times_ms(new NumberSeq()),
88 _stop_world_start(0.0),
89 _all_stop_world_times_ms(new NumberSeq()),
90 _all_yield_times_ms(new NumberSeq()),
91
92 _all_mod_union_times_ms(new NumberSeq()),
93
94 _non_pop_summary(new NonPopSummary()),
95 _pop_summary(new PopSummary()),
96 _non_pop_abandoned_summary(new NonPopAbandonedSummary()),
97 _pop_abandoned_summary(new PopAbandonedSummary()),
98
99 _cur_clear_ct_time_ms(0.0),
100
101 _region_num_young(0),
102 _region_num_tenured(0),
103 _prev_region_num_young(0),
104 _prev_region_num_tenured(0),
105
106 _aux_num(10),
107 _all_aux_times_ms(new NumberSeq[_aux_num]),
108 _cur_aux_start_times_ms(new double[_aux_num]),
109 _cur_aux_times_ms(new double[_aux_num]),
110 _cur_aux_times_set(new bool[_aux_num]),
111
112 _pop_compute_rc_start(0.0),
113 _pop_evac_start(0.0),
114
115 _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
116 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
117 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
118
119 // <NEW PREDICTION>
120
121 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
122 _prev_collection_pause_end_ms(0.0),
123 _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
124 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
125 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
126 _cost_per_scan_only_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
127 _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
128 _partially_young_cards_per_entry_ratio_seq(
129 new TruncatedSeq(TruncatedSeqLength)),
130 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
131 _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
132 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
133 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
134 _cost_per_scan_only_region_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
135 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
136 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
137 _non_young_other_cost_per_region_ms_seq(
138 new TruncatedSeq(TruncatedSeqLength)),
139
140 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
141 _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
142 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
143
144 _pause_time_target_ms((double) G1MaxPauseTimeMS),
145
146 // </NEW PREDICTION>
147
148 _in_young_gc_mode(false),
149 _full_young_gcs(true),
150 _full_young_pause_num(0),
151 _partial_young_pause_num(0),
152
153 _during_marking(false),
154 _in_marking_window(false),
155 _in_marking_window_im(false),
156
157 _known_garbage_ratio(0.0),
158 _known_garbage_bytes(0),
159
160 _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
161 _target_pause_time_ms(-1.0),
162
163 _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
164
165 _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
166 _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
167
168 _recent_avg_pause_time_ratio(0.0),
169 _num_markings(0),
170 _n_marks(0),
171 _n_pauses_at_mark_end(0),
172
173 _all_full_gc_times_ms(new NumberSeq()),
174
175 _conc_refine_enabled(0),
176 _conc_refine_zero_traversals(0),
177 _conc_refine_max_traversals(0),
178 _conc_refine_current_delta(G1ConcRefineInitialDelta),
179
180 // G1PausesBtwnConcMark defaults to -1
181 // so the hack is to do the cast QQQ FIXME
182 _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
183 _n_marks_since_last_pause(0),
184 _conc_mark_initiated(false),
185 _should_initiate_conc_mark(false),
186 _should_revert_to_full_young_gcs(false),
187 _last_full_young_gc(false),
188
189 _prev_collection_pause_used_at_end_bytes(0),
190
191 _collection_set(NULL),
192 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
193 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
194 #endif // _MSC_VER
195
196 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
197 G1YoungSurvRateNumRegionsSummary)),
198 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
199 G1YoungSurvRateNumRegionsSummary))
200 // add here any more surv rate groups
201 {
202 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
203 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
204
205 _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
206 _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
207 _par_last_scan_only_times_ms = new double[_parallel_gc_threads];
208 _par_last_scan_only_regions_scanned = new double[_parallel_gc_threads];
209
210 _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
211 _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
212 _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
213
214 _par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads];
215 _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
216 _par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads];
217
218 _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
219
220 _par_last_termination_times_ms = new double[_parallel_gc_threads];
221
222 // we store the data from the first pass during popularity pauses
223 _pop_par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
224 _pop_par_last_update_rs_times_ms = new double[_parallel_gc_threads];
225 _pop_par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
226
227 _pop_par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads];
228 _pop_par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
229
230 _pop_par_last_closure_app_times_ms = new double[_parallel_gc_threads];
231
232 // start conservatively
233 _expensive_region_limit_ms = 0.5 * (double) G1MaxPauseTimeMS;
234
235 // <NEW PREDICTION>
236
237 int index;
238 if (ParallelGCThreads == 0)
239 index = 0;
240 else if (ParallelGCThreads > 8)
241 index = 7;
242 else
243 index = ParallelGCThreads - 1;
244
245 _pending_card_diff_seq->add(0.0);
246 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
247 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
248 _cost_per_scan_only_region_ms_seq->add(
249 cost_per_scan_only_region_ms_defaults[index]);
250 _fully_young_cards_per_entry_ratio_seq->add(
251 fully_young_cards_per_entry_ratio_defaults[index]);
252 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
253 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
254 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
255 _young_other_cost_per_region_ms_seq->add(
256 young_other_cost_per_region_ms_defaults[index]);
257 _non_young_other_cost_per_region_ms_seq->add(
258 non_young_other_cost_per_region_ms_defaults[index]);
259
260 // </NEW PREDICTION>
261
262 double time_slice = (double) G1TimeSliceMS / 1000.0;
263 double max_gc_time = (double) G1MaxPauseTimeMS / 1000.0;
264 guarantee(max_gc_time < time_slice,
265 "Max GC time should not be greater than the time slice");
266 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
267 _sigma = (double) G1ConfidencePerc / 100.0;
268
269 // start conservatively (around 50ms is about right)
270 _concurrent_mark_init_times_ms->add(0.05);
271 _concurrent_mark_remark_times_ms->add(0.05);
272 _concurrent_mark_cleanup_times_ms->add(0.20);
273 _tenuring_threshold = MaxTenuringThreshold;
274
275 initialize_all();
276 }
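// --- Editor's note: illustrative sketch, not part of this changeset. ---
// The G1MMUTrackerQueue built above tracks pauses against the goal of at most
// max_gc_time seconds of pause in any time_slice-second window.  Assuming,
// purely for illustration, G1TimeSliceMS = 500 and G1MaxPauseTimeMS = 200,
// that corresponds to a minimum mutator utilization of 60%:
static double min_mutator_utilization(double time_slice_sec, double max_gc_sec) {
  return (time_slice_sec - max_gc_sec) / time_slice_sec;
}
// min_mutator_utilization(0.5, 0.2) == 0.6, i.e. the application is promised
// at least 60% of the wall-clock time over any 500 ms window.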
277
278 // Increment "i", mod "len"
279 static void inc_mod(int& i, int len) {
280 i++; if (i == len) i = 0;
281 }
282
283 void G1CollectorPolicy::initialize_flags() {
284 set_min_alignment(HeapRegion::GrainBytes);
285 set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
286 CollectorPolicy::initialize_flags();
287 }
288
289 void G1CollectorPolicy::init() {
290 // Set aside an initial future to_space.
291 _g1 = G1CollectedHeap::heap();
292 size_t regions = Universe::heap()->capacity() / HeapRegion::GrainBytes;
293
294 assert(Heap_lock->owned_by_self(), "Locking discipline.");
295
296 if (G1SteadyStateUsed < 50) {
297 vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%.");
298 }
299 if (UseConcMarkSweepGC) {
300 vm_exit_during_initialization("-XX:+UseG1GC is incompatible with "
301 "-XX:+UseConcMarkSweepGC.");
302 }
303
304 if (G1Gen) {
305 _in_young_gc_mode = true;
306
307 if (G1YoungGenSize == 0) {
308 set_adaptive_young_list_length(true);
309 _young_list_fixed_length = 0;
310 } else {
311 set_adaptive_young_list_length(false);
312 _young_list_fixed_length = (G1YoungGenSize / HeapRegion::GrainBytes);
313 }
314 _free_regions_at_end_of_collection = _g1->free_regions();
315 _scan_only_regions_at_end_of_collection = 0;
316 calculate_young_list_min_length();
317 guarantee( _young_list_min_length == 0, "invariant, not enough info" );
318 calculate_young_list_target_config();
319 } else {
320 _young_list_fixed_length = 0;
321 _in_young_gc_mode = false;
322 }
323 }
324
325 void G1CollectorPolicy::calculate_young_list_min_length() {
326 _young_list_min_length = 0;
327
328 if (!adaptive_young_list_length())
329 return;
330
331 if (_alloc_rate_ms_seq->num() > 3) {
332 double now_sec = os::elapsedTime();
333 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
334 double alloc_rate_ms = predict_alloc_rate_ms();
335 int min_regions = (int) ceil(alloc_rate_ms * when_ms);
336 int current_region_num = (int) _g1->young_list_length();
337 _young_list_min_length = min_regions + current_region_num;
338 }
339 }
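// --- Editor's note: illustrative worked example, not part of this changeset. ---
// With a predicted allocation rate of 0.1 regions/ms, the MMU tracker allowing
// the next pause to start in 200 ms, and 5 regions already in the young list,
// the method above computes (all numbers invented for illustration):
//   min_regions            = ceil(0.1 * 200) = 20
//   _young_list_min_length = 20 + 5          = 25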
340
341 void G1CollectorPolicy::calculate_young_list_target_config() {
342 if (adaptive_young_list_length()) {
343 size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
344 calculate_young_list_target_config(rs_lengths);
345 } else {
346 if (full_young_gcs())
347 _young_list_target_length = _young_list_fixed_length;
348 else
349 _young_list_target_length = _young_list_fixed_length / 2;
350 _young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
351 size_t so_length = calculate_optimal_so_length(_young_list_target_length);
352 guarantee( so_length < _young_list_target_length, "invariant" );
353 _young_list_so_prefix_length = so_length;
354 }
355 }
356
357 // This method calculates the optimal scan-only set for a fixed young
358 // gen size. I couldn't work out how to reuse the more elaborate one,
359 // i.e. calculate_young_list_target_config(rs_length), as the loops are
360 // fundamentally different (the other one finds a config for different
361 // S-O lengths, whereas here we need to do the opposite).
362 size_t G1CollectorPolicy::calculate_optimal_so_length(
363 size_t young_list_length) {
364 if (!G1UseScanOnlyPrefix)
365 return 0;
366
367 if (_all_pause_times_ms->num() < 3) {
368 // we won't use a scan-only set at the beginning to allow the rest
369 // of the predictors to warm up
370 return 0;
371 }
372
373 if (_cost_per_scan_only_region_ms_seq->num() < 3) {
374 // then, we'll only set the S-O set to 1 for a little bit of time,
375 // to get enough information on the scanning cost
376 return 1;
377 }
378
379 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
380 size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
381 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
382 size_t scanned_cards;
383 if (full_young_gcs())
384 scanned_cards = predict_young_card_num(adj_rs_lengths);
385 else
386 scanned_cards = predict_non_young_card_num(adj_rs_lengths);
387 double base_time_ms = predict_base_elapsed_time_ms(pending_cards,
388 scanned_cards);
389
390 size_t so_length = 0;
391 double max_gc_eff = 0.0;
392 for (size_t i = 0; i < young_list_length; ++i) {
393 double gc_eff = 0.0;
394 double pause_time_ms = 0.0;
395 predict_gc_eff(young_list_length, i, base_time_ms,
396 &gc_eff, &pause_time_ms);
397 if (gc_eff > max_gc_eff) {
398 max_gc_eff = gc_eff;
399 so_length = i;
400 }
401 }
402
403 // set it to 95% of the optimal to make sure we sample the "area"
404 // around the optimal length to get up-to-date survival rate data
405 return so_length * 950 / 1000;
406 }
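// --- Editor's note: illustrative worked example, not part of this changeset. ---
// The linear scan above keeps the S-O length with the highest predicted GC
// efficiency and then returns 95% of it; e.g. an optimal length of 40 regions
// is reported as 40 * 950 / 1000 = 38, so regions just below the optimum keep
// contributing fresh survival-rate samples.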
407
408 // This is a really cool piece of code! It finds the best
409 // target configuration (young length / scan-only prefix length) so
410 // that GC efficiency is maximized and that we also meet a pause
411 // time. It's a triple nested loop. These loops are explained below
412 // from the inside-out :-)
413 //
414 // (a) The innermost loop will try to find the optimal young length
415 // for a fixed S-O length. It uses a binary search to speed up the
416 // process. We assume that, for a fixed S-O length, as we add more
417 // young regions to the CSet, the GC efficiency will only go up (I'll
418 // skip the proof). So, using a binary search to optimize this process
419 // makes perfect sense.
420 //
421 // (b) The middle loop will fix the S-O length before calling the
422 // innermost one. It will vary it between two parameters, increasing
423 // it by a given increment.
424 //
425 // (c) The outermost loop will call the middle loop three times.
426 // (1) The first time it will explore all possible S-O length values
427 // from 0 to as large as it can get, using a coarse increment (to
428 // quickly "home in" to where the optimal seems to be).
429 // (2) The second time it will explore the values around the optimal
430 // that was found by the first iteration using a fine increment.
431 // (3) Once the optimal config has been determined by the second
432 // iteration, we'll redo the calculation, but setting the S-O length
433 // to 95% of the optimal to make sure we sample the "area"
434 // around the optimal length to get up-to-date survival rate data
435 //
436 // There are several termination conditions for the iterations: the pause
437 // time is over the limit, we do not have enough to-space, etc.
438
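// --- Editor's note: illustrative sketch, not part of this changeset. ---
// The innermost loop described in (a) is essentially a binary search for the
// largest young length whose predicted pause still fits the target, relying on
// feasibility being monotone in the young length.  A simplified standalone
// version (is_feasible stands in for the predict_gc_eff() call and is
// hypothetical):
static int largest_feasible_young_length(int min_len, int max_len,
                                         bool (*is_feasible)(int)) {
  // precondition: is_feasible(min_len) holds
  while (max_len > min_len) {
    int mid = min_len + (max_len - min_len + 1) / 2;  // round up to guarantee progress
    if (is_feasible(mid)) {
      min_len = mid;        // mid still fits: raise the lower bound
    } else {
      max_len = mid - 1;    // mid is too long: lower the upper bound
    }
  }
  return min_len;
}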
439 void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
440 guarantee( adaptive_young_list_length(), "pre-condition" );
441
442 double start_time_sec = os::elapsedTime();
443 size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1MinReservePerc);
444 min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
445 size_t reserve_regions =
446 (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
447
448 if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
449 // we are in fully-young mode and there are free regions in the heap
450
451 size_t min_so_length = 0;
452 size_t max_so_length = 0;
453
454 if (G1UseScanOnlyPrefix) {
455 if (_all_pause_times_ms->num() < 3) {
456 // we won't use a scan-only set at the beginning to allow the rest
457 // of the predictors to warm up
458 min_so_length = 0;
459 max_so_length = 0;
460 } else if (_cost_per_scan_only_region_ms_seq->num() < 3) {
461 // then, we'll only set the S-O set to 1 for a little bit of time,
462 // to get enough information on the scanning cost
463 min_so_length = 1;
464 max_so_length = 1;
465 } else if (_in_marking_window || _last_full_young_gc) {
466 // no S-O prefix during a marking phase either, as at the end
467 // of the marking phase we'll have to use a very small young
468 // length target to fill up the rest of the CSet with
469 // non-young regions and, if we have lots of scan-only regions
470 // left-over, we will not be able to add any more non-young
471 // regions.
472 min_so_length = 0;
473 max_so_length = 0;
474 } else {
475 // this is the common case; we'll never reach the maximum, as
476 // one of the end conditions will fire well before that
477 // (hopefully!)
478 min_so_length = 0;
479 max_so_length = _free_regions_at_end_of_collection - 1;
480 }
481 } else {
482 // no S-O prefix, as the switch is not set, but we still need to
483 // do one iteration to calculate the best young target that
484 // meets the pause time; this way we reuse the same code instead
485 // of replicating it
486 min_so_length = 0;
487 max_so_length = 0;
488 }
489
490 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
491 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
492 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
493 size_t scanned_cards;
494 if (full_young_gcs())
495 scanned_cards = predict_young_card_num(adj_rs_lengths);
496 else
497 scanned_cards = predict_non_young_card_num(adj_rs_lengths);
498 // calculate this once, so that we don't have to recalculate it in
499 // the innermost loop
500 double base_time_ms = predict_base_elapsed_time_ms(pending_cards,
501 scanned_cards);
502
503 // the result
504 size_t final_young_length = 0;
505 size_t final_so_length = 0;
506 double final_gc_eff = 0.0;
507 // we'll also keep track of how many times we go into the inner loop
508 // this is for profiling reasons
509 size_t calculations = 0;
510
511 // this determines which of the three iterations the outer loop is in
512 typedef enum {
513 pass_type_coarse,
514 pass_type_fine,
515 pass_type_final
516 } pass_type_t;
517
518 // range of the outer loop's iteration
519 size_t from_so_length = min_so_length;
520 size_t to_so_length = max_so_length;
521 guarantee( from_so_length <= to_so_length, "invariant" );
522
523 // this will keep the S-O length that's found by the second
524 // iteration of the outer loop; we'll keep it just in case the third
525 // iteration fails to find something
526 size_t fine_so_length = 0;
527
528 // the increment step for the coarse (first) iteration
529 size_t so_coarse_increments = 5;
530
531 // the common case, we'll start with the coarse iteration
532 pass_type_t pass = pass_type_coarse;
533 size_t so_length_incr = so_coarse_increments;
534
535 if (from_so_length == to_so_length) {
536 // no point in doing the coarse iteration; we'll go directly into
537 // the fine one (we're essentially trying to find the optimal young
538 // length for a fixed S-O length).
539 so_length_incr = 1;
540 pass = pass_type_final;
541 } else if (to_so_length - from_so_length < 3 * so_coarse_increments) {
542 // again, the range is too short so no point in doing the coarse
543 // iteration either
544 so_length_incr = 1;
545 pass = pass_type_fine;
546 }
547
548 bool done = false;
549 // this is the outermost loop
550 while (!done) {
551 #if 0
552 // leave this in for debugging, just in case
553 gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT
554 ", incr " SIZE_FORMAT ", pass %s",
555 from_so_length, to_so_length, so_length_incr,
556 (pass == pass_type_coarse) ? "coarse" :
557 (pass == pass_type_fine) ? "fine" : "final");
558 #endif // 0
559
560 size_t so_length = from_so_length;
561 size_t init_free_regions =
562 MAX2((size_t)0,
563 _free_regions_at_end_of_collection +
564 _scan_only_regions_at_end_of_collection - reserve_regions);
565
566 // this determines whether a configuration was found
567 bool gc_eff_set = false;
568 // this is the middle loop
569 while (so_length <= to_so_length) {
570 // base time, which excludes region-related time; again we
571 // calculate it once to avoid recalculating it in the
572 // innermost loop
573 double base_time_with_so_ms =
574 base_time_ms + predict_scan_only_time_ms(so_length);
575 // it's already over the pause target, go around
576 if (base_time_with_so_ms > target_pause_time_ms)
577 break;
578
579 size_t starting_young_length = so_length+1;
580
581 // we make sure that the shortest young length that makes sense
582 // (one more than the S-O length) is feasible
583 size_t min_young_length = starting_young_length;
584 double min_gc_eff;
585 bool min_ok;
586 ++calculations;
587 min_ok = predict_gc_eff(min_young_length, so_length,
588 base_time_with_so_ms,
589 init_free_regions, target_pause_time_ms,
590 &min_gc_eff);
591
592 if (min_ok) {
593 // the shortest young length is indeed feasible; we'll now
594 // set up the max young length and we'll do a binary search
595 // between min_young_length and max_young_length
596 size_t max_young_length = _free_regions_at_end_of_collection - 1;
597 double max_gc_eff = 0.0;
598 bool max_ok = false;
599
600 // the innermost loop! (finally!)
601 while (max_young_length > min_young_length) {
602 // we'll make sure that min_young_length is always at a
603 // feasible config
604 guarantee( min_ok, "invariant" );
605
606 ++calculations;
607 max_ok = predict_gc_eff(max_young_length, so_length,
608 base_time_with_so_ms,
609 init_free_regions, target_pause_time_ms,
610 &max_gc_eff);
611
612 size_t diff = (max_young_length - min_young_length) / 2;
613 if (max_ok) {
614 min_young_length = max_young_length;
615 min_gc_eff = max_gc_eff;
616 min_ok = true;
617 }
618 max_young_length = min_young_length + diff;
619 }
620
621 // the innermost loop found a config
622 guarantee( min_ok, "invariant" );
623 if (min_gc_eff > final_gc_eff) {
624 // it's the best config so far, so we'll keep it
625 final_gc_eff = min_gc_eff;
626 final_young_length = min_young_length;
627 final_so_length = so_length;
628 gc_eff_set = true;
629 }
630 }
631
632 // increment the fixed S-O length and go around
633 so_length += so_length_incr;
634 }
635
636 // this is the end of the outermost loop and we need to decide
637 // what to do during the next iteration
638 if (pass == pass_type_coarse) {
639 // we just did the coarse pass (first iteration)
640
641 if (!gc_eff_set)
642 // we didn't find a feasible config so we'll just bail out; of
643 // course, it might be the case that we missed it; but I'd say
644 // it's a bit unlikely
645 done = true;
646 else {
647 // We did find a feasible config with optimal GC eff during
648 // the first pass. So the second pass we'll only consider the
649 // S-O lengths around that config with a fine increment.
650
651 guarantee( so_length_incr == so_coarse_increments, "invariant" );
652 guarantee( final_so_length >= min_so_length, "invariant" );
653
654 #if 0
655 // leave this in for debugging, just in case
656 gclog_or_tty->print_cr(" coarse pass: SO length " SIZE_FORMAT,
657 final_so_length);
658 #endif // 0
659
660 from_so_length =
661 (final_so_length - min_so_length > so_coarse_increments) ?
662 final_so_length - so_coarse_increments + 1 : min_so_length;
663 to_so_length =
664 (max_so_length - final_so_length > so_coarse_increments) ?
665 final_so_length + so_coarse_increments - 1 : max_so_length;
666
667 pass = pass_type_fine;
668 so_length_incr = 1;
669 }
670 } else if (pass == pass_type_fine) {
671 // we just finished the second pass
672
673 if (!gc_eff_set) {
674 // we didn't find a feasible config (yes, it's possible;
675 // notice that, sometimes, we go directly into the fine
676 // iteration and skip the coarse one) so we bail out
677 done = true;
678 } else {
679 // We did find a feasible config with optimal GC eff
680 guarantee( so_length_incr == 1, "invariant" );
681
682 if (final_so_length == 0) {
683 // The config is of an empty S-O set, so we'll just bail out
684 done = true;
685 } else {
686 // we'll go around once more, setting the S-O length to 95%
687 // of the optimal
688 size_t new_so_length = 950 * final_so_length / 1000;
689
690 #if 0
691 // leave this in for debugging, just in case
692 gclog_or_tty->print_cr(" fine pass: SO length " SIZE_FORMAT
693 ", setting it to " SIZE_FORMAT,
694 final_so_length, new_so_length);
695 #endif // 0
696
697 from_so_length = new_so_length;
698 to_so_length = new_so_length;
699 fine_so_length = final_so_length;
700
701 pass = pass_type_final;
702 }
703 }
704 } else if (pass == pass_type_final) {
705 // we just finished the final (third) pass
706
707 if (!gc_eff_set)
708 // we didn't find a feasible config, so we'll just use the one
709 // we found during the second pass, which we saved
710 final_so_length = fine_so_length;
711
712 // and we're done!
713 done = true;
714 } else {
715 guarantee( false, "should never reach here" );
716 }
717
718 // we now go around the outermost loop
719 }
720
721 // we should have at least one region in the target young length
722 _young_list_target_length = MAX2((size_t) 1, final_young_length);
723 if (final_so_length >= final_young_length)
724 // and we need to ensure that the S-O length is not greater than
725 // the target young length (this is being a bit careful)
726 final_so_length = 0;
727 _young_list_so_prefix_length = final_so_length;
728 guarantee( !_in_marking_window || !_last_full_young_gc ||
729 _young_list_so_prefix_length == 0, "invariant" );
730
731 // let's keep an eye on how long we spend on this calculation;
732 // right now, I assume that we'll print it when we need it; we
733 // should really add it to the breakdown of a pause
734 double end_time_sec = os::elapsedTime();
735 double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
736
737 #if 0
738 // leave this in for debugging, just in case
739 gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT
740 ", SO = " SIZE_FORMAT ", "
741 "elapsed %1.2lf ms, calcs: " SIZE_FORMAT " (%s%s) "
742 SIZE_FORMAT SIZE_FORMAT,
743 target_pause_time_ms,
744 _young_list_target_length - _young_list_so_prefix_length,
745 _young_list_so_prefix_length,
746 elapsed_time_ms,
747 calculations,
748 full_young_gcs() ? "full" : "partial",
749 should_initiate_conc_mark() ? " i-m" : "",
750 in_marking_window(),
751 in_marking_window_im());
752 #endif // 0
753
754 if (_young_list_target_length < _young_list_min_length) {
755 // bummer; this means that, if we do a pause when the optimal
756 // config dictates, we'll violate the pause spacing target (the
757 // min length was calculated based on the application's current
758 // alloc rate);
759
760 // so, we have to bite the bullet, and allocate the minimum
761 // number. We'll violate our target, but we just can't meet it.
762
763 size_t so_length = 0;
764 // a note further up explains why we do not want an S-O length
765 // during marking
766 if (!_in_marking_window && !_last_full_young_gc)
767 // but we can still try to see whether we can find an optimal
768 // S-O length
769 so_length = calculate_optimal_so_length(_young_list_min_length);
770
771 #if 0
772 // leave this in for debugging, just in case
773 gclog_or_tty->print_cr("adjusted target length from "
774 SIZE_FORMAT " to " SIZE_FORMAT
775 ", SO " SIZE_FORMAT,
776 _young_list_target_length, _young_list_min_length,
777 so_length);
778 #endif // 0
779
780 _young_list_target_length =
781 MAX2(_young_list_min_length, (size_t)1);
782 _young_list_so_prefix_length = so_length;
783 }
784 } else {
785 // we are in a partially-young mode or we've run out of regions (due
786 // to evacuation failure)
787
788 #if 0
789 // leave this in for debugging, just in case
790 gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
791 ", SO " SIZE_FORMAT,
792 _young_list_min_length, 0);
793 #endif // 0
794
795 // we'll do the pause as soon as possible and with no S-O prefix
796 // (see above for the reasons behind the latter)
797 _young_list_target_length =
798 MAX2(_young_list_min_length, (size_t) 1);
799 _young_list_so_prefix_length = 0;
800 }
801
802 _rs_lengths_prediction = rs_lengths;
803 }
804
805 // This is used by: calculate_optimal_so_length(length). It returns
806 // the GC eff and predicted pause time for a particular config
807 void
808 G1CollectorPolicy::predict_gc_eff(size_t young_length,
809 size_t so_length,
810 double base_time_ms,
811 double* ret_gc_eff,
812 double* ret_pause_time_ms) {
813 double so_time_ms = predict_scan_only_time_ms(so_length);
814 double accum_surv_rate_adj = 0.0;
815 if (so_length > 0)
816 accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
817 double accum_surv_rate =
818 accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
819 size_t bytes_to_copy =
820 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
821 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
822 double young_other_time_ms =
823 predict_young_other_time_ms(young_length - so_length);
824 double pause_time_ms =
825 base_time_ms + so_time_ms + copy_time_ms + young_other_time_ms;
826 size_t reclaimed_bytes =
827 (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
828 double gc_eff = (double) reclaimed_bytes / pause_time_ms;
829
830 *ret_gc_eff = gc_eff;
831 *ret_pause_time_ms = pause_time_ms;
832 }
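// --- Editor's note: illustrative worked example, not part of this changeset. ---
// GC efficiency here is simply bytes reclaimed per millisecond of pause.  With
// a 1 MB region size, 20 young regions outside the S-O prefix, 2 MB of
// predicted surviving (copied) bytes and a 30 ms predicted pause (all numbers
// invented for illustration):
//   reclaimed_bytes = 20 * 1 MB - 2 MB = 18 MB
//   gc_eff          = 18 MB / 30 ms    = 0.6 MB per ms of pause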
833
834 // This is used by: calculate_young_list_target_config(rs_length). It
835 // returns the GC eff of a particular config. It returns false if that
836 // config violates any of the end conditions of the search in the
837 // calling method, or true upon success. The end conditions were put
838 // here since it's called twice and it was best not to replicate them
839 // in the caller. Also, passing the parameteres avoids having to
840 // recalculate them in the innermost loop.
841 bool
842 G1CollectorPolicy::predict_gc_eff(size_t young_length,
843 size_t so_length,
844 double base_time_with_so_ms,
845 size_t init_free_regions,
846 double target_pause_time_ms,
847 double* ret_gc_eff) {
848 *ret_gc_eff = 0.0;
849
850 if (young_length >= init_free_regions)
851 // end condition 1: not enough space for the young regions
852 return false;
853
854 double accum_surv_rate_adj = 0.0;
855 if (so_length > 0)
856 accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
857 double accum_surv_rate =
858 accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
859 size_t bytes_to_copy =
860 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
861 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
862 double young_other_time_ms =
863 predict_young_other_time_ms(young_length - so_length);
864 double pause_time_ms =
865 base_time_with_so_ms + copy_time_ms + young_other_time_ms;
866
867 if (pause_time_ms > target_pause_time_ms)
868 // end condition 2: over the target pause time
869 return false;
870
871 size_t reclaimed_bytes =
872 (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
873 size_t free_bytes =
874 (init_free_regions - young_length) * HeapRegion::GrainBytes;
875
876 if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
877 // end condition 3: out of to-space (conservatively)
878 return false;
879
880 // success!
881 double gc_eff = (double) reclaimed_bytes / pause_time_ms;
882 *ret_gc_eff = gc_eff;
883
884 return true;
885 }
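// --- Editor's note: illustrative worked example, not part of this changeset. ---
// End condition 3 above rejects a configuration when roughly twice the
// predicted surviving bytes, padded by sigma (the confidence factor), would
// not fit in the space left after carving out the candidate young regions.
// For instance, with sigma = 0.5, 10 MB predicted to be copied and 20 MB of
// free space remaining (numbers invented for illustration):
//   (2.0 + 0.5) * 10 MB = 25 MB > 20 MB  ->  the configuration is rejected.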
886
887 void G1CollectorPolicy::check_prediction_validity() {
888 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
889
890 size_t rs_lengths = _g1->young_list_sampled_rs_lengths();
891 if (rs_lengths > _rs_lengths_prediction) {
892 // add 10% to avoid having to recalculate often
893 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
894 calculate_young_list_target_config(rs_lengths_prediction);
895 }
896 }
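// --- Editor's note: illustrative worked example, not part of this changeset. ---
// The 10% padding above means a sampled RS length of, say, 2000 entries
// triggers a recalculation with a prediction of 2000 * 1100 / 1000 = 2200, so
// modest further growth does not immediately force yet another recalculation.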
897
898 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
899 bool is_tlab,
900 bool* gc_overhead_limit_was_exceeded) {
901 guarantee(false, "Not using this policy feature yet.");
902 return NULL;
903 }
904
905 // This method controls how a collector handles one or more
906 // of its generations being fully allocated.
907 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
908 bool is_tlab) {
909 guarantee(false, "Not using this policy feature yet.");
910 return NULL;
911 }
912
913
914 #ifndef PRODUCT
915 bool G1CollectorPolicy::verify_young_ages() {
916 HeapRegion* head = _g1->young_list_first_region();
917 return
918 verify_young_ages(head, _short_lived_surv_rate_group);
919 // also call verify_young_ages on any additional surv rate groups
920 }
921
922 bool
923 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
924 SurvRateGroup *surv_rate_group) {
925 guarantee( surv_rate_group != NULL, "pre-condition" );
926
927 const char* name = surv_rate_group->name();
928 bool ret = true;
929 int prev_age = -1;
930
931 for (HeapRegion* curr = head;
932 curr != NULL;
933 curr = curr->get_next_young_region()) {
934 SurvRateGroup* group = curr->surv_rate_group();
935 if (group == NULL && !curr->is_survivor()) {
936 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
937 ret = false;
938 }
939
940 if (surv_rate_group == group) {
941 int age = curr->age_in_surv_rate_group();
942
943 if (age < 0) {
944 gclog_or_tty->print_cr("## %s: encountered negative age", name);
945 ret = false;
946 }
947
948 if (age <= prev_age) {
949 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
950 "(%d, %d)", name, age, prev_age);
951 ret = false;
952 }
953 prev_age = age;
954 }
955 }
956
957 return ret;
958 }
959 #endif // PRODUCT
960
961 void G1CollectorPolicy::record_full_collection_start() {
962 _cur_collection_start_sec = os::elapsedTime();
963 // Release the future to-space so that it is available for compaction into.
964 _g1->set_full_collection();
965 }
966
967 void G1CollectorPolicy::record_full_collection_end() {
968 // Consider this like a collection pause for the purposes of allocation
969 // since last pause.
970 double end_sec = os::elapsedTime();
971 double full_gc_time_sec = end_sec - _cur_collection_start_sec;
972 double full_gc_time_ms = full_gc_time_sec * 1000.0;
973
974 checkpoint_conc_overhead();
975
976 _all_full_gc_times_ms->add(full_gc_time_ms);
977
978 update_recent_gc_times(end_sec, full_gc_time_sec);
979
980 _g1->clear_full_collection();
981
982 // "Nuke" the heuristics that control the fully/partially young GC
983 // transitions and make sure we start with fully young GCs after the
984 // Full GC.
985 set_full_young_gcs(true);
986 _last_full_young_gc = false;
987 _should_revert_to_full_young_gcs = false;
988 _should_initiate_conc_mark = false;
989 _known_garbage_bytes = 0;
990 _known_garbage_ratio = 0.0;
991 _in_marking_window = false;
992 _in_marking_window_im = false;
993
994 _short_lived_surv_rate_group->record_scan_only_prefix(0);
995 _short_lived_surv_rate_group->start_adding_regions();
996 // also call this on any additional surv rate groups
997
998 _prev_region_num_young = _region_num_young;
999 _prev_region_num_tenured = _region_num_tenured;
1000
1001 _free_regions_at_end_of_collection = _g1->free_regions();
1002 _scan_only_regions_at_end_of_collection = 0;
1003 calculate_young_list_min_length();
1004 calculate_young_list_target_config();
1005 }
1006
1007 void G1CollectorPolicy::record_pop_compute_rc_start() {
1008 _pop_compute_rc_start = os::elapsedTime();
1009 }
1010 void G1CollectorPolicy::record_pop_compute_rc_end() {
1011 double ms = (os::elapsedTime() - _pop_compute_rc_start)*1000.0;
1012 _cur_popular_compute_rc_time_ms = ms;
1013 _pop_compute_rc_start = 0.0;
1014 }
1015 void G1CollectorPolicy::record_pop_evac_start() {
1016 _pop_evac_start = os::elapsedTime();
1017 }
1018 void G1CollectorPolicy::record_pop_evac_end() {
1019 double ms = (os::elapsedTime() - _pop_evac_start)*1000.0;
1020 _cur_popular_evac_time_ms = ms;
1021 _pop_evac_start = 0.0;
1022 }
1023
1024 void G1CollectorPolicy::record_before_bytes(size_t bytes) {
1025 _bytes_in_to_space_before_gc += bytes;
1026 }
1027
1028 void G1CollectorPolicy::record_after_bytes(size_t bytes) {
1029 _bytes_in_to_space_after_gc += bytes;
1030 }
1031
1032 void G1CollectorPolicy::record_stop_world_start() {
1033 _stop_world_start = os::elapsedTime();
1034 }
1035
1036 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
1037 size_t start_used) {
1038 if (PrintGCDetails) {
1039 gclog_or_tty->stamp(PrintGCTimeStamps);
1040 gclog_or_tty->print("[GC pause");
1041 if (in_young_gc_mode())
1042 gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
1043 }
1044
1045 assert(_g1->used_regions() == _g1->recalculate_used_regions(),
1046 "sanity");
1047
1048 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
1049 _all_stop_world_times_ms->add(s_w_t_ms);
1050 _stop_world_start = 0.0;
1051
1052 _cur_collection_start_sec = start_time_sec;
1053 _cur_collection_pause_used_at_start_bytes = start_used;
1054 _cur_collection_pause_used_regions_at_start = _g1->used_regions();
1055 _pending_cards = _g1->pending_card_num();
1056 _max_pending_cards = _g1->max_pending_card_num();
1057
1058 _bytes_in_to_space_before_gc = 0;
1059 _bytes_in_to_space_after_gc = 0;
1060 _bytes_in_collection_set_before_gc = 0;
1061
1062 #ifdef DEBUG
1063 // initialise these to something well known so that we can spot
1064 // if they are not set properly
1065
1066 for (int i = 0; i < _parallel_gc_threads; ++i) {
1067 _par_last_ext_root_scan_times_ms[i] = -666.0;
1068 _par_last_mark_stack_scan_times_ms[i] = -666.0;
1069 _par_last_scan_only_times_ms[i] = -666.0;
1070 _par_last_scan_only_regions_scanned[i] = -666.0;
1071 _par_last_update_rs_start_times_ms[i] = -666.0;
1072 _par_last_update_rs_times_ms[i] = -666.0;
1073 _par_last_update_rs_processed_buffers[i] = -666.0;
1074 _par_last_scan_rs_start_times_ms[i] = -666.0;
1075 _par_last_scan_rs_times_ms[i] = -666.0;
1076 _par_last_scan_new_refs_times_ms[i] = -666.0;
1077 _par_last_obj_copy_times_ms[i] = -666.0;
1078 _par_last_termination_times_ms[i] = -666.0;
1079
1080 _pop_par_last_update_rs_start_times_ms[i] = -666.0;
1081 _pop_par_last_update_rs_times_ms[i] = -666.0;
1082 _pop_par_last_update_rs_processed_buffers[i] = -666.0;
1083 _pop_par_last_scan_rs_start_times_ms[i] = -666.0;
1084 _pop_par_last_scan_rs_times_ms[i] = -666.0;
1085 _pop_par_last_closure_app_times_ms[i] = -666.0;
1086 }
1087 #endif
1088
1089 for (int i = 0; i < _aux_num; ++i) {
1090 _cur_aux_times_ms[i] = 0.0;
1091 _cur_aux_times_set[i] = false;
1092 }
1093
1094 _satb_drain_time_set = false;
1095 _last_satb_drain_processed_buffers = -1;
1096
1097 if (in_young_gc_mode())
1098 _last_young_gc_full = false;
1099
1100
1101 // do that for any other surv rate groups
1102 _short_lived_surv_rate_group->stop_adding_regions();
1103 size_t short_lived_so_length = _young_list_so_prefix_length;
1104 _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
1105 tag_scan_only(short_lived_so_length);
1106
1107 assert( verify_young_ages(), "region age verification" );
1108 }
1109
1110 void G1CollectorPolicy::tag_scan_only(size_t short_lived_scan_only_length) {
1111 // done in a way that it can be extended for other surv rate groups too...
1112
1113 HeapRegion* head = _g1->young_list_first_region();
1114 bool finished_short_lived = (short_lived_scan_only_length == 0);
1115
1116 if (finished_short_lived)
1117 return;
1118
1119 for (HeapRegion* curr = head;
1120 curr != NULL;
1121 curr = curr->get_next_young_region()) {
1122 SurvRateGroup* surv_rate_group = curr->surv_rate_group();
1123 int age = curr->age_in_surv_rate_group();
1124
1125 if (surv_rate_group == _short_lived_surv_rate_group) {
1126 if ((size_t)age < short_lived_scan_only_length)
1127 curr->set_scan_only();
1128 else
1129 finished_short_lived = true;
1130 }
1131
1132
1133 if (finished_short_lived)
1134 return;
1135 }
1136
1137 guarantee( false, "we should never reach here" );
1138 }
1139
1140 void G1CollectorPolicy::record_popular_pause_preamble_start() {
1141 _cur_popular_preamble_start_ms = os::elapsedTime() * 1000.0;
1142 }
1143
1144 void G1CollectorPolicy::record_popular_pause_preamble_end() {
1145 _cur_popular_preamble_time_ms =
1146 (os::elapsedTime() * 1000.0) - _cur_popular_preamble_start_ms;
1147
1148 // copy the recorded statistics of the first pass to temporary arrays
1149 for (int i = 0; i < _parallel_gc_threads; ++i) {
1150 _pop_par_last_update_rs_start_times_ms[i] = _par_last_update_rs_start_times_ms[i];
1151 _pop_par_last_update_rs_times_ms[i] = _par_last_update_rs_times_ms[i];
1152 _pop_par_last_update_rs_processed_buffers[i] = _par_last_update_rs_processed_buffers[i];
1153 _pop_par_last_scan_rs_start_times_ms[i] = _par_last_scan_rs_start_times_ms[i];
1154 _pop_par_last_scan_rs_times_ms[i] = _par_last_scan_rs_times_ms[i];
1155 _pop_par_last_closure_app_times_ms[i] = _par_last_obj_copy_times_ms[i];
1156 }
1157 }
1158
1159 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
1160 _mark_closure_time_ms = mark_closure_time_ms;
1161 }
1162
1163 void G1CollectorPolicy::record_concurrent_mark_init_start() {
1164 _mark_init_start_sec = os::elapsedTime();
1165 guarantee(!in_young_gc_mode(), "should not be here in young GC mode");
1166 }
1167
1168 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
1169 mark_init_elapsed_time_ms) {
1170 _during_marking = true;
1171 _should_initiate_conc_mark = false;
1172 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
1173 }
1174
1175 void G1CollectorPolicy::record_concurrent_mark_init_end() {
1176 double end_time_sec = os::elapsedTime();
1177 double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
1178 _concurrent_mark_init_times_ms->add(elapsed_time_ms);
1179 checkpoint_conc_overhead();
1180 record_concurrent_mark_init_end_pre(elapsed_time_ms);
1181
1182 _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
1183 }
1184
1185 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
1186 _mark_remark_start_sec = os::elapsedTime();
1187 _during_marking = false;
1188 }
1189
1190 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
1191 double end_time_sec = os::elapsedTime();
1192 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
1193 checkpoint_conc_overhead();
1194 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
1195 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1196 _prev_collection_pause_end_ms += elapsed_time_ms;
1197
1198 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
1199 }
1200
1201 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
1202 _mark_cleanup_start_sec = os::elapsedTime();
1203 }
1204
1205 void
1206 G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
1207 size_t max_live_bytes) {
1208 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
1209 record_concurrent_mark_cleanup_end_work2();
1210 }
1211
1212 void
1213 G1CollectorPolicy::
1214 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
1215 size_t max_live_bytes) {
1216 if (_n_marks < 2) _n_marks++;
1217 if (G1PolicyVerbose > 0)
1218 gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
1219 " (of " SIZE_FORMAT " MB heap).",
1220 max_live_bytes/M, _g1->capacity()/M);
1221 }
1222
1223 // The important thing about this is that it includes "os::elapsedTime".
1224 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
1225 checkpoint_conc_overhead();
1226 double end_time_sec = os::elapsedTime();
1227 double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
1228 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1229 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1230 _prev_collection_pause_end_ms += elapsed_time_ms;
1231
1232 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
1233
1234 _num_markings++;
1235
1236 // We did a marking, so reset the "since_last_mark" variables.
1237 double considerConcMarkCost = 1.0;
1238 // If there are available processors, concurrent activity is free...
1239 if (Threads::number_of_non_daemon_threads() * 2 <
1240 os::active_processor_count()) {
1241 considerConcMarkCost = 0.0;
1242 }
1243 _n_pauses_at_mark_end = _n_pauses;
1244 _n_marks_since_last_pause++;
1245 _conc_mark_initiated = false;
1246 }
1247
1248 void
1249 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
1250 if (in_young_gc_mode()) {
1251 _should_revert_to_full_young_gcs = false;
1252 _last_full_young_gc = true;
1253 _in_marking_window = false;
1254 if (adaptive_young_list_length())
1255 calculate_young_list_target_config();
1256 }
1257 }
1258
1259 void G1CollectorPolicy::record_concurrent_pause() {
1260 if (_stop_world_start > 0.0) {
1261 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
1262 _all_yield_times_ms->add(yield_ms);
1263 }
1264 }
1265
1266 void G1CollectorPolicy::record_concurrent_pause_end() {
1267 }
1268
1269 void G1CollectorPolicy::record_collection_pause_end_CH_strong_roots() {
1270 _cur_CH_strong_roots_end_sec = os::elapsedTime();
1271 _cur_CH_strong_roots_dur_ms =
1272 (_cur_CH_strong_roots_end_sec - _cur_collection_start_sec) * 1000.0;
1273 }
1274
1275 void G1CollectorPolicy::record_collection_pause_end_G1_strong_roots() {
1276 _cur_G1_strong_roots_end_sec = os::elapsedTime();
1277 _cur_G1_strong_roots_dur_ms =
1278 (_cur_G1_strong_roots_end_sec - _cur_CH_strong_roots_end_sec) * 1000.0;
1279 }
1280
1281 template<class T>
1282 T sum_of(T* sum_arr, int start, int n, int N) {
1283 T sum = (T)0;
1284 for (int i = 0; i < n; i++) {
1285 int j = (start + i) % N;
1286 sum += sum_arr[j];
1287 }
1288 return sum;
1289 }
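// --- Editor's note: illustrative usage sketch, not part of this changeset. ---
// sum_of() adds up n consecutive entries of a circular buffer of capacity N,
// starting at index start and wrapping around, e.g.:
//   double buf[4] = { 1.0, 2.0, 3.0, 4.0 };
//   double s = sum_of(buf, 3, 3, 4);   // buf[3] + buf[0] + buf[1] == 7.0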
1290
1291 void G1CollectorPolicy::print_par_stats (int level,
1292 const char* str,
1293 double* data,
1294 bool summary) {
1295 double min = data[0], max = data[0];
1296 double total = 0.0;
1297 int j;
1298 for (j = 0; j < level; ++j)
1299 gclog_or_tty->print(" ");
1300 gclog_or_tty->print("[%s (ms):", str);
1301 for (uint i = 0; i < ParallelGCThreads; ++i) {
1302 double val = data[i];
1303 if (val < min)
1304 min = val;
1305 if (val > max)
1306 max = val;
1307 total += val;
1308 gclog_or_tty->print(" %3.1lf", val);
1309 }
1310 if (summary) {
1311 gclog_or_tty->print_cr("");
1312 double avg = total / (double) ParallelGCThreads;
1313 gclog_or_tty->print(" ");
1314 for (j = 0; j < level; ++j)
1315 gclog_or_tty->print(" ");
1316 gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
1317 avg, min, max);
1318 }
1319 gclog_or_tty->print_cr("]");
1320 }
1321
1322 void G1CollectorPolicy::print_par_buffers (int level,
1323 const char* str,
1324 double* data,
1325 bool summary) {
1326 double min = data[0], max = data[0];
1327 double total = 0.0;
1328 int j;
1329 for (j = 0; j < level; ++j)
1330 gclog_or_tty->print(" ");
1331 gclog_or_tty->print("[%s :", str);
1332 for (uint i = 0; i < ParallelGCThreads; ++i) {
1333 double val = data[i];
1334 if (val < min)
1335 min = val;
1336 if (val > max)
1337 max = val;
1338 total += val;
1339 gclog_or_tty->print(" %d", (int) val);
1340 }
1341 if (summary) {
1342 gclog_or_tty->print_cr("");
1343 double avg = total / (double) ParallelGCThreads;
1344 gclog_or_tty->print(" ");
1345 for (j = 0; j < level; ++j)
1346 gclog_or_tty->print(" ");
1347 gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d",
1348 (int)total, (int)avg, (int)min, (int)max);
1349 }
1350 gclog_or_tty->print_cr("]");
1351 }
1352
1353 void G1CollectorPolicy::print_stats (int level,
1354 const char* str,
1355 double value) {
1356 for (int j = 0; j < level; ++j)
1357 gclog_or_tty->print(" ");
1358 gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value);
1359 }
1360
1361 void G1CollectorPolicy::print_stats (int level,
1362 const char* str,
1363 int value) {
1364 for (int j = 0; j < level; ++j)
1365 gclog_or_tty->print(" ");
1366 gclog_or_tty->print_cr("[%s: %d]", str, value);
1367 }
1368
1369 double G1CollectorPolicy::avg_value (double* data) {
1370 if (ParallelGCThreads > 0) {
1371 double ret = 0.0;
1372 for (uint i = 0; i < ParallelGCThreads; ++i)
1373 ret += data[i];
1374 return ret / (double) ParallelGCThreads;
1375 } else {
1376 return data[0];
1377 }
1378 }
1379
1380 double G1CollectorPolicy::max_value (double* data) {
1381 if (ParallelGCThreads > 0) {
1382 double ret = data[0];
1383 for (uint i = 1; i < ParallelGCThreads; ++i)
1384 if (data[i] > ret)
1385 ret = data[i];
1386 return ret;
1387 } else {
1388 return data[0];
1389 }
1390 }
1391
1392 double G1CollectorPolicy::sum_of_values (double* data) {
1393 if (ParallelGCThreads > 0) {
1394 double sum = 0.0;
1395 for (uint i = 0; i < ParallelGCThreads; i++)
1396 sum += data[i];
1397 return sum;
1398 } else {
1399 return data[0];
1400 }
1401 }
1402
1403 double G1CollectorPolicy::max_sum (double* data1,
1404 double* data2) {
1405 double ret = data1[0] + data2[0];
1406
1407 if (ParallelGCThreads > 0) {
1408 for (uint i = 1; i < ParallelGCThreads; ++i) {
1409 double data = data1[i] + data2[i];
1410 if (data > ret)
1411 ret = data;
1412 }
1413 }
1414 return ret;
1415 }
1416
1417 // Anything below that is considered to be zero
1418 #define MIN_TIMER_GRANULARITY 0.0000001
1419
1420 void G1CollectorPolicy::record_collection_pause_end(bool popular,
1421 bool abandoned) {
1422 double end_time_sec = os::elapsedTime();
1423 double elapsed_ms = _last_pause_time_ms;
1424 bool parallel = ParallelGCThreads > 0;
1425 double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
1426 size_t rs_size =
1427 _cur_collection_pause_used_regions_at_start - collection_set_size();
1428 size_t cur_used_bytes = _g1->used();
1429 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1430 bool last_pause_included_initial_mark = false;
1431
1432 #ifndef PRODUCT
1433 if (G1YoungSurvRateVerbose) {
1434 gclog_or_tty->print_cr("");
1435 _short_lived_surv_rate_group->print();
1436 // do that for any other surv rate groups too
1437 }
1438 #endif // PRODUCT
1439
1440 checkpoint_conc_overhead();
1441
1442 if (in_young_gc_mode()) {
1443 last_pause_included_initial_mark = _should_initiate_conc_mark;
1444 if (last_pause_included_initial_mark)
1445 record_concurrent_mark_init_end_pre(0.0);
1446
1447 size_t min_used_targ =
1448 (_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta);
1449
1450 if (cur_used_bytes > min_used_targ) {
1451 if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) {
1452 } else if (!_g1->mark_in_progress() && !_last_full_young_gc) {
1453 _should_initiate_conc_mark = true;
1454 }
1455 }
1456
1457 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
1458 }
1459
1460 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
1461 end_time_sec, false);
1462
1463 guarantee(_cur_collection_pause_used_regions_at_start >=
1464 collection_set_size(),
1465 "Negative RS size?");
1466
1467 // This assert is exempted when we're doing parallel collection pauses,
1468 // because the fragmentation caused by the parallel GC allocation buffers
1469 // can lead to more memory being used during collection than was used
1470 // before. Best leave this out until the fragmentation problem is fixed.
1471 // Pauses in which evacuation failed can also lead to negative
1472 // collections, since no space is reclaimed from a region containing an
1473 // object whose evacuation failed.
1474 // Further, we're now always doing parallel collection. But I'm still
1475 // leaving this here as a placeholder for a more precise assertion later.
1476 // (DLD, 10/05.)
1477 assert((true || parallel) // Always using GC LABs now.
1478 || _g1->evacuation_failed()
1479 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
1480 "Negative collection");
1481
1482 size_t freed_bytes =
1483 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
1484 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
1485 double survival_fraction =
1486 (double)surviving_bytes/
1487 (double)_collection_set_bytes_used_before;
1488
1489 _n_pauses++;
1490
1491 if (!abandoned) {
1492 _recent_CH_strong_roots_times_ms->add(_cur_CH_strong_roots_dur_ms);
1493 _recent_G1_strong_roots_times_ms->add(_cur_G1_strong_roots_dur_ms);
1494 _recent_evac_times_ms->add(evac_ms);
1495 _recent_pause_times_ms->add(elapsed_ms);
1496
1497 _recent_rs_sizes->add(rs_size);
1498
1499 // We exempt parallel collection from this check because Alloc Buffer
1500 // fragmentation can produce negative collections. Same with evac
1501 // failure.
1502 // Further, we're now always doing parallel collection. But I'm still
1503 // leaving this here as a placeholder for a more precise assertion later.
1504 // (DLD, 10/05.)
1505 assert((true || parallel)
1506 || _g1->evacuation_failed()
1507 || surviving_bytes <= _collection_set_bytes_used_before,
1508 "Or else negative collection!");
1509 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
1510 _recent_CS_bytes_surviving->add(surviving_bytes);
1511
1512 // this is where we update the allocation rate of the application
1513 double app_time_ms =
1514 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
1515 if (app_time_ms < MIN_TIMER_GRANULARITY) {
1516 // This usually happens due to the timer not having the required
1517 // granularity. Some Linuxes are the usual culprits.
1518 // We'll just set it to something (arbitrarily) small.
1519 app_time_ms = 1.0;
1520 }
1521 size_t regions_allocated =
1522 (_region_num_young - _prev_region_num_young) +
1523 (_region_num_tenured - _prev_region_num_tenured);
1524 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1525 _alloc_rate_ms_seq->add(alloc_rate_ms);
1526 _prev_region_num_young = _region_num_young;
1527 _prev_region_num_tenured = _region_num_tenured;
1528
1529 double interval_ms =
1530 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1531 update_recent_gc_times(end_time_sec, elapsed_ms);
1532 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1533 assert(recent_avg_pause_time_ratio() < 1.00, "All GC?");
1534 }
1535
1536 if (G1PolicyVerbose > 1) {
1537 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses);
1538 }
1539
1540 PauseSummary* summary;
1541 if (!abandoned && !popular)
1542 summary = _non_pop_summary;
1543 else if (!abandoned && popular)
1544 summary = _pop_summary;
1545 else if (abandoned && !popular)
1546 summary = _non_pop_abandoned_summary;
1547 else if (abandoned && popular)
1548 summary = _pop_abandoned_summary;
1549 else
1550 guarantee(false, "should not get here!");
1551
1552 double pop_update_rs_time;
1553 double pop_update_rs_processed_buffers;
1554 double pop_scan_rs_time;
1555 double pop_closure_app_time;
1556 double pop_other_time;
1557
1558 if (popular) {
1559 PopPreambleSummary* preamble_summary = summary->pop_preamble_summary();
1560 guarantee(preamble_summary != NULL, "should not be null!");
1561
1562 pop_update_rs_time = avg_value(_pop_par_last_update_rs_times_ms);
1563 pop_update_rs_processed_buffers =
1564 sum_of_values(_pop_par_last_update_rs_processed_buffers);
1565 pop_scan_rs_time = avg_value(_pop_par_last_scan_rs_times_ms);
1566 pop_closure_app_time = avg_value(_pop_par_last_closure_app_times_ms);
1567 pop_other_time = _cur_popular_preamble_time_ms -
1568 (pop_update_rs_time + pop_scan_rs_time + pop_closure_app_time +
1569 _cur_popular_evac_time_ms);
1570
1571 preamble_summary->record_pop_preamble_time_ms(_cur_popular_preamble_time_ms);
1572 preamble_summary->record_pop_update_rs_time_ms(pop_update_rs_time);
1573 preamble_summary->record_pop_scan_rs_time_ms(pop_scan_rs_time);
1574 preamble_summary->record_pop_closure_app_time_ms(pop_closure_app_time);
1575 preamble_summary->record_pop_evacuation_time_ms(_cur_popular_evac_time_ms);
1576 preamble_summary->record_pop_other_time_ms(pop_other_time);
1577 }
1578
1579 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
1580 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
1581 double scan_only_time = avg_value(_par_last_scan_only_times_ms);
1582 double scan_only_regions_scanned =
1583 sum_of_values(_par_last_scan_only_regions_scanned);
1584 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
1585 double update_rs_processed_buffers =
1586 sum_of_values(_par_last_update_rs_processed_buffers);
1587 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
1588 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
1589 double termination_time = avg_value(_par_last_termination_times_ms);
1590
1591 double parallel_other_time;
1592 if (!abandoned) {
1593 MainBodySummary* body_summary = summary->main_body_summary();
1594 guarantee(body_summary != NULL, "should not be null!");
1595
1596 if (_satb_drain_time_set)
1597 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
1598 else
1599 body_summary->record_satb_drain_time_ms(0.0);
1600 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
1601 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
1602 body_summary->record_scan_only_time_ms(scan_only_time);
1603 body_summary->record_update_rs_time_ms(update_rs_time);
1604 body_summary->record_scan_rs_time_ms(scan_rs_time);
1605 body_summary->record_obj_copy_time_ms(obj_copy_time);
1606 if (parallel) {
1607 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
1608 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
1609 body_summary->record_termination_time_ms(termination_time);
1610 parallel_other_time = _cur_collection_par_time_ms -
1611 (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
1612 scan_only_time + scan_rs_time + obj_copy_time + termination_time);
1613 body_summary->record_parallel_other_time_ms(parallel_other_time);
1614 }
1615 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
1616 }
1617
1618 if (G1PolicyVerbose > 1) {
1619 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n"
1620 " CH Strong: %10.6f ms (avg: %10.6f ms)\n"
1621 " G1 Strong: %10.6f ms (avg: %10.6f ms)\n"
1622 " Evac: %10.6f ms (avg: %10.6f ms)\n"
1623 " ET-RS: %10.6f ms (avg: %10.6f ms)\n"
1624 " |RS|: " SIZE_FORMAT,
1625 elapsed_ms, recent_avg_time_for_pauses_ms(),
1626 _cur_CH_strong_roots_dur_ms, recent_avg_time_for_CH_strong_ms(),
1627 _cur_G1_strong_roots_dur_ms, recent_avg_time_for_G1_strong_ms(),
1628 evac_ms, recent_avg_time_for_evac_ms(),
1629 scan_rs_time,
1630 recent_avg_time_for_pauses_ms() -
1631 recent_avg_time_for_G1_strong_ms(),
1632 rs_size);
1633
1634 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K"
1635 " At end " SIZE_FORMAT "K\n"
1636 " garbage : " SIZE_FORMAT "K"
1637 " of " SIZE_FORMAT "K\n"
1638 " survival : %6.2f%% (%6.2f%% avg)",
1639 _cur_collection_pause_used_at_start_bytes/K,
1640 _g1->used()/K, freed_bytes/K,
1641 _collection_set_bytes_used_before/K,
1642 survival_fraction*100.0,
1643 recent_avg_survival_fraction()*100.0);
1644 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f",
1645 recent_avg_pause_time_ratio() * 100.0);
1646 }
1647
1648 double other_time_ms = elapsed_ms;
1649 if (popular)
1650 other_time_ms -= _cur_popular_preamble_time_ms;
1651
1652 if (!abandoned) {
1653 if (_satb_drain_time_set)
1654 other_time_ms -= _cur_satb_drain_time_ms;
1655
1656 if (parallel)
1657 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
1658 else
1659 other_time_ms -=
1660 update_rs_time +
1661 ext_root_scan_time + mark_stack_scan_time + scan_only_time +
1662 scan_rs_time + obj_copy_time;
1663 }
1664
1665 if (PrintGCDetails) {
1666 gclog_or_tty->print_cr("%s%s, %1.8lf secs]",
1667 (popular && !abandoned) ? " (popular)" :
1668 (!popular && abandoned) ? " (abandoned)" :
1669 (popular && abandoned) ? " (popular/abandoned)" : "",
1670 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
1671 elapsed_ms / 1000.0);
1672
1673 if (!abandoned) {
1674 if (_satb_drain_time_set)
1675 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
1676 if (_last_satb_drain_processed_buffers >= 0)
1677 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
1678 }
1679 if (popular)
1680 print_stats(1, "Popularity Preamble", _cur_popular_preamble_time_ms);
1681 if (parallel) {
1682 if (popular) {
1683 print_par_stats(2, "Update RS (Start)", _pop_par_last_update_rs_start_times_ms, false);
1684 print_par_stats(2, "Update RS", _pop_par_last_update_rs_times_ms);
1685 if (G1RSBarrierUseQueue)
1686 print_par_buffers(3, "Processed Buffers",
1687 _pop_par_last_update_rs_processed_buffers, true);
1688 print_par_stats(2, "Scan RS", _pop_par_last_scan_rs_times_ms);
1689 print_par_stats(2, "Closure app", _pop_par_last_closure_app_times_ms);
1690 print_stats(2, "Evacuation", _cur_popular_evac_time_ms);
1691 print_stats(2, "Other", pop_other_time);
1692 }
1693 if (!abandoned) {
1694 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
1695 if (!popular) {
1696 print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false);
1697 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
1698 if (G1RSBarrierUseQueue)
1699 print_par_buffers(3, "Processed Buffers",
1700 _par_last_update_rs_processed_buffers, true);
1701 }
1702 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
1703 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
1704 print_par_stats(2, "Scan-Only Scanning", _par_last_scan_only_times_ms);
1705 print_par_buffers(3, "Scan-Only Regions",
1706 _par_last_scan_only_regions_scanned, true);
1707 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
1708 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
1709 print_par_stats(2, "Termination", _par_last_termination_times_ms);
1710 print_stats(2, "Other", parallel_other_time);
1711 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
1712 }
1713 } else {
1714 if (popular) {
1715 print_stats(2, "Update RS", pop_update_rs_time);
1716 if (G1RSBarrierUseQueue)
1717 print_stats(3, "Processed Buffers",
1718 (int)pop_update_rs_processed_buffers);
1719 print_stats(2, "Scan RS", pop_scan_rs_time);
1720 print_stats(2, "Closure App", pop_closure_app_time);
1721 print_stats(2, "Evacuation", _cur_popular_evac_time_ms);
1722 print_stats(2, "Other", pop_other_time);
1723 }
1724 if (!abandoned) {
1725 if (!popular) {
1726 print_stats(1, "Update RS", update_rs_time);
1727 if (G1RSBarrierUseQueue)
1728 print_stats(2, "Processed Buffers",
1729 (int)update_rs_processed_buffers);
1730 }
1731 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
1732 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
1733 print_stats(1, "Scan-Only Scanning", scan_only_time);
1734 print_stats(1, "Scan RS", scan_rs_time);
1735 print_stats(1, "Object Copying", obj_copy_time);
1736 }
1737 }
1738 print_stats(1, "Other", other_time_ms);
1739 for (int i = 0; i < _aux_num; ++i) {
1740 if (_cur_aux_times_set[i]) {
1741 char buffer[96];
1742 sprintf(buffer, "Aux%d", i);
1743 print_stats(1, buffer, _cur_aux_times_ms[i]);
1744 }
1745 }
1746 }
1747 if (PrintGCDetails)
1748 gclog_or_tty->print(" [");
1749 if (PrintGC || PrintGCDetails)
1750 _g1->print_size_transition(gclog_or_tty,
1751 _cur_collection_pause_used_at_start_bytes,
1752 _g1->used(), _g1->capacity());
1753 if (PrintGCDetails)
1754 gclog_or_tty->print_cr("]");
1755
1756 _all_pause_times_ms->add(elapsed_ms);
1757 summary->record_total_time_ms(elapsed_ms);
1758 summary->record_other_time_ms(other_time_ms);
1759 for (int i = 0; i < _aux_num; ++i)
1760 if (_cur_aux_times_set[i])
1761 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
1762
1763 // Reset marks-between-pauses counter.
1764 _n_marks_since_last_pause = 0;
1765
1766 // Update the efficiency-since-mark vars.
1767 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
1768 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
1769 // This usually happens due to the timer not having the required
1770 // granularity. Some Linuxes are the usual culprits.
1771 // We'll just set it to something (arbitrarily) small.
1772 proc_ms = 1.0;
1773 }
1774 double cur_efficiency = (double) freed_bytes / proc_ms;
1775
1776 bool new_in_marking_window = _in_marking_window;
1777 bool new_in_marking_window_im = false;
1778 if (_should_initiate_conc_mark) {
1779 new_in_marking_window = true;
1780 new_in_marking_window_im = true;
1781 }
1782
1783 if (in_young_gc_mode()) {
1784 if (_last_full_young_gc) {
1785 set_full_young_gcs(false);
1786 _last_full_young_gc = false;
1787 }
1788
1789 if ( !_last_young_gc_full ) {
1790 if ( _should_revert_to_full_young_gcs ||
1791 _known_garbage_ratio < 0.05 ||
1792 (adaptive_young_list_length() &&
1793 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
1794 set_full_young_gcs(true);
1795 }
1796 }
1797 _should_revert_to_full_young_gcs = false;
1798
1799 if (_last_young_gc_full && !_during_marking)
1800 _young_gc_eff_seq->add(cur_efficiency);
1801 }
1802
1803 _short_lived_surv_rate_group->start_adding_regions();
1804 // do that for any other surv rate groups
1805
1806 // <NEW PREDICTION>
1807
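  // For pauses that are neither popular nor abandoned, sample the observed
  // per-unit costs of this pause -- per pending card (RS update), per
  // scan-only region, per scanned card (RS scan), per copied byte, and the
  // per-region "other" times -- into their TruncatedSeq sequences. These
  // sequences are what the predict_*() methods below draw on when sizing
  // the next collection set.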
1808 if (!popular && !abandoned) {
1809 double pause_time_ms = elapsed_ms;
1810
1811 size_t diff = 0;
1812 if (_max_pending_cards >= _pending_cards)
1813 diff = _max_pending_cards - _pending_cards;
1814 _pending_card_diff_seq->add((double) diff);
1815
1816 double cost_per_card_ms = 0.0;
1817 if (_pending_cards > 0) {
1818 cost_per_card_ms = update_rs_time / (double) _pending_cards;
1819 _cost_per_card_ms_seq->add(cost_per_card_ms);
1820 }
1821
1822 double cost_per_scan_only_region_ms = 0.0;
1823 if (scan_only_regions_scanned > 0.0) {
1824 cost_per_scan_only_region_ms =
1825 scan_only_time / scan_only_regions_scanned;
1826 if (_in_marking_window_im)
1827 _cost_per_scan_only_region_ms_during_cm_seq->add(cost_per_scan_only_region_ms);
1828 else
1829 _cost_per_scan_only_region_ms_seq->add(cost_per_scan_only_region_ms);
1830 }
1831
1832 size_t cards_scanned = _g1->cards_scanned();
1833
1834 double cost_per_entry_ms = 0.0;
1835 if (cards_scanned > 10) {
1836 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
1837 if (_last_young_gc_full)
1838 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1839 else
1840 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1841 }
1842
1843 if (_max_rs_lengths > 0) {
1844 double cards_per_entry_ratio =
1845 (double) cards_scanned / (double) _max_rs_lengths;
1846 if (_last_young_gc_full)
1847 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1848 else
1849 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1850 }
1851
1852 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1853 if (rs_length_diff >= 0)
1854 _rs_length_diff_seq->add((double) rs_length_diff);
1855
1856 size_t copied_bytes = surviving_bytes;
1857 double cost_per_byte_ms = 0.0;
1858 if (copied_bytes > 0) {
1859 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
1860 if (_in_marking_window)
1861 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1862 else
1863 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1864 }
1865
1866 double all_other_time_ms = pause_time_ms -
1867 (update_rs_time + scan_only_time + scan_rs_time + obj_copy_time +
1868 _mark_closure_time_ms + termination_time);
1869
1870 double young_other_time_ms = 0.0;
1871 if (_recorded_young_regions > 0) {
1872 young_other_time_ms =
1873 _recorded_young_cset_choice_time_ms +
1874 _recorded_young_free_cset_time_ms;
1875 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1876 (double) _recorded_young_regions);
1877 }
1878 double non_young_other_time_ms = 0.0;
1879 if (_recorded_non_young_regions > 0) {
1880 non_young_other_time_ms =
1881 _recorded_non_young_cset_choice_time_ms +
1882 _recorded_non_young_free_cset_time_ms;
1883
1884 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1885 (double) _recorded_non_young_regions);
1886 }
1887
1888 double constant_other_time_ms = all_other_time_ms -
1889 (young_other_time_ms + non_young_other_time_ms);
1890 _constant_other_time_ms_seq->add(constant_other_time_ms);
1891
1892 double survival_ratio = 0.0;
1893 if (_bytes_in_collection_set_before_gc > 0) {
1894 survival_ratio = (double) bytes_in_to_space_during_gc() /
1895 (double) _bytes_in_collection_set_before_gc;
1896 }
1897
1898 _pending_cards_seq->add((double) _pending_cards);
1899 _scanned_cards_seq->add((double) cards_scanned);
1900 _rs_lengths_seq->add((double) _max_rs_lengths);
1901
1902 double expensive_region_limit_ms =
1903 (double) G1MaxPauseTimeMS - predict_constant_other_time_ms();
1904 if (expensive_region_limit_ms < 0.0) {
1905 // this means that the other time was predicted to be longer
1906 // than the max pause time
1907 expensive_region_limit_ms = (double) G1MaxPauseTimeMS;
1908 }
1909 _expensive_region_limit_ms = expensive_region_limit_ms;
1910
1911 if (PREDICTIONS_VERBOSE) {
1912 gclog_or_tty->print_cr("");
1913 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
1914 "REGIONS %d %d %d %d "
1915 "PENDING_CARDS %d %d "
1916 "CARDS_SCANNED %d %d "
1917 "RS_LENGTHS %d %d "
1918 "SCAN_ONLY_SCAN %1.6lf %1.6lf "
1919 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
1920 "SURVIVAL_RATIO %1.6lf %1.6lf "
1921 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
1922 "OTHER_YOUNG %1.6lf %1.6lf "
1923 "OTHER_NON_YOUNG %1.6lf %1.6lf "
1924 "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
1925 "ELAPSED %1.6lf %1.6lf ",
1926 _cur_collection_start_sec,
1927 (!_last_young_gc_full) ? 2 :
1928 (last_pause_included_initial_mark) ? 1 : 0,
1929 _recorded_region_num,
1930 _recorded_young_regions,
1931 _recorded_scan_only_regions,
1932 _recorded_non_young_regions,
1933 _predicted_pending_cards, _pending_cards,
1934 _predicted_cards_scanned, cards_scanned,
1935 _predicted_rs_lengths, _max_rs_lengths,
1936 _predicted_scan_only_scan_time_ms, scan_only_time,
1937 _predicted_rs_update_time_ms, update_rs_time,
1938 _predicted_rs_scan_time_ms, scan_rs_time,
1939 _predicted_survival_ratio, survival_ratio,
1940 _predicted_object_copy_time_ms, obj_copy_time,
1941 _predicted_constant_other_time_ms, constant_other_time_ms,
1942 _predicted_young_other_time_ms, young_other_time_ms,
1943 _predicted_non_young_other_time_ms,
1944 non_young_other_time_ms,
1945 _vtime_diff_ms, termination_time,
1946 _predicted_pause_time_ms, elapsed_ms);
1947 }
1948
1949 if (G1PolicyVerbose > 0) {
1950 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
1951 _predicted_pause_time_ms,
1952 (_within_target) ? "within" : "outside",
1953 elapsed_ms);
1954 }
1955
1956 }
1957
1958 _in_marking_window = new_in_marking_window;
1959 _in_marking_window_im = new_in_marking_window_im;
1960 _free_regions_at_end_of_collection = _g1->free_regions();
1961 _scan_only_regions_at_end_of_collection = _g1->young_list_length();
1962 calculate_young_list_min_length();
1963 calculate_young_list_target_config();
1964
1965 // </NEW PREDICTION>
1966
1967 _target_pause_time_ms = -1.0;
1968
1969 // TODO: calculate tenuring threshold
1970 _tenuring_threshold = MaxTenuringThreshold;
1971 }
1972
1973 // <NEW PREDICTION>
1974
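// In outline, the young-collection prediction built below is
//
//   predicted_ms ~= rs_update_time(pending cards)
//                 + rs_scan_time(cards predicted from the sampled RS lengths)
//                 + object_copy_time(bytes predicted from the accumulated
//                                    young survival rate)
//                 + young_other_time(number of young regions)
//                 + constant_other_time
//
// with each term derived from the cost sequences recorded in
// record_collection_pause_end().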
1975 double
1976 G1CollectorPolicy::
1977 predict_young_collection_elapsed_time_ms(size_t adjustment) {
1978 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
1979
1980 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1981 size_t young_num = g1h->young_list_length();
1982 if (young_num == 0)
1983 return 0.0;
1984
1985 young_num += adjustment;
1986 size_t pending_cards = predict_pending_cards();
1987 size_t rs_lengths = g1h->young_list_sampled_rs_lengths() +
1988 predict_rs_length_diff();
1989 size_t card_num;
1990 if (full_young_gcs())
1991 card_num = predict_young_card_num(rs_lengths);
1992 else
1993 card_num = predict_non_young_card_num(rs_lengths);
1994 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
1995 double accum_yg_surv_rate =
1996 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
1997
1998 size_t bytes_to_copy =
1999 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
2000
2001 return
2002 predict_rs_update_time_ms(pending_cards) +
2003 predict_rs_scan_time_ms(card_num) +
2004 predict_object_copy_time_ms(bytes_to_copy) +
2005 predict_young_other_time_ms(young_num) +
2006 predict_constant_other_time_ms();
2007 }
2008
2009 double
2010 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
2011 size_t rs_length = predict_rs_length_diff();
2012 size_t card_num;
2013 if (full_young_gcs())
2014 card_num = predict_young_card_num(rs_length);
2015 else
2016 card_num = predict_non_young_card_num(rs_length);
2017 return predict_base_elapsed_time_ms(pending_cards, card_num);
2018 }
2019
2020 double
2021 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
2022 size_t scanned_cards) {
2023 return
2024 predict_rs_update_time_ms(pending_cards) +
2025 predict_rs_scan_time_ms(scanned_cards) +
2026 predict_constant_other_time_ms();
2027 }
2028
2029 double
2030 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
2031 bool young) {
2032 size_t rs_length = hr->rem_set()->occupied();
2033 size_t card_num;
2034 if (full_young_gcs())
2035 card_num = predict_young_card_num(rs_length);
2036 else
2037 card_num = predict_non_young_card_num(rs_length);
2038 size_t bytes_to_copy = predict_bytes_to_copy(hr);
2039
2040 double region_elapsed_time_ms =
2041 predict_rs_scan_time_ms(card_num) +
2042 predict_object_copy_time_ms(bytes_to_copy);
2043
2044 if (young)
2045 region_elapsed_time_ms += predict_young_other_time_ms(1);
2046 else
2047 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
2048
2049 return region_elapsed_time_ms;
2050 }
2051
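// Expected number of bytes that evacuating a region will copy: for a
// marked (non-young) region we use the max_live_bytes() bound from
// marking; for a young region we scale its current occupancy by the
// predicted survival rate for its age in the survivor rate group.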
2052 size_t
2053 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
2054 size_t bytes_to_copy;
2055 if (hr->is_marked())
2056 bytes_to_copy = hr->max_live_bytes();
2057 else {
2058 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
2059 "invariant" );
2060 int age = hr->age_in_surv_rate_group();
2061 double yg_surv_rate = predict_yg_surv_rate(age);
2062 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
2063 }
2064
2065 return bytes_to_copy;
2066 }
2067
2068 void
2069 G1CollectorPolicy::start_recording_regions() {
2070 _recorded_rs_lengths = 0;
2071 _recorded_scan_only_regions = 0;
2072 _recorded_young_regions = 0;
2073 _recorded_non_young_regions = 0;
2074
2075 #if PREDICTIONS_VERBOSE
2076 _predicted_rs_lengths = 0;
2077 _predicted_cards_scanned = 0;
2078
2079 _recorded_marked_bytes = 0;
2080 _recorded_young_bytes = 0;
2081 _predicted_bytes_to_copy = 0;
2082 #endif // PREDICTIONS_VERBOSE
2083 }
2084
2085 void
2086 G1CollectorPolicy::record_cset_region(HeapRegion* hr, bool young) {
2087 if (young) {
2088 ++_recorded_young_regions;
2089 } else {
2090 ++_recorded_non_young_regions;
2091 }
2092 #if PREDICTIONS_VERBOSE
2093 if (young) {
2094 _recorded_young_bytes += hr->asSpace()->used();
2095 } else {
2096 _recorded_marked_bytes += hr->max_live_bytes();
2097 }
2098 _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
2099 #endif // PREDICTIONS_VERBOSE
2100
2101 size_t rs_length = hr->rem_set()->occupied();
2102 _recorded_rs_lengths += rs_length;
2103 }
2104
2105 void
2106 G1CollectorPolicy::record_scan_only_regions(size_t scan_only_length) {
2107 _recorded_scan_only_regions = scan_only_length;
2108 }
2109
2110 void
2111 G1CollectorPolicy::end_recording_regions() {
2112 #if PREDICTIONS_VERBOSE
2113 _predicted_pending_cards = predict_pending_cards();
2114 _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
2115 if (full_young_gcs())
2116 _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
2117 else
2118 _predicted_cards_scanned +=
2119 predict_non_young_card_num(_predicted_rs_lengths);
2120 _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
2121
2122 _predicted_young_survival_ratio = 0.0;
2123 for (int i = 0; i < _recorded_young_regions; ++i)
2124 _predicted_young_survival_ratio += predict_yg_surv_rate(i);
2125 _predicted_young_survival_ratio /= (double) _recorded_young_regions;
2126
2127 _predicted_scan_only_scan_time_ms =
2128 predict_scan_only_time_ms(_recorded_scan_only_regions);
2129 _predicted_rs_update_time_ms =
2130 predict_rs_update_time_ms(_g1->pending_card_num());
2131 _predicted_rs_scan_time_ms =
2132 predict_rs_scan_time_ms(_predicted_cards_scanned);
2133 _predicted_object_copy_time_ms =
2134 predict_object_copy_time_ms(_predicted_bytes_to_copy);
2135 _predicted_constant_other_time_ms =
2136 predict_constant_other_time_ms();
2137 _predicted_young_other_time_ms =
2138 predict_young_other_time_ms(_recorded_young_regions);
2139 _predicted_non_young_other_time_ms =
2140 predict_non_young_other_time_ms(_recorded_non_young_regions);
2141
2142 _predicted_pause_time_ms =
2143 _predicted_scan_only_scan_time_ms +
2144 _predicted_rs_update_time_ms +
2145 _predicted_rs_scan_time_ms +
2146 _predicted_object_copy_time_ms +
2147 _predicted_constant_other_time_ms +
2148 _predicted_young_other_time_ms +
2149 _predicted_non_young_other_time_ms;
2150 #endif // PREDICTIONS_VERBOSE
2151 }
2152
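// _expensive_region_limit_ms is the slice of the pause budget left after
// the predicted constant overhead (G1MaxPauseTimeMS -
// predict_constant_other_time_ms(), computed in
// record_collection_pause_end()). A region predicted to cost more than
// that cannot fit into any pause: outside young GC mode we switch to
// fully-young collections and request that concurrent marking be
// initiated; in young GC mode we just flag a revert to fully-young
// collections.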
2153 void G1CollectorPolicy::check_if_region_is_too_expensive(double
2154 predicted_time_ms) {
2155 // I don't think we need to do this when in young GC mode since
2156 // marking will be initiated next time we hit the soft limit anyway...
2157 if (predicted_time_ms > _expensive_region_limit_ms) {
2158 if (!in_young_gc_mode()) {
2159 set_full_young_gcs(true);
2160 _should_initiate_conc_mark = true;
2161 } else
2162 // no point in doing another partial one
2163 _should_revert_to_full_young_gcs = true;
2164 }
2165 }
2166
2167 // </NEW PREDICTION>
2168
2169
2170 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
2171 double elapsed_ms) {
2172 _recent_gc_times_ms->add(elapsed_ms);
2173 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
2174 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
2175 }
2176
2177 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
2178 if (_recent_pause_times_ms->num() == 0) return (double) G1MaxPauseTimeMS;
2179 else return _recent_pause_times_ms->avg();
2180 }
2181
2182 double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
2183 if (_recent_CH_strong_roots_times_ms->num() == 0)
2184 return (double)G1MaxPauseTimeMS/3.0;
2185 else return _recent_CH_strong_roots_times_ms->avg();
2186 }
2187
2188 double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
2189 if (_recent_G1_strong_roots_times_ms->num() == 0)
2190 return (double)G1MaxPauseTimeMS/3.0;
2191 else return _recent_G1_strong_roots_times_ms->avg();
2192 }
2193
2194 double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
2195 if (_recent_evac_times_ms->num() == 0) return (double)G1MaxPauseTimeMS/3.0;
2196 else return _recent_evac_times_ms->avg();
2197 }
2198
2199 int G1CollectorPolicy::number_of_recent_gcs() {
2200 assert(_recent_CH_strong_roots_times_ms->num() ==
2201 _recent_G1_strong_roots_times_ms->num(), "Sequence out of sync");
2202 assert(_recent_G1_strong_roots_times_ms->num() ==
2203 _recent_evac_times_ms->num(), "Sequence out of sync");
2204 assert(_recent_evac_times_ms->num() ==
2205 _recent_pause_times_ms->num(), "Sequence out of sync");
2206 assert(_recent_pause_times_ms->num() ==
2207 _recent_CS_bytes_used_before->num(), "Sequence out of sync");
2208 assert(_recent_CS_bytes_used_before->num() ==
2209 _recent_CS_bytes_surviving->num(), "Sequence out of sync");
2210 return _recent_pause_times_ms->num();
2211 }
2212
2213 double G1CollectorPolicy::recent_avg_survival_fraction() {
2214 return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
2215 _recent_CS_bytes_used_before);
2216 }
2217
2218 double G1CollectorPolicy::last_survival_fraction() {
2219 return last_survival_fraction_work(_recent_CS_bytes_surviving,
2220 _recent_CS_bytes_used_before);
2221 }
2222
2223 double
2224 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
2225 TruncatedSeq* before) {
2226 assert(surviving->num() == before->num(), "Sequence out of sync");
2227 if (before->sum() > 0.0) {
2228 double recent_survival_rate = surviving->sum() / before->sum();
2229 // We exempt parallel collection from this check because Alloc Buffer
2230 // fragmentation can produce negative collections.
2231 // Further, we're now always doing parallel collection. But I'm still
2232 // leaving this here as a placeholder for a more precise assertion later.
2233 // (DLD, 10/05.)
2234 assert((true || ParallelGCThreads > 0) ||
2235 _g1->evacuation_failed() ||
2236 recent_survival_rate <= 1.0, "Or bad frac");
2237 return recent_survival_rate;
2238 } else {
2239 return 1.0; // Be conservative.
2240 }
2241 }
2242
2243 double
2244 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
2245 TruncatedSeq* before) {
2246 assert(surviving->num() == before->num(), "Sequence out of sync");
2247 if (surviving->num() > 0 && before->last() > 0.0) {
2248 double last_survival_rate = surviving->last() / before->last();
2249 // We exempt parallel collection from this check because Alloc Buffer
2250 // fragmentation can produce negative collections.
2251 // Further, we're now always doing parallel collection. But I'm still
2252 // leaving this here as a placeholder for a more precise assertion later.
2253 // (DLD, 10/05.)
2254 assert((true || ParallelGCThreads > 0) ||
2255 last_survival_rate <= 1.0, "Or bad frac");
2256 return last_survival_rate;
2257 } else {
2258 return 1.0;
2259 }
2260 }
2261
2262 static const int survival_min_obs = 5;
2263 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
2264 static const double min_survival_rate = 0.1;
2265
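// Conservative survival estimate: start from the recent average, never go
// below the latest observation or min_survival_rate, floor it at the
// survival_min_obs_limits[] entry while we have fewer than
// survival_min_obs samples, and cap it at 1.0 (LAB fragmentation and
// evacuation failure can push the raw fraction above 1).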
2266 double
2267 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
2268 double latest) {
2269 double res = avg;
2270 if (number_of_recent_gcs() < survival_min_obs) {
2271 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
2272 }
2273 res = MAX2(res, latest);
2274 res = MAX2(res, min_survival_rate);
2275 // In the parallel case, LAB fragmentation can produce "negative
2276 // collections"; so can evac failure. Cap at 1.0
2277 res = MIN2(res, 1.0);
2278 return res;
2279 }
2280
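// Heap expansion heuristic: once the recent pause-time ratio exceeds
// G1GCPct, expand by G1ExpandByPctOfAvail percent of the still
// uncommitted part of the reservation, but by no more than the currently
// committed size (i.e. at most doubling the heap), by no less than 1M,
// and never by more than what is actually left uncommitted.
//
// Illustrative numbers only: with 256M committed out of a 1G reservation
// and G1ExpandByPctOfAvail at 20, the 768M of uncommitted space would
// give an expansion of roughly 153M.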
2281 size_t G1CollectorPolicy::expansion_amount() {
2282 if ((int)(recent_avg_pause_time_ratio() * 100.0) > G1GCPct) {
2283 // We will double the existing space, or take G1ExpandByPctOfAvail % of
2284 // the available expansion space, whichever is smaller, bounded below
2285 // by a minimum expansion (unless that's all that's left.)
2286 const size_t min_expand_bytes = 1*M;
2287 size_t reserved_bytes = _g1->g1_reserved_obj_bytes();
2288 size_t committed_bytes = _g1->capacity();
2289 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
2290 size_t expand_bytes;
2291 size_t expand_bytes_via_pct =
2292 uncommitted_bytes * G1ExpandByPctOfAvail / 100;
2293 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
2294 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
2295 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
2296 if (G1PolicyVerbose > 1) {
2297 gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
2298 "committed = %d%s, uncommited = %d%s, via pct = %d%s.\n"
2299 " Answer = %d.\n",
2300 recent_avg_pause_time_ratio(),
2301 byte_size_in_proper_unit(committed_bytes),
2302 proper_unit_for_byte_size(committed_bytes),
2303 byte_size_in_proper_unit(uncommitted_bytes),
2304 proper_unit_for_byte_size(uncommitted_bytes),
2305 byte_size_in_proper_unit(expand_bytes_via_pct),
2306 proper_unit_for_byte_size(expand_bytes_via_pct),
2307 byte_size_in_proper_unit(expand_bytes),
2308 proper_unit_for_byte_size(expand_bytes));
2309 }
2310 return expand_bytes;
2311 } else {
2312 return 0;
2313 }
2314 }
2315
2316 void G1CollectorPolicy::note_start_of_mark_thread() {
2317 _mark_thread_startup_sec = os::elapsedTime();
2318 }
2319
2320 class CountCSClosure: public HeapRegionClosure {
2321 G1CollectorPolicy* _g1_policy;
2322 public:
2323 CountCSClosure(G1CollectorPolicy* g1_policy) :
2324 _g1_policy(g1_policy) {}
2325 bool doHeapRegion(HeapRegion* r) {
2326 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
2327 return false;
2328 }
2329 };
2330
2331 void G1CollectorPolicy::count_CS_bytes_used() {
2332 CountCSClosure cs_closure(this);
2333 _g1->collection_set_iterate(&cs_closure);
2334 }
2335
2336 static void print_indent(int level) {
2337 for (int j = 0; j < level+1; ++j)
2338 gclog_or_tty->print(" ");
2339 }
2340
2341 void G1CollectorPolicy::print_summary (int level,
2342 const char* str,
2343 NumberSeq* seq) const {
2344 double sum = seq->sum();
2345 print_indent(level);
2346 gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
2347 str, sum / 1000.0, seq->avg());
2348 }
2349
2350 void G1CollectorPolicy::print_summary_sd (int level,
2351 const char* str,
2352 NumberSeq* seq) const {
2353 print_summary(level, str, seq);
2354 print_indent(level + 5);
2355 gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2356 seq->num(), seq->sd(), seq->maximum());
2357 }
2358
2359 void G1CollectorPolicy::check_other_times(int level,
2360 NumberSeq* other_times_ms,
2361 NumberSeq* calc_other_times_ms) const {
2362 bool should_print = false;
2363
2364 double max_sum = MAX2(fabs(other_times_ms->sum()),
2365 fabs(calc_other_times_ms->sum()));
2366 double min_sum = MIN2(fabs(other_times_ms->sum()),
2367 fabs(calc_other_times_ms->sum()));
2368 double sum_ratio = max_sum / min_sum;
2369 if (sum_ratio > 1.1) {
2370 should_print = true;
2371 print_indent(level + 1);
2372 gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
2373 }
2374
2375 double max_avg = MAX2(fabs(other_times_ms->avg()),
2376 fabs(calc_other_times_ms->avg()));
2377 double min_avg = MIN2(fabs(other_times_ms->avg()),
2378 fabs(calc_other_times_ms->avg()));
2379 double avg_ratio = max_avg / min_avg;
2380 if (avg_ratio > 1.1) {
2381 should_print = true;
2382 print_indent(level + 1);
2383 gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
2384 }
2385
2386 if (other_times_ms->sum() < -0.01) {
2387 print_indent(level + 1);
2388 gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
2389 }
2390
2391 if (other_times_ms->avg() < -0.01) {
2392 print_indent(level + 1);
2393 gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
2394 }
2395
2396 if (calc_other_times_ms->sum() < -0.01) {
2397 should_print = true;
2398 print_indent(level + 1);
2399 gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
2400 }
2401
2402 if (calc_other_times_ms->avg() < -0.01) {
2403 should_print = true;
2404 print_indent(level + 1);
2405 gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
2406 }
2407
2408 if (should_print)
2409 print_summary(level, "Other(Calc)", calc_other_times_ms);
2410 }
2411
2412 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
2413 bool parallel = ParallelGCThreads > 0;
2414 MainBodySummary* body_summary = summary->main_body_summary();
2415 PopPreambleSummary* preamble_summary = summary->pop_preamble_summary();
2416
2417 if (summary->get_total_seq()->num() > 0) {
2418 print_summary_sd(0,
2419 (preamble_summary == NULL) ? "Non-Popular Pauses" :
2420 "Popular Pauses",
2421 summary->get_total_seq());
2422 if (preamble_summary != NULL) {
2423 print_summary(1, "Popularity Preamble",
2424 preamble_summary->get_pop_preamble_seq());
2425 print_summary(2, "Update RS", preamble_summary->get_pop_update_rs_seq());
2426 print_summary(2, "Scan RS", preamble_summary->get_pop_scan_rs_seq());
2427 print_summary(2, "Closure App",
2428 preamble_summary->get_pop_closure_app_seq());
2429 print_summary(2, "Evacuation",
2430 preamble_summary->get_pop_evacuation_seq());
2431 print_summary(2, "Other", preamble_summary->get_pop_other_seq());
2432 {
2433 NumberSeq* other_parts[] = {
2434 preamble_summary->get_pop_update_rs_seq(),
2435 preamble_summary->get_pop_scan_rs_seq(),
2436 preamble_summary->get_pop_closure_app_seq(),
2437 preamble_summary->get_pop_evacuation_seq()
2438 };
2439 NumberSeq calc_other_times_ms(preamble_summary->get_pop_preamble_seq(),
2440 4, other_parts);
2441 check_other_times(2, preamble_summary->get_pop_other_seq(),
2442 &calc_other_times_ms);
2443 }
2444 }
2445 if (body_summary != NULL) {
2446 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
2447 if (parallel) {
2448 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
2449 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
2450 print_summary(2, "Ext Root Scanning",
2451 body_summary->get_ext_root_scan_seq());
2452 print_summary(2, "Mark Stack Scanning",
2453 body_summary->get_mark_stack_scan_seq());
2454 print_summary(2, "Scan-Only Scanning",
2455 body_summary->get_scan_only_seq());
2456 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
2457 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
2458 print_summary(2, "Termination", body_summary->get_termination_seq());
2459 print_summary(2, "Other", body_summary->get_parallel_other_seq());
2460 {
2461 NumberSeq* other_parts[] = {
2462 body_summary->get_update_rs_seq(),
2463 body_summary->get_ext_root_scan_seq(),
2464 body_summary->get_mark_stack_scan_seq(),
2465 body_summary->get_scan_only_seq(),
2466 body_summary->get_scan_rs_seq(),
2467 body_summary->get_obj_copy_seq(),
2468 body_summary->get_termination_seq()
2469 };
2470 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
2471 7, other_parts);
2472 check_other_times(2, body_summary->get_parallel_other_seq(),
2473 &calc_other_times_ms);
2474 }
2475 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
2476 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
2477 } else {
2478 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
2479 print_summary(1, "Ext Root Scanning",
2480 body_summary->get_ext_root_scan_seq());
2481 print_summary(1, "Mark Stack Scanning",
2482 body_summary->get_mark_stack_scan_seq());
2483 print_summary(1, "Scan-Only Scanning",
2484 body_summary->get_scan_only_seq());
2485 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
2486 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
2487 }
2488 }
2489 print_summary(1, "Other", summary->get_other_seq());
2490 {
2491 NumberSeq calc_other_times_ms;
2492 if (body_summary != NULL) {
2493 // not abandoned
2494 if (parallel) {
2495 // parallel
2496 NumberSeq* other_parts[] = {
2497 body_summary->get_satb_drain_seq(),
2498 (preamble_summary == NULL) ? NULL :
2499 preamble_summary->get_pop_preamble_seq(),
2500 body_summary->get_parallel_seq(),
2501 body_summary->get_clear_ct_seq()
2502 };
2503 calc_other_times_ms = NumberSeq (summary->get_total_seq(),
2504 4, other_parts);
2505 } else {
2506 // serial
2507 NumberSeq* other_parts[] = {
2508 body_summary->get_satb_drain_seq(),
2509 (preamble_summary == NULL) ? NULL :
2510 preamble_summary->get_pop_preamble_seq(),
2511 body_summary->get_update_rs_seq(),
2512 body_summary->get_ext_root_scan_seq(),
2513 body_summary->get_mark_stack_scan_seq(),
2514 body_summary->get_scan_only_seq(),
2515 body_summary->get_scan_rs_seq(),
2516 body_summary->get_obj_copy_seq()
2517 };
2518 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
2519 8, other_parts);
2520 }
2521 } else {
2522 // abandoned
2523 NumberSeq* other_parts[] = {
2524 (preamble_summary == NULL) ? NULL :
2525 preamble_summary->get_pop_preamble_seq()
2526 };
2527 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
2528 1, other_parts);
2529 }
2530 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
2531 }
2532 } else {
2533 print_indent(0);
2534 gclog_or_tty->print_cr("none");
2535 }
2536 gclog_or_tty->print_cr("");
2537 }
2538
2539 void
2540 G1CollectorPolicy::print_abandoned_summary(PauseSummary* non_pop_summary,
2541 PauseSummary* pop_summary) const {
2542 bool printed = false;
2543 if (non_pop_summary->get_total_seq()->num() > 0) {
2544 printed = true;
2545 print_summary(non_pop_summary);
2546 }
2547 if (pop_summary->get_total_seq()->num() > 0) {
2548 printed = true;
2549 print_summary(pop_summary);
2550 }
2551
2552 if (!printed) {
2553 print_indent(0);
2554 gclog_or_tty->print_cr("none");
2555 gclog_or_tty->print_cr("");
2556 }
2557 }
2558
2559 void G1CollectorPolicy::print_tracing_info() const {
2560 if (TraceGen0Time) {
2561 gclog_or_tty->print_cr("ALL PAUSES");
2562 print_summary_sd(0, "Total", _all_pause_times_ms);
2563 gclog_or_tty->print_cr("");
2564 gclog_or_tty->print_cr("");
2565 gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
2566 gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
2567 gclog_or_tty->print_cr("");
2568
2569 gclog_or_tty->print_cr("NON-POPULAR PAUSES");
2570 print_summary(_non_pop_summary);
2571
2572 gclog_or_tty->print_cr("POPULAR PAUSES");
2573 print_summary(_pop_summary);
2574
2575 gclog_or_tty->print_cr("ABANDONED PAUSES");
2576 print_abandoned_summary(_non_pop_abandoned_summary,
2577 _pop_abandoned_summary);
2578
2579 gclog_or_tty->print_cr("MISC");
2580 print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
2581 print_summary_sd(0, "Yields", _all_yield_times_ms);
2582 for (int i = 0; i < _aux_num; ++i) {
2583 if (_all_aux_times_ms[i].num() > 0) {
2584 char buffer[96];
2585 sprintf(buffer, "Aux%d", i);
2586 print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
2587 }
2588 }
2589
2590 size_t all_region_num = _region_num_young + _region_num_tenured;
2591 gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
2592 "Tenured %8d (%6.2lf%%)",
2593 all_region_num,
2594 _region_num_young,
2595 (double) _region_num_young / (double) all_region_num * 100.0,
2596 _region_num_tenured,
2597 (double) _region_num_tenured / (double) all_region_num * 100.0);
2598
2599 if (!G1RSBarrierUseQueue) {
2600 gclog_or_tty->print_cr("Of %d times conc refinement was enabled, %d (%7.2f%%) "
2601 "did zero traversals.",
2602 _conc_refine_enabled, _conc_refine_zero_traversals,
2603 _conc_refine_enabled > 0 ?
2604 100.0 * (float)_conc_refine_zero_traversals/
2605 (float)_conc_refine_enabled : 0.0);
2606 gclog_or_tty->print_cr(" Max # of traversals = %d.",
2607 _conc_refine_max_traversals);
2608 gclog_or_tty->print_cr("");
2609 }
2610 }
2611 if (TraceGen1Time) {
2612 if (_all_full_gc_times_ms->num() > 0) {
2613 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2614 _all_full_gc_times_ms->num(),
2615 _all_full_gc_times_ms->sum() / 1000.0);
2616 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
2617 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
2618 _all_full_gc_times_ms->sd(),
2619 _all_full_gc_times_ms->maximum());
2620 }
2621 }
2622 }
2623
2624 void G1CollectorPolicy::print_yg_surv_rate_info() const {
2625 #ifndef PRODUCT
2626 _short_lived_surv_rate_group->print_surv_rate_summary();
2627 // add this call for any other surv rate groups
2628 #endif // PRODUCT
2629 }
2630
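// Steer the concurrent refinement delta towards G1ConcRefineTargTraversals:
// quadruple it if the last series did no traversals at all, grow it by
// 1.5x if we stayed below the target number of traversals, shrink it by
// 1.5x if we exceeded it, and clamp the result to [1, n_regions].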
2631 void G1CollectorPolicy::update_conc_refine_data() {
2632 unsigned traversals = _g1->concurrent_g1_refine()->disable();
2633 if (traversals == 0) _conc_refine_zero_traversals++;
2634 _conc_refine_max_traversals = MAX2(_conc_refine_max_traversals,
2635 (size_t)traversals);
2636
2637 if (G1PolicyVerbose > 1)
2638 gclog_or_tty->print_cr("Did a CR traversal series: %d traversals.", traversals);
2639 double multiplier = 1.0;
2640 if (traversals == 0) {
2641 multiplier = 4.0;
2642 } else if (traversals > (size_t)G1ConcRefineTargTraversals) {
2643 multiplier = 1.0/1.5;
2644 } else if (traversals < (size_t)G1ConcRefineTargTraversals) {
2645 multiplier = 1.5;
2646 }
2647 if (G1PolicyVerbose > 1) {
2648 gclog_or_tty->print_cr(" Multiplier = %7.2f.", multiplier);
2649 gclog_or_tty->print(" Delta went from %d regions to ",
2650 _conc_refine_current_delta);
2651 }
2652 _conc_refine_current_delta =
2653 MIN2(_g1->n_regions(),
2654 (size_t)(_conc_refine_current_delta * multiplier));
2655 _conc_refine_current_delta =
2656 MAX2(_conc_refine_current_delta, (size_t)1);
2657 if (G1PolicyVerbose > 1) {
2658 gclog_or_tty->print_cr("%d regions.", _conc_refine_current_delta);
2659 }
2660 _conc_refine_enabled++;
2661 }
2662
2663 void G1CollectorPolicy::set_single_region_collection_set(HeapRegion* hr) {
2664 assert(collection_set() == NULL, "Must be no current CS.");
2665 _collection_set_size = 0;
2666 _collection_set_bytes_used_before = 0;
2667 add_to_collection_set(hr);
2668 count_CS_bytes_used();
2669 }
2670
2671 bool
2672 G1CollectorPolicy::should_add_next_region_to_young_list() {
2673 assert(in_young_gc_mode(), "should be in young GC mode");
2674 bool ret;
2675 size_t young_list_length = _g1->young_list_length();
2676
2677 if (young_list_length < _young_list_target_length) {
2678 ret = true;
2679 ++_region_num_young;
2680 } else {
2681 ret = false;
2682 ++_region_num_tenured;
2683 }
2684
2685 return ret;
2686 }
2687
2688 #ifndef PRODUCT
2689 // for debugging, bit of a hack...
2690 static char*
2691 region_num_to_mbs(int length) {
2692 static char buffer[64];
2693 double bytes = (double) (length * HeapRegion::GrainBytes);
2694 double mbs = bytes / (double) (1024 * 1024);
2695 sprintf(buffer, "%7.2lfMB", mbs);
2696 return buffer;
2697 }
2698 #endif // PRODUCT
2699
2700 void
2701 G1CollectorPolicy::checkpoint_conc_overhead() {
2702 double conc_overhead = 0.0;
2703 if (G1AccountConcurrentOverhead)
2704 conc_overhead = COTracker::totalPredConcOverhead();
2705 _mmu_tracker->update_conc_overhead(conc_overhead);
2706 #if 0
2707 gclog_or_tty->print(" CO %1.4lf TARGET %1.4lf",
2708 conc_overhead, _mmu_tracker->max_gc_time());
2709 #endif
2710 }
2711
2712
2713 uint G1CollectorPolicy::max_regions(int purpose) {
2714 switch (purpose) {
2715 case GCAllocForSurvived:
2716 return G1MaxSurvivorRegions;
2717 case GCAllocForTenured:
2718 return UINT_MAX;
2719 default:
2720 return UINT_MAX;
2721 };
2722 }
2723
2724 void
2725 G1CollectorPolicy_BestRegionsFirst::
2726 set_single_region_collection_set(HeapRegion* hr) {
2727 G1CollectorPolicy::set_single_region_collection_set(hr);
2728 _collectionSetChooser->removeRegion(hr);
2729 }
2730
2731
2732 bool
2733 G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
2734 word_size) {
2735 assert(_g1->regions_accounted_for(), "Region leakage!");
2736 // Initiate a pause when we reach the steady-state "used" target.
2737 size_t used_hard = (_g1->capacity() / 100) * G1SteadyStateUsed;
2738 size_t used_soft =
2739 MAX2((_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta),
2740 used_hard/2);
2741 size_t used = _g1->used();
2742
2743 double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
2744
2745 size_t young_list_length = _g1->young_list_length();
2746 bool reached_target_length = young_list_length >= _young_list_target_length;
2747
2748 if (in_young_gc_mode()) {
2749 if (reached_target_length) {
2750 assert( young_list_length > 0 && _g1->young_list_length() > 0,
2751 "invariant" );
2752 _target_pause_time_ms = max_pause_time_ms;
2753 return true;
2754 }
2755 } else {
2756 guarantee( false, "should not reach here" );
2757 }
2758
2759 return false;
2760 }
2761
2762 #ifndef PRODUCT
2763 class HRSortIndexIsOKClosure: public HeapRegionClosure {
2764 CollectionSetChooser* _chooser;
2765 public:
2766 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
2767 _chooser(chooser) {}
2768
2769 bool doHeapRegion(HeapRegion* r) {
2770 if (!r->continuesHumongous()) {
2771 assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
2772 }
2773 return false;
2774 }
2775 };
2776
2777 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
2778 HRSortIndexIsOKClosure cl(_collectionSetChooser);
2779 _g1->heap_region_iterate(&cl);
2780 return true;
2781 }
2782 #endif
2783
2784 void
2785 G1CollectorPolicy_BestRegionsFirst::
2786 record_collection_pause_start(double start_time_sec, size_t start_used) {
2787 G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
2788 }
2789
2790 class NextNonCSElemFinder: public HeapRegionClosure {
2791 HeapRegion* _res;
2792 public:
2793 NextNonCSElemFinder(): _res(NULL) {}
2794 bool doHeapRegion(HeapRegion* r) {
2795 if (!r->in_collection_set()) {
2796 _res = r;
2797 return true;
2798 } else {
2799 return false;
2800 }
2801 }
2802 HeapRegion* res() { return _res; }
2803 };
2804
2805 class KnownGarbageClosure: public HeapRegionClosure {
2806 CollectionSetChooser* _hrSorted;
2807
2808 public:
2809 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
2810 _hrSorted(hrSorted)
2811 {}
2812
2813 bool doHeapRegion(HeapRegion* r) {
2814 // We only include humongous regions in collection
2815 // sets when concurrent mark shows that their contained object is
2816 // unreachable.
2817
2818 // Do we have any marking information for this region?
2819 if (r->is_marked()) {
2820 // We don't include humongous regions in collection
2821 // sets because we collect them immediately at the end of a marking
2822 // cycle. We also don't include young regions because we *must*
2823 // include them in the next collection pause.
2824 if (!r->isHumongous() && !r->is_young()) {
2825 _hrSorted->addMarkedHeapRegion(r);
2826 }
2827 }
2828 return false;
2829 }
2830 };
2831
2832 class ParKnownGarbageHRClosure: public HeapRegionClosure {
2833 CollectionSetChooser* _hrSorted;
2834 jint _marked_regions_added;
2835 jint _chunk_size;
2836 jint _cur_chunk_idx;
2837 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
2838 int _worker;
2839 int _invokes;
2840
2841 void get_new_chunk() {
2842 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
2843 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
2844 }
2845 void add_region(HeapRegion* r) {
2846 if (_cur_chunk_idx == _cur_chunk_end) {
2847 get_new_chunk();
2848 }
2849 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
2850 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
2851 _marked_regions_added++;
2852 _cur_chunk_idx++;
2853 }
2854
2855 public:
2856 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
2857 jint chunk_size,
2858 int worker) :
2859 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
2860 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
2861 _invokes(0)
2862 {}
2863
2864 bool doHeapRegion(HeapRegion* r) {
2865 // We only include humongous regions in collection
2866 // sets when concurrent mark shows that their contained object is
2867 // unreachable.
2868 _invokes++;
2869
2870 // Do we have any marking information for this region?
2871 if (r->is_marked()) {
2872 // We don't include humongous regions in collection
2873 // sets because we collect them immediately at the end of a marking
2874 // cycle.
2875 // We also do not include young regions in collection sets
2876 if (!r->isHumongous() && !r->is_young()) {
2877 add_region(r);
2878 }
2879 }
2880 return false;
2881 }
2882 jint marked_regions_added() { return _marked_regions_added; }
2883 int invokes() { return _invokes; }
2884 };
2885
2886 class ParKnownGarbageTask: public AbstractGangTask {
2887 CollectionSetChooser* _hrSorted;
2888 jint _chunk_size;
2889 G1CollectedHeap* _g1;
2890 public:
2891 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
2892 AbstractGangTask("ParKnownGarbageTask"),
2893 _hrSorted(hrSorted), _chunk_size(chunk_size),
2894 _g1(G1CollectedHeap::heap())
2895 {}
2896
2897 void work(int i) {
2898 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
2899 // Back to zero for the claim value.
2900 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i, 0);
2901 jint regions_added = parKnownGarbageCl.marked_regions_added();
2902 _hrSorted->incNumMarkedHeapRegions(regions_added);
2903 if (G1PrintParCleanupStats) {
2904 gclog_or_tty->print(" Thread %d called %d times, added %d regions to list.\n",
2905 i, parKnownGarbageCl.invokes(), regions_added);
2906 }
2907 }
2908 };
2909
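// After concurrent-mark cleanup, rebuild the sorted list of marked
// regions that future collection sets will be chosen from. With parallel
// GC threads the heap is walked in claimed chunks of
// MAX2(n_regions / (ParallelGCThreads * OverpartitionFactor), MinChunkSize)
// regions; otherwise a single serial KnownGarbageClosure pass is used.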
2910 void
2911 G1CollectorPolicy_BestRegionsFirst::
2912 record_concurrent_mark_cleanup_end(size_t freed_bytes,
2913 size_t max_live_bytes) {
2914 double start;
2915 if (G1PrintParCleanupStats) start = os::elapsedTime();
2916 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
2917
2918 _collectionSetChooser->clearMarkedHeapRegions();
2919 double clear_marked_end;
2920 if (G1PrintParCleanupStats) {
2921 clear_marked_end = os::elapsedTime();
2922 gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
2923 (clear_marked_end - start)*1000.0);
2924 }
2925 if (ParallelGCThreads > 0) {
2926 const size_t OverpartitionFactor = 4;
2927 const size_t MinChunkSize = 8;
2928 const size_t ChunkSize =
2929 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
2930 MinChunkSize);
2931 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
2932 ChunkSize);
2933 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
2934 (int) ChunkSize);
2935 _g1->workers()->run_task(&parKnownGarbageTask);
2936 } else {
2937 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
2938 _g1->heap_region_iterate(&knownGarbagecl);
2939 }
2940 double known_garbage_end;
2941 if (G1PrintParCleanupStats) {
2942 known_garbage_end = os::elapsedTime();
2943 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
2944 (known_garbage_end - clear_marked_end)*1000.0);
2945 }
2946 _collectionSetChooser->sortMarkedHeapRegions();
2947 double sort_end;
2948 if (G1PrintParCleanupStats) {
2949 sort_end = os::elapsedTime();
2950 gclog_or_tty->print_cr(" sorting: %8.3f ms.",
2951 (sort_end - known_garbage_end)*1000.0);
2952 }
2953
2954 record_concurrent_mark_cleanup_end_work2();
2955 double work2_end;
2956 if (G1PrintParCleanupStats) {
2957 work2_end = os::elapsedTime();
2958 gclog_or_tty->print_cr(" work2: %8.3f ms.",
2959 (work2_end - sort_end)*1000.0);
2960 }
2961 }
2962
2963 // Add the heap region to the collection set, updating the collection set
2964 // size and the bytes-used-before accounting.
2965 void G1CollectorPolicy::
2966 add_to_collection_set(HeapRegion* hr) {
2967 if (G1TraceRegions) {
2968 gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], "
2969 "top "PTR_FORMAT", young %s",
2970 hr->hrs_index(), hr->bottom(), hr->end(),
2971 hr->top(), (hr->is_young()) ? "YES" : "NO");
2972 }
2973
2974 if (_g1->mark_in_progress())
2975 _g1->concurrent_mark()->registerCSetRegion(hr);
2976
2977 assert(!hr->in_collection_set(),
2978 "should not already be in the CSet");
2979 hr->set_in_collection_set(true);
2980 hr->set_next_in_collection_set(_collection_set);
2981 _collection_set = hr;
2982 _collection_set_size++;
2983 _collection_set_bytes_used_before += hr->used();
2984 }
2985
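// Collection-set selection, in outline: start from the predicted "base"
// cost of the pause (RS update/scan plus constant overhead) and treat the
// rest of _target_pause_time_ms as a budget. Every region on the young
// list is always added (its predicted cost is subtracted from the budget
// but does not gate inclusion); then, when not doing a fully-young
// collection, marked non-young regions are pulled from the
// CollectionSetChooser, best first, until the time budget runs out (or,
// with a fixed young list length, until the collection set reaches that
// length).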
2986 void
2987 G1CollectorPolicy_BestRegionsFirst::
2988 choose_collection_set(HeapRegion* pop_region) {
2989 double non_young_start_time_sec;
2990 start_recording_regions();
2991
2992 if (pop_region != NULL) {
2993 _target_pause_time_ms = (double) G1MaxPauseTimeMS;
2994 } else {
2995 guarantee(_target_pause_time_ms > -1.0,
2996 "_target_pause_time_ms should have been set!");
2997 }
2998
2999 // pop region is either null (and so is CS), or else it *is* the CS.
3000 assert(_collection_set == pop_region, "Precondition");
3001
3002 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
3003 double predicted_pause_time_ms = base_time_ms;
3004
3005 double target_time_ms = _target_pause_time_ms;
3006 double time_remaining_ms = target_time_ms - base_time_ms;
3007
3008 // the 10% and 50% values are arbitrary...
3009 if (time_remaining_ms < 0.10*target_time_ms) {
3010 time_remaining_ms = 0.50 * target_time_ms;
3011 _within_target = false;
3012 } else {
3013 _within_target = true;
3014 }
3015
3016 // We figure out the number of bytes available for future to-space.
3017 // For new regions without marking information, we must assume the
3018 // worst-case of complete survival. If we have marking information for a
3019 // region, we can bound the amount of live data. We can add a number of
3020 // such regions, as long as the sum of the live data bounds does not
3021 // exceed the available evacuation space.
3022 size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
3023
3024 size_t expansion_bytes =
3025 _g1->expansion_regions() * HeapRegion::GrainBytes;
3026
3027 if (pop_region == NULL) {
3028 _collection_set_bytes_used_before = 0;
3029 _collection_set_size = 0;
3030 }
3031
3032 // Adjust for expansion and slop.
3033 max_live_bytes = max_live_bytes + expansion_bytes;
3034
3035 assert(pop_region != NULL || _g1->regions_accounted_for(), "Region leakage!");
3036
3037 HeapRegion* hr;
3038 if (in_young_gc_mode()) {
3039 double young_start_time_sec = os::elapsedTime();
3040
3041 if (G1PolicyVerbose > 0) {
3042 gclog_or_tty->print_cr("Adding %d young regions to the CSet",
3043 _g1->young_list_length());
3044 }
3045 _young_cset_length = 0;
3046 _last_young_gc_full = full_young_gcs();
3047 if (_last_young_gc_full)
3048 ++_full_young_pause_num;
3049 else
3050 ++_partial_young_pause_num;
3051 hr = _g1->pop_region_from_young_list();
3052 while (hr != NULL) {
3053
3054 assert( hr->young_index_in_cset() == -1, "invariant" );
3055 assert( hr->age_in_surv_rate_group() != -1, "invariant" );
3056 hr->set_young_index_in_cset((int) _young_cset_length);
3057
3058 ++_young_cset_length;
3059 double predicted_time_ms = predict_region_elapsed_time_ms(hr, true);
3060 time_remaining_ms -= predicted_time_ms;
3061 predicted_pause_time_ms += predicted_time_ms;
3062 if (hr == pop_region) {
3063 // The popular region was young. Skip over it.
3064 assert(hr->in_collection_set(), "It's the pop region.");
3065 } else {
3066 assert(!hr->in_collection_set(), "It's not the pop region.");
3067 add_to_collection_set(hr);
3068 record_cset_region(hr, true);
3069 }
3070 max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
3071 if (G1PolicyVerbose > 0) {
3072 gclog_or_tty->print_cr(" Added [" PTR_FORMAT ", " PTR_FORMAT") to CS.",
3073 hr->bottom(), hr->end());
3074 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
3075 max_live_bytes/K);
3076 }
3077 hr = _g1->pop_region_from_young_list();
3078 }
3079
3080 record_scan_only_regions(_g1->young_list_scan_only_length());
3081
3082 double young_end_time_sec = os::elapsedTime();
3083 _recorded_young_cset_choice_time_ms =
3084 (young_end_time_sec - young_start_time_sec) * 1000.0;
3085
3086 non_young_start_time_sec = os::elapsedTime();
3087
3088 if (_young_cset_length > 0 && _last_young_gc_full) {
3089 // don't bother adding more regions...
3090 goto choose_collection_set_end;
3091 }
3092 } else if (pop_region != NULL) {
3093 // We're not in young mode, and we chose a popular region; don't choose
3094 // any more.
3095 return;
3096 }
3097
3098 if (!in_young_gc_mode() || !full_young_gcs()) {
3099 bool should_continue = true;
3100 NumberSeq seq;
3101 double avg_prediction = 100000000000000000.0; // something very large
3102 do {
3103 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
3104 avg_prediction);
3105 if (hr != NULL && !hr->popular()) {
3106 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
3107 time_remaining_ms -= predicted_time_ms;
3108 predicted_pause_time_ms += predicted_time_ms;
3109 add_to_collection_set(hr);
3110 record_cset_region(hr, false);
3111 max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
3112 if (G1PolicyVerbose > 0) {
3113 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
3114 max_live_bytes/K);
3115 }
3116 seq.add(predicted_time_ms);
3117 avg_prediction = seq.avg() + seq.sd();
3118 }
3119 should_continue =
3120 ( hr != NULL) &&
3121 ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
3122 : _collection_set_size < _young_list_fixed_length );
3123 } while (should_continue);
3124
3125 if (!adaptive_young_list_length() &&
3126 _collection_set_size < _young_list_fixed_length)
3127 _should_revert_to_full_young_gcs = true;
3128 }
3129
3130 choose_collection_set_end:
3131 count_CS_bytes_used();
3132
3133 end_recording_regions();
3134
3135 double non_young_end_time_sec = os::elapsedTime();
3136 _recorded_non_young_cset_choice_time_ms =
3137 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
3138 }
3139
3140 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
3141 G1CollectorPolicy::record_full_collection_end();
3142 _collectionSetChooser->updateAfterFullCollection();
3143 }
3144
3145 void G1CollectorPolicy_BestRegionsFirst::
3146 expand_if_possible(size_t numRegions) {
3147 size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
3148 _g1->expand(expansion_bytes);
3149 }
3150
3151 void G1CollectorPolicy_BestRegionsFirst::
3152 record_collection_pause_end(bool popular, bool abandoned) {
3153 G1CollectorPolicy::record_collection_pause_end(popular, abandoned);
3154 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
3155 }
3156
3157 // Local Variables: ***
3158 // c-indentation-style: gnu ***
3159 // End: ***