comparison src/share/vm/gc_implementation/g1/collectionSetChooser.cpp @ 4912:a9647476d1a4

7132029: G1: mixed GC phase lasts for longer than it should
Summary: Revamp of the mechanism that chooses old regions for inclusion in the CSet. It simplifies the code and introduces min and max bounds on the number of old regions added to the CSet at each mixed GC, to avoid pathological cases. It also ensures that a mixed GC will always find old regions to add to the CSet (i.e., it eliminates the case, possible today, where a mixed GC collects no old regions at all).
Reviewed-by: johnc, brutisso
author tonyp
date Wed, 15 Feb 2012 13:06:53 -0500
parents 441e946dc1af
children 21595f05bc93
comparing 4911:d903bf750e9f with 4912:a9647476d1a4
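
The core of the change is the pair of bounds that govern how many old regions each mixed GC takes: a minimum derived from G1MaxMixedGCNum (so the candidate list drains within the desired number of mixed GCs) and a maximum derived from G1OldCSetRegionThresholdPercent (so a single GC never takes more than a fixed fraction of the heap). A rough standalone sketch of that arithmetic, with the two JVM flags modeled as plain constants and all values invented for illustration:

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for the flags G1MaxMixedGCNum and
// G1OldCSetRegionThresholdPercent introduced by this change.
static const size_t kMaxMixedGCNum = 4;
static const size_t kOldCSetRegionThresholdPercent = 10;

// ceil(a / b) in integer arithmetic; equivalent to the
// "emulate ceiling" pattern used in the diff below.
static size_t ceil_div(size_t a, size_t b) {
  return (a + b - 1) / b;
}

int main() {
  size_t marked_regions = 100;  // old regions found by marking
  size_t heap_regions = 1000;   // total regions in the heap
  size_t min_old = ceil_div(marked_regions, kMaxMixedGCNum);                     // 25
  size_t max_old = ceil_div(heap_regions * kOldCSetRegionThresholdPercent, 100); // 100
  printf("per mixed GC: at least %zu, at most %zu old regions\n", min_old, max_old);
  return 0;
}

Clamping the per-GC old-region count to that [min, max] range is what addresses both sides of the bug: too few old regions per GC (the overlong mixed phase in the synopsis) and the pathological too-many-at-once case.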
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -46,10 +46,12 @@
   }
 }
 
 #ifndef PRODUCT
 bool CSetChooserCache::verify() {
+  guarantee(false, "CSetChooserCache::verify(): don't call this any more");
+
   int index = _first;
   HeapRegion *prev = NULL;
   for (int i = 0; i < _occupancy; ++i) {
     guarantee(_cache[index] != NULL, "cache entry should not be empty");
     HeapRegion *hr = _cache[index];
@@ -73,10 +75,12 @@
   return true;
 }
 #endif // PRODUCT
 
 void CSetChooserCache::insert(HeapRegion *hr) {
+  guarantee(false, "CSetChooserCache::insert(): don't call this any more");
+
   assert(!is_full(), "cache should not be empty");
   hr->calc_gc_efficiency();
 
   int empty_index;
   if (_occupancy == 0) {
@@ -102,10 +106,13 @@
   ++_occupancy;
   assert(verify(), "cache should be consistent");
 }
 
 HeapRegion *CSetChooserCache::remove_first() {
+  guarantee(false, "CSetChooserCache::remove_first(): "
+            "don't call this any more");
+
   if (_occupancy > 0) {
     assert(_cache[_first] != NULL, "cache should have at least one region");
     HeapRegion *ret = _cache[_first];
     _cache[_first] = NULL;
     ret->set_sort_index(-1);
@@ -116,20 +123,39 @@
   } else {
     return NULL;
   }
 }
 
-static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+// Even though we don't use the GC efficiency in our heuristics as
+// much as we used to, we still order according to GC efficiency. This
+// will cause regions with a lot of live objects and large RSets to
+// end up at the end of the array. Given that we might skip collecting
+// the last few old regions, if after a few mixed GCs the remaining
+// have reclaimable bytes under a certain threshold, the hope is that
+// the ones we'll skip are ones with both large RSets and a lot of
+// live objects, not the ones with just a lot of live objects if we
+// ordered according to the amount of reclaimable bytes per region.
+static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
   if (hr1 == NULL) {
-    if (hr2 == NULL) return 0;
-    else return 1;
+    if (hr2 == NULL) {
+      return 0;
+    } else {
+      return 1;
+    }
   } else if (hr2 == NULL) {
     return -1;
   }
-  if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1;
-  else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1;
-  else return 0;
+
+  double gc_eff1 = hr1->gc_efficiency();
+  double gc_eff2 = hr2->gc_efficiency();
+  if (gc_eff1 > gc_eff2) {
+    return -1;
+  } if (gc_eff1 < gc_eff2) {
+    return 1;
+  } else {
+    return 0;
+  }
 }
 
 static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
   return orderRegions(*hr1p, *hr2p);
 }
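
Note on the comparator above: orderRegions() establishes a descending order by GC efficiency, with NULL entries pushed past every real region, so the regions most worth collecting sit at the front of the sorted array and the ones a mixed phase may skip sit at the tail. A minimal sketch of the same ordering in isolation (regions reduced to a single efficiency field; all names here are invented for the sketch):

#include <algorithm>
#include <cstdio>
#include <vector>

struct FakeRegion { double gc_efficiency; };

// Mirrors orderRegions(): higher efficiency sorts first, NULLs last.
static bool region_less(const FakeRegion* a, const FakeRegion* b) {
  if (a == NULL) return false;  // NULL never precedes anything
  if (b == NULL) return true;   // every real region precedes NULL
  return a->gc_efficiency > b->gc_efficiency;
}

int main() {
  FakeRegion r1 = {0.5}, r2 = {2.0}, r3 = {1.25};
  std::vector<const FakeRegion*> v;
  v.push_back(&r1); v.push_back(NULL); v.push_back(&r2); v.push_back(&r3);
  std::sort(v.begin(), v.end(), region_less);
  for (size_t i = 0; i < v.size(); ++i) {
    if (v[i] == NULL) {
      printf("NULL\n");
    } else {
      printf("%.2f\n", v[i]->gc_efficiency);
    }
  }
  // Prints 2.00, 1.25, 0.50, NULL: the most efficient regions are
  // taken first, so any skipped tail is the least efficient work.
  return 0;
}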
@@ -149,65 +175,73 @@
   //
   // Note: containing object is allocated on C heap since it is CHeapObj.
   //
   _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
                                                    ResourceObj::C_HEAP),
-                 100),
-                 true),
-  _curMarkedIndex(0),
-  _numMarkedRegions(0),
-  _unmarked_age_1_returned_as_new(false),
-  _first_par_unreserved_idx(0)
-{}
-
-
+                 100), true /* C_Heap */),
+  _curr_index(0), _length(0),
+  _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
+  _first_par_unreserved_idx(0) {
+  _regionLiveThresholdBytes =
+    HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
+}
 
 #ifndef PRODUCT
 bool CollectionSetChooser::verify() {
+  guarantee(_length >= 0, err_msg("_length: %d", _length));
+  guarantee(0 <= _curr_index && _curr_index <= _length,
+            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
   int index = 0;
-  guarantee(_curMarkedIndex <= _numMarkedRegions,
-            "_curMarkedIndex should be within bounds");
-  while (index < _curMarkedIndex) {
-    guarantee(_markedRegions.at(index++) == NULL,
-              "all entries before _curMarkedIndex should be NULL");
+  size_t sum_of_reclaimable_bytes = 0;
+  while (index < _curr_index) {
+    guarantee(_markedRegions.at(index) == NULL,
+              "all entries before _curr_index should be NULL");
+    index += 1;
   }
   HeapRegion *prev = NULL;
-  while (index < _numMarkedRegions) {
+  while (index < _length) {
     HeapRegion *curr = _markedRegions.at(index++);
     guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
     int si = curr->sort_index();
     guarantee(!curr->is_young(), "should not be young!");
+    guarantee(!curr->isHumongous(), "should not be humongous!");
     guarantee(si > -1 && si == (index-1), "sort index invariant");
     if (prev != NULL) {
-      guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
+      guarantee(orderRegions(prev, curr) != 1,
+                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
+                        prev->gc_efficiency(), curr->gc_efficiency()));
     }
+    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
     prev = curr;
   }
-  return _cache.verify();
+  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+            err_msg("reclaimable bytes inconsistent, "
+                    "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
+                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
+  return true;
 }
 #endif
 
-void
-CollectionSetChooser::fillCache() {
-  while (!_cache.is_full() && (_curMarkedIndex < _numMarkedRegions)) {
-    HeapRegion* hr = _markedRegions.at(_curMarkedIndex);
+void CollectionSetChooser::fillCache() {
+  guarantee(false, "fillCache: don't call this any more");
+
+  while (!_cache.is_full() && (_curr_index < _length)) {
+    HeapRegion* hr = _markedRegions.at(_curr_index);
     assert(hr != NULL,
            err_msg("Unexpected NULL hr in _markedRegions at index %d",
-                   _curMarkedIndex));
-    _curMarkedIndex += 1;
+                   _curr_index));
+    _curr_index += 1;
     assert(!hr->is_young(), "should not be young!");
-    assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
+    assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
     _markedRegions.at_put(hr->sort_index(), NULL);
     _cache.insert(hr);
     assert(!_cache.is_empty(), "cache should not be empty");
   }
   assert(verify(), "cache should be consistent");
 }
 
-void
-CollectionSetChooser::sortMarkedHeapRegions() {
-  guarantee(_cache.is_empty(), "cache should be empty");
+void CollectionSetChooser::sortMarkedHeapRegions() {
   // First trim any unused portion of the top in the parallel case.
   if (_first_par_unreserved_idx > 0) {
     if (G1PrintParCleanupStats) {
       gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
                           _markedRegions.length(), _first_par_unreserved_idx);
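
One detail from the hunk above: the constructor now precomputes _regionLiveThresholdBytes from G1OldCSetRegionLiveThresholdPercent, presumably so that mostly-live regions can be filtered out cheaply when candidates are added. The arithmetic, with illustrative numbers only (a 1 MB region and a 90% threshold; both values are invented for the example):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t grain_bytes = 1024 * 1024; // stand-in for HeapRegion::GrainBytes
  const size_t live_percent = 90;         // stand-in for G1OldCSetRegionLiveThresholdPercent
  size_t threshold = grain_bytes * live_percent / 100;
  // 943718 bytes: a region with more live data than this would be
  // considered too expensive to evacuate (under these assumptions).
  printf("%zu\n", threshold);
  return 0;
}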
@@ -215,47 +249,82 @@
     assert(_first_par_unreserved_idx <= _markedRegions.length(),
            "Or we didn't reserved enough length");
     _markedRegions.trunc_to(_first_par_unreserved_idx);
   }
   _markedRegions.sort(orderRegions);
-  assert(_numMarkedRegions <= _markedRegions.length(), "Requirement");
-  assert(_numMarkedRegions == 0
-         || _markedRegions.at(_numMarkedRegions-1) != NULL,
-         "Testing _numMarkedRegions");
-  assert(_numMarkedRegions == _markedRegions.length()
-         || _markedRegions.at(_numMarkedRegions) == NULL,
-         "Testing _numMarkedRegions");
+  assert(_length <= _markedRegions.length(), "Requirement");
+  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
+         "Testing _length");
+  assert(_length == _markedRegions.length() ||
+         _markedRegions.at(_length) == NULL, "Testing _length");
   if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr(" Sorted %d marked regions.", _numMarkedRegions);
+    gclog_or_tty->print_cr(" Sorted %d marked regions.", _length);
   }
-  for (int i = 0; i < _numMarkedRegions; i++) {
+  for (int i = 0; i < _length; i++) {
     assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
     _markedRegions.at(i)->set_sort_index(i);
   }
   if (G1PrintRegionLivenessInfo) {
     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
-    for (int i = 0; i < _numMarkedRegions; ++i) {
+    for (int i = 0; i < _length; ++i) {
       HeapRegion* r = _markedRegions.at(i);
       cl.doHeapRegion(r);
     }
   }
-  assert(verify(), "should now be sorted");
+  assert(verify(), "CSet chooser verification");
 }
 
-void
-CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+size_t CollectionSetChooser::calcMinOldCSetLength() {
+  // The min old CSet region bound is based on the maximum desired
+  // number of mixed GCs after a cycle. I.e., even if some old regions
+  // look expensive, we should add them to the CSet anyway to make
+  // sure we go through the available old regions in no more than the
+  // maximum desired number of mixed GCs.
+  //
+  // The calculation is based on the number of marked regions we added
+  // to the CSet chooser in the first place, not how many remain, so
+  // that the result is the same during all mixed GCs that follow a cycle.
+
+  const size_t region_num = (size_t) _length;
+  const size_t gc_num = (size_t) G1MaxMixedGCNum;
+  size_t result = region_num / gc_num;
+  // emulate ceiling
+  if (result * gc_num < region_num) {
+    result += 1;
+  }
+  return result;
+}
+
+size_t CollectionSetChooser::calcMaxOldCSetLength() {
+  // The max old CSet region bound is based on the threshold expressed
+  // as a percentage of the heap size. I.e., it should bound the
+  // number of old regions added to the CSet irrespective of how many
+  // of them are available.
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  const size_t region_num = g1h->n_regions();
+  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
+  size_t result = region_num * perc / 100;
+  // emulate ceiling
+  if (100 * result < region_num * perc) {
+    result += 1;
+  }
+  return result;
+}
+
+void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
   assert(!hr->isHumongous(),
          "Humongous regions shouldn't be added to the collection set");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.append(hr);
-  _numMarkedRegions++;
+  _length++;
+  _remainingReclaimableBytes += hr->reclaimable_bytes();
   hr->calc_gc_efficiency();
 }
 
-void
-CollectionSetChooser::
-prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
+void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
+                                                             size_t chunkSize) {
   _first_par_unreserved_idx = 0;
   int n_threads = ParallelGCThreads;
   if (UseDynamicNumberOfGCThreads) {
    assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
           "Should have been set earlier");
@@ -272,12 +341,11 @@
     (n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
   assert( aligned_n_regions % chunkSize == 0, "should be aligned" );
   _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
 }
 
-jint
-CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
+jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
   // Don't do this assert because this can be called at a point
   // where the loop up stream will not execute again but might
   // try to claim more chunks (loop test has not been done yet).
   // assert(_markedRegions.length() > _first_par_unreserved_idx,
   // "Striding beyond the marked regions");
@@ -285,85 +353,39 @@
   assert(_markedRegions.length() > res + n_regions - 1,
          "Should already have been expanded");
   return res - n_regions;
 }
 
-void
-CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
+void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
   assert(_markedRegions.at(index) == NULL, "precondition");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.at_put(index, hr);
   hr->calc_gc_efficiency();
 }
 
-void
-CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) {
-  (void)Atomic::add(inc_by, &_numMarkedRegions);
-}
-
-void
-CollectionSetChooser::clearMarkedHeapRegions(){
+void CollectionSetChooser::updateTotals(jint region_num,
+                                        size_t reclaimable_bytes) {
+  // Only take the lock if we actually need to update the totals.
+  if (region_num > 0) {
+    assert(reclaimable_bytes > 0, "invariant");
+    // We could have just used atomics instead of taking the
+    // lock. However, we currently don't have an atomic add for size_t.
+    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
+    _length += (int) region_num;
+    _remainingReclaimableBytes += reclaimable_bytes;
+  } else {
+    assert(reclaimable_bytes == 0, "invariant");
+  }
+}
+
+void CollectionSetChooser::clearMarkedHeapRegions() {
   for (int i = 0; i < _markedRegions.length(); i++) {
     HeapRegion* r = _markedRegions.at(i);
-    if (r != NULL) r->set_sort_index(-1);
+    if (r != NULL) {
+      r->set_sort_index(-1);
+    }
   }
   _markedRegions.clear();
-  _curMarkedIndex = 0;
-  _numMarkedRegions = 0;
-  _cache.clear();
+  _curr_index = 0;
+  _length = 0;
+  _remainingReclaimableBytes = 0;
 };
-
-void
-CollectionSetChooser::updateAfterFullCollection() {
-  clearMarkedHeapRegions();
-}
-
-// if time_remaining < 0.0, then this method should try to return
-// a region, whether it fits within the remaining time or not
-HeapRegion*
-CollectionSetChooser::getNextMarkedRegion(double time_remaining,
-                                          double avg_prediction) {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
-  fillCache();
-  if (_cache.is_empty()) {
-    assert(_curMarkedIndex == _numMarkedRegions,
-           "if cache is empty, list should also be empty");
-    ergo_verbose0(ErgoCSetConstruction,
-                  "stop adding old regions to CSet",
-                  ergo_format_reason("cache is empty"));
-    return NULL;
-  }
-
-  HeapRegion *hr = _cache.get_first();
-  assert(hr != NULL, "if cache not empty, first entry should be non-null");
-  double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false);
-
-  if (g1p->adaptive_young_list_length()) {
-    if (time_remaining - predicted_time < 0.0) {
-      g1h->check_if_region_is_too_expensive(predicted_time);
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than remaining time")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("remaining time"),
-                    predicted_time, time_remaining);
-      return NULL;
-    }
-  } else {
-    double threshold = 2.0 * avg_prediction;
-    if (predicted_time > threshold) {
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than threshold")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("threshold"),
-                    predicted_time, threshold);
-      return NULL;
-    }
-  }
-
-  HeapRegion *hr2 = _cache.remove_first();
-  assert(hr == hr2, "cache contents should not have changed");
-
-  return hr;
-}
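
Finally, updateTotals() replaces the old lock-free incNumMarkedHeapRegions(): since (per the comment) HotSpot had no atomic add for size_t at the time, workers take ParGCRareEvent_lock instead, but only when they actually have something to contribute. The shape of that pattern, modeled loosely in standard C++ (std::mutex standing in for the HotSpot lock; all names here are invented for the sketch):

#include <cstddef>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

static std::mutex totals_lock;      // plays the role of ParGCRareEvent_lock
static int total_regions = 0;
static size_t total_reclaimable = 0;

// Each worker accumulates privately, then publishes once under the lock;
// workers that found nothing skip the lock entirely.
static void publish(int region_num, size_t reclaimable_bytes) {
  if (region_num > 0) {
    std::lock_guard<std::mutex> g(totals_lock);
    total_regions += region_num;
    total_reclaimable += reclaimable_bytes;
  }
}

int main() {
  std::vector<std::thread> workers;
  workers.emplace_back(publish, 3, (size_t) 3 << 20);
  workers.emplace_back(publish, 0, (size_t) 0);  // never touches the lock
  workers.emplace_back(publish, 5, (size_t) 5 << 20);
  for (size_t i = 0; i < workers.size(); ++i) workers[i].join();
  printf("regions: %d reclaimable: %zu\n", total_regions, total_reclaimable);
  return 0;
}

Because each worker publishes at most once, the lock is expected to be essentially uncontended, which is presumably why the simplicity of a rarely taken lock was preferred over hand-rolled size_t atomics here.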