Changeset comparison: src/share/vm/gc_implementation/g1/collectionSetChooser.cpp @ 6011:f7a8920427a6

7145441: G1: collection set chooser-related cleanup
Summary: Cleanup of the CSet chooser class: standardize on uints for region
num and indexes (instead of int, jint, etc.), make the method / field naming
style more consistent, remove a lot of dead code.
Reviewed-by: johnc, brutisso

author   tonyp
date     Wed, 18 Apr 2012 13:39:55 -0400
parents  720b6a76dd9d
children 37f7535e5f18
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
@@ -26,117 +26,21 @@
 #include "gc_implementation/g1/collectionSetChooser.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
 #include "memory/space.inline.hpp"
-
-CSetChooserCache::CSetChooserCache() {
-  for (int i = 0; i < CacheLength; ++i)
-    _cache[i] = NULL;
-  clear();
-}
-
-void CSetChooserCache::clear() {
-  _occupancy = 0;
-  _first = 0;
-  for (int i = 0; i < CacheLength; ++i) {
-    HeapRegion *hr = _cache[i];
-    if (hr != NULL)
-      hr->set_sort_index(-1);
-    _cache[i] = NULL;
-  }
-}
-
-#ifndef PRODUCT
-bool CSetChooserCache::verify() {
-  guarantee(false, "CSetChooserCache::verify(): don't call this any more");
-
-  int index = _first;
-  HeapRegion *prev = NULL;
-  for (int i = 0; i < _occupancy; ++i) {
-    guarantee(_cache[index] != NULL, "cache entry should not be empty");
-    HeapRegion *hr = _cache[index];
-    guarantee(!hr->is_young(), "should not be young!");
-    if (prev != NULL) {
-      guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
-                "cache should be correctly ordered");
-    }
-    guarantee(hr->sort_index() == get_sort_index(index),
-              "sort index should be correct");
-    index = trim_index(index + 1);
-    prev = hr;
-  }
-
-  for (int i = 0; i < (CacheLength - _occupancy); ++i) {
-    guarantee(_cache[index] == NULL, "cache entry should be empty");
-    index = trim_index(index + 1);
-  }
-
-  guarantee(index == _first, "we should have reached where we started from");
-  return true;
-}
-#endif // PRODUCT
-
-void CSetChooserCache::insert(HeapRegion *hr) {
-  guarantee(false, "CSetChooserCache::insert(): don't call this any more");
-
-  assert(!is_full(), "cache should not be empty");
-  hr->calc_gc_efficiency();
-
-  int empty_index;
-  if (_occupancy == 0) {
-    empty_index = _first;
-  } else {
-    empty_index = trim_index(_first + _occupancy);
-    assert(_cache[empty_index] == NULL, "last slot should be empty");
-    int last_index = trim_index(empty_index - 1);
-    HeapRegion *last = _cache[last_index];
-    assert(last != NULL,"as the cache is not empty, last should not be empty");
-    while (empty_index != _first &&
-           last->gc_efficiency() < hr->gc_efficiency()) {
-      _cache[empty_index] = last;
-      last->set_sort_index(get_sort_index(empty_index));
-      empty_index = last_index;
-      last_index = trim_index(last_index - 1);
-      last = _cache[last_index];
-    }
-  }
-  _cache[empty_index] = hr;
-  hr->set_sort_index(get_sort_index(empty_index));
-
-  ++_occupancy;
-  assert(verify(), "cache should be consistent");
-}
-
-HeapRegion *CSetChooserCache::remove_first() {
-  guarantee(false, "CSetChooserCache::remove_first(): "
-            "don't call this any more");
-
-  if (_occupancy > 0) {
-    assert(_cache[_first] != NULL, "cache should have at least one region");
-    HeapRegion *ret = _cache[_first];
-    _cache[_first] = NULL;
-    ret->set_sort_index(-1);
-    --_occupancy;
-    _first = trim_index(_first + 1);
-    assert(verify(), "cache should be consistent");
-    return ret;
-  } else {
-    return NULL;
-  }
-}
 
 // Even though we don't use the GC efficiency in our heuristics as
 // much as we used to, we still order according to GC efficiency. This
 // will cause regions with a lot of live objects and large RSets to
 // end up at the end of the array. Given that we might skip collecting
 // the last few old regions, if after a few mixed GCs the remaining
 // have reclaimable bytes under a certain threshold, the hope is that
 // the ones we'll skip are ones with both large RSets and a lot of
 // live objects, not the ones with just a lot of live objects if we
 // ordered according to the amount of reclaimable bytes per region.
-static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
   if (hr1 == NULL) {
     if (hr2 == NULL) {
       return 0;
     } else {
       return 1;
@@ -154,12 +58,12 @@
   } else {
     return 0;
   }
 }
 
-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
-  return orderRegions(*hr1p, *hr2p);
+static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
+  return order_regions(*hr1p, *hr2p);
 }
 
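For intuition about what this comparator establishes: NULL slots sort after
real regions, and real regions sort by GC efficiency, best first. A minimal
standalone sketch follows; Region and order_regions_model are hypothetical
stand-ins, and the efficiency branches are an assumption (the middle of
order_regions is elided in the hunk above), although the file comment and the
ordering guarantee in verify() further down both point at exactly this shape.

    #include <cstddef>

    // Hypothetical model of the ordering; Region is not a HotSpot type.
    struct Region { double gc_efficiency; };

    static int order_regions_model(const Region* r1, const Region* r2) {
      if (r1 == NULL) { return (r2 == NULL) ? 0 : 1; }  // NULLs sort last
      if (r2 == NULL) { return -1; }
      // Assumed branches: higher GC efficiency (cheaper to collect) first.
      if (r1->gc_efficiency > r2->gc_efficiency) { return -1; }
      if (r1->gc_efficiency < r2->gc_efficiency) { return 1; }
      return 0;
    }
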
 CollectionSetChooser::CollectionSetChooser() :
   // The line below is the worst bit of C++ hackery I've ever written
   // (Detlefs, 11/23). You should think of it as equivalent to
@@ -173,109 +77,78 @@
   // allow to pass the assert in GenericGrowableArray() which checks
   // that a growable array object must be on C heap if elements are.
   //
   // Note: containing object is allocated on C heap since it is CHeapObj.
   //
-  _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
-                                                   ResourceObj::C_HEAP),
-                  100), true /* C_Heap */),
-  _curr_index(0), _length(0),
-  _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
-  _first_par_unreserved_idx(0) {
-  _regionLiveThresholdBytes =
-  HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
+  _regions((ResourceObj::set_allocation_type((address) &_regions,
+                                             ResourceObj::C_HEAP),
+            100), true /* C_Heap */),
+  _curr_index(0), _length(0), _first_par_unreserved_idx(0),
+  _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
+  _region_live_threshold_bytes =
+    HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
 }
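
The "hackery" the comment describes is the comma operator: the first
constructor argument is a comma expression that calls set_allocation_type()
purely for its side effect and then yields 100 as the real initial-capacity
argument. A minimal sketch of the same trick, with hypothetical names
(note_allocation_type and GrowableModel are illustrative, not HotSpot code):

    #include <cstdio>

    // Stand-in for GenericGrowableArray::set_allocation_type().
    static void note_allocation_type(const void* obj) {
      std::printf("side effect runs first for %p\n", obj);
    }

    struct GrowableModel {
      explicit GrowableModel(int initial_capacity) {
        std::printf("capacity = %d\n", initial_capacity);  // prints 100
      }
    };

    // The comma expression calls note_allocation_type() on the object's
    // own address, discards the result, and yields 100, which is what the
    // constructor actually receives -- same shape as _regions(...) above.
    static GrowableModel model((note_allocation_type(&model), 100));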
 
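For scale, a worked example of the live-threshold computation above. The
numbers are illustrative; the real percentage comes from the
G1OldCSetRegionLiveThresholdPercent flag:

    // Hypothetical numbers: 1 MB regions and a 90% live threshold.
    const size_t GrainBytes = 1024 * 1024;            // 1048576 bytes
    const size_t threshold  = GrainBytes * 90 / 100;  // 943718 bytes
    // A region with more than ~921 KB of live data would then be too
    // expensive to keep as a mixed-GC candidate.
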
 #ifndef PRODUCT
-bool CollectionSetChooser::verify() {
-  guarantee(_length >= 0, err_msg("_length: %d", _length));
-  guarantee(0 <= _curr_index && _curr_index <= _length,
-            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
-  int index = 0;
+void CollectionSetChooser::verify() {
+  guarantee(_length <= regions_length(),
+            err_msg("_length: %u regions length: %u", _length, regions_length()));
+  guarantee(_curr_index <= _length,
+            err_msg("_curr_index: %u _length: %u", _curr_index, _length));
+  uint index = 0;
   size_t sum_of_reclaimable_bytes = 0;
   while (index < _curr_index) {
-    guarantee(_markedRegions.at(index) == NULL,
+    guarantee(regions_at(index) == NULL,
               "all entries before _curr_index should be NULL");
     index += 1;
   }
   HeapRegion *prev = NULL;
   while (index < _length) {
-    HeapRegion *curr = _markedRegions.at(index++);
-    guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
-    int si = curr->sort_index();
+    HeapRegion *curr = regions_at(index++);
+    guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
     guarantee(!curr->is_young(), "should not be young!");
     guarantee(!curr->isHumongous(), "should not be humongous!");
-    guarantee(si > -1 && si == (index-1), "sort index invariant");
     if (prev != NULL) {
-      guarantee(orderRegions(prev, curr) != 1,
+      guarantee(order_regions(prev, curr) != 1,
                 err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
                         prev->gc_efficiency(), curr->gc_efficiency()));
     }
     sum_of_reclaimable_bytes += curr->reclaimable_bytes();
     prev = curr;
   }
-  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+  guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
             err_msg("reclaimable bytes inconsistent, "
                     "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
-                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
-  return true;
+                    _remaining_reclaimable_bytes, sum_of_reclaimable_bytes));
 }
-#endif
+#endif // !PRODUCT
 
-void CollectionSetChooser::fillCache() {
-  guarantee(false, "fillCache: don't call this any more");
-
-  while (!_cache.is_full() && (_curr_index < _length)) {
-    HeapRegion* hr = _markedRegions.at(_curr_index);
-    assert(hr != NULL,
-           err_msg("Unexpected NULL hr in _markedRegions at index %d",
-                   _curr_index));
-    _curr_index += 1;
-    assert(!hr->is_young(), "should not be young!");
-    assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
-    _markedRegions.at_put(hr->sort_index(), NULL);
-    _cache.insert(hr);
-    assert(!_cache.is_empty(), "cache should not be empty");
-  }
-  assert(verify(), "cache should be consistent");
-}
-
-void CollectionSetChooser::sortMarkedHeapRegions() {
+void CollectionSetChooser::sort_regions() {
   // First trim any unused portion of the top in the parallel case.
   if (_first_par_unreserved_idx > 0) {
-    if (G1PrintParCleanupStats) {
-      gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
-                          _markedRegions.length(), _first_par_unreserved_idx);
-    }
-    assert(_first_par_unreserved_idx <= _markedRegions.length(),
+    assert(_first_par_unreserved_idx <= regions_length(),
            "Or we didn't reserved enough length");
-    _markedRegions.trunc_to(_first_par_unreserved_idx);
+    regions_trunc_to(_first_par_unreserved_idx);
   }
-  _markedRegions.sort(orderRegions);
-  assert(_length <= _markedRegions.length(), "Requirement");
-  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
-         "Testing _length");
-  assert(_length == _markedRegions.length() ||
-         _markedRegions.at(_length) == NULL, "Testing _length");
-  if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr(" Sorted %d marked regions.", _length);
-  }
-  for (int i = 0; i < _length; i++) {
-    assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
-    _markedRegions.at(i)->set_sort_index(i);
-  }
+  _regions.sort(order_regions);
+  assert(_length <= regions_length(), "Requirement");
+#ifdef ASSERT
+  for (uint i = 0; i < _length; i++) {
+    assert(regions_at(i) != NULL, "Should be true by sorting!");
+  }
+#endif // ASSERT
   if (G1PrintRegionLivenessInfo) {
     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
-    for (int i = 0; i < _length; ++i) {
-      HeapRegion* r = _markedRegions.at(i);
+    for (uint i = 0; i < _length; ++i) {
+      HeapRegion* r = regions_at(i);
       cl.doHeapRegion(r);
     }
   }
-  assert(verify(), "CSet chooser verification");
+  verify();
 }
 
-uint CollectionSetChooser::calcMinOldCSetLength() {
+uint CollectionSetChooser::calc_min_old_cset_length() {
   // The min old CSet region bound is based on the maximum desired
   // number of mixed GCs after a cycle. I.e., even if some old regions
   // look expensive, we should add them to the CSet anyway to make
   // sure we go through the available old regions in no more than the
   // maximum desired number of mixed GCs.
@@ -292,11 +165,11 @@
     result += 1;
   }
   return (uint) result;
 }
 
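The body of this function is elided in the comparison above (old lines
282-291); only the round-up tail survives. That remnant is consistent with a
plain ceiling division of the candidate count by the desired number of mixed
GCs. A sketch under that assumption, with hypothetical names:

    // Hypothetical reconstruction: spread `candidates` old regions over
    // at most `mixed_gc_target` mixed GCs, rounding up.
    static unsigned min_old_cset_length(unsigned candidates,
                                        unsigned mixed_gc_target) {
      unsigned result = candidates / mixed_gc_target;
      if (result * mixed_gc_target < candidates) {
        result += 1;  // remainder regions still need a mixed GC
      }
      return result;  // e.g. 10 candidates, target 4 -> ceil(2.5) = 3
    }
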
-uint CollectionSetChooser::calcMaxOldCSetLength() {
+uint CollectionSetChooser::calc_max_old_cset_length() {
   // The max old CSet region bound is based on the threshold expressed
   // as a percentage of the heap size. I.e., it should bound the
   // number of old regions added to the CSet irrespective of how many
   // of them are available.
 
@@ -309,22 +182,22 @@
     result += 1;
   }
   return (uint) result;
 }
 
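Same elision here (old lines 303-308): only the round-up tail is visible. Per
the comment, the bound is a percentage of the heap's region count, again
rounded up. A sketch under that assumption; the names and the 10% figure are
illustrative:

    // Hypothetical reconstruction: cap the old CSet at `percent` of all
    // heap regions, rounding up.
    static unsigned max_old_cset_length(unsigned heap_regions,
                                        unsigned percent) {
      unsigned result = heap_regions * percent / 100;
      if (result * 100 < heap_regions * percent) {
        result += 1;  // e.g. 1024 regions at 10% -> ceil(102.4) = 103
      }
      return result;
    }
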
-void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+void CollectionSetChooser::add_region(HeapRegion* hr) {
   assert(!hr->isHumongous(),
          "Humongous regions shouldn't be added to the collection set");
   assert(!hr->is_young(), "should not be young!");
-  _markedRegions.append(hr);
+  _regions.append(hr);
   _length++;
-  _remainingReclaimableBytes += hr->reclaimable_bytes();
+  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
   hr->calc_gc_efficiency();
 }
 
-void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions,
-                                                             uint chunkSize) {
+void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions,
+                                                           uint chunk_size) {
   _first_par_unreserved_idx = 0;
   uint n_threads = (uint) ParallelGCThreads;
   if (UseDynamicNumberOfGCThreads) {
     assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
            "Should have been set earlier");
@@ -333,58 +206,48 @@
     // or some improperly initialized variable with leads to no
     // active threads, protect against that in a product build.
     n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
                      1U);
   }
-  uint max_waste = n_threads * chunkSize;
-  // it should be aligned with respect to chunkSize
-  uint aligned_n_regions = (n_regions + chunkSize - 1) / chunkSize * chunkSize;
-  assert(aligned_n_regions % chunkSize == 0, "should be aligned");
-  _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL);
+  uint max_waste = n_threads * chunk_size;
+  // it should be aligned with respect to chunk_size
+  uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
+  assert(aligned_n_regions % chunk_size == 0, "should be aligned");
+  regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
 }
 
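A quick worked example of the sizing above, with illustrative numbers that do
not come from the source:

    // n_regions = 1000, chunk_size = 64, n_threads = 8:
    //   aligned_n_regions = (1000 + 64 - 1) / 64 * 64 = 16 * 64 = 1024
    //   max_waste         = 8 * 64                             = 512
    // regions_at_put_grow(1024 + 512 - 1, NULL) sizes the array to 1536
    // slots, so even the last worker can claim a full chunk without ever
    // writing past the end of the array.
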
-jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
-  // Don't do this assert because this can be called at a point
-  // where the loop up stream will not execute again but might
-  // try to claim more chunks (loop test has not been done yet).
-  // assert(_markedRegions.length() > _first_par_unreserved_idx,
-  //        "Striding beyond the marked regions");
-  jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
-  assert(_markedRegions.length() > res + n_regions - 1,
+uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
+  uint res = (uint) Atomic::add((jint) chunk_size,
+                                (volatile jint*) &_first_par_unreserved_idx);
+  assert(regions_length() > res + chunk_size - 1,
          "Should already have been expanded");
-  return res - chunk_size;
+  return res - chunk_size;
 }
 
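One subtlety in the claim protocol above: HotSpot's Atomic::add returns the
value *after* the addition, which is why the code returns res - chunk_size to
get the start of the claimed chunk. A minimal sketch of the same protocol
with std::atomic, whose fetch_add returns the value *before* the addition so
no subtraction is needed; the names are illustrative:

    #include <atomic>

    static std::atomic<unsigned> first_par_unreserved_idx{0};

    // Each worker atomically reserves the half-open index range
    // [start, start + chunk_size) in the shared regions array; disjoint
    // chunks mean the workers need no further synchronization.
    static unsigned claim_chunk(unsigned chunk_size) {
      return first_par_unreserved_idx.fetch_add(chunk_size);
    }
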
-void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
-  assert(_markedRegions.at(index) == NULL, "precondition");
+void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
+  assert(regions_at(index) == NULL, "precondition");
   assert(!hr->is_young(), "should not be young!");
-  _markedRegions.at_put(index, hr);
+  regions_at_put(index, hr);
   hr->calc_gc_efficiency();
 }
 
-void CollectionSetChooser::updateTotals(jint region_num,
-                                        size_t reclaimable_bytes) {
+void CollectionSetChooser::update_totals(uint region_num,
+                                         size_t reclaimable_bytes) {
   // Only take the lock if we actually need to update the totals.
   if (region_num > 0) {
     assert(reclaimable_bytes > 0, "invariant");
     // We could have just used atomics instead of taking the
     // lock. However, we currently don't have an atomic add for size_t.
     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-    _length += (int) region_num;
-    _remainingReclaimableBytes += reclaimable_bytes;
+    _length += region_num;
+    _remaining_reclaimable_bytes += reclaimable_bytes;
   } else {
     assert(reclaimable_bytes == 0, "invariant");
   }
 }
 
-void CollectionSetChooser::clearMarkedHeapRegions() {
-  for (int i = 0; i < _markedRegions.length(); i++) {
-    HeapRegion* r = _markedRegions.at(i);
-    if (r != NULL) {
-      r->set_sort_index(-1);
-    }
-  }
-  _markedRegions.clear();
+void CollectionSetChooser::clear() {
+  _regions.clear();
   _curr_index = 0;
   _length = 0;
-  _remainingReclaimableBytes = 0;
+  _remaining_reclaimable_bytes = 0;
 };