comparison src/share/vm/gc_implementation/g1/collectionSetChooser.cpp @ 6010:720b6a76dd9d

7157073: G1: type change size_t -> uint for region counts / indexes Summary: Change the type of fields / variables / etc. that represent region counts and indices from size_t to uint. Reviewed-by: iveresov, brutisso, jmasa, jwilhelm
author tonyp
date Wed, 18 Apr 2012 07:21:15 -0400
parents 21595f05bc93
children f7a8920427a6
comparison
equal deleted inserted replaced
6009:dde53abda3d6 6010:720b6a76dd9d
271 } 271 }
272 } 272 }
273 assert(verify(), "CSet chooser verification"); 273 assert(verify(), "CSet chooser verification");
274 } 274 }
275 275
276 size_t CollectionSetChooser::calcMinOldCSetLength() { 276 uint CollectionSetChooser::calcMinOldCSetLength() {
277 // The min old CSet region bound is based on the maximum desired 277 // The min old CSet region bound is based on the maximum desired
278 // number of mixed GCs after a cycle. I.e., even if some old regions 278 // number of mixed GCs after a cycle. I.e., even if some old regions
279 // look expensive, we should add them to the CSet anyway to make 279 // look expensive, we should add them to the CSet anyway to make
280 // sure we go through the available old regions in no more than the 280 // sure we go through the available old regions in no more than the
281 // maximum desired number of mixed GCs. 281 // maximum desired number of mixed GCs.
289 size_t result = region_num / gc_num; 289 size_t result = region_num / gc_num;
290 // emulate ceiling 290 // emulate ceiling
291 if (result * gc_num < region_num) { 291 if (result * gc_num < region_num) {
292 result += 1; 292 result += 1;
293 } 293 }
294 return result; 294 return (uint) result;
295 } 295 }
296 296
297 size_t CollectionSetChooser::calcMaxOldCSetLength() { 297 uint CollectionSetChooser::calcMaxOldCSetLength() {
298 // The max old CSet region bound is based on the threshold expressed 298 // The max old CSet region bound is based on the threshold expressed
299 // as a percentage of the heap size. I.e., it should bound the 299 // as a percentage of the heap size. I.e., it should bound the
300 // number of old regions added to the CSet irrespective of how many 300 // number of old regions added to the CSet irrespective of how many
301 // of them are available. 301 // of them are available.
302 302
306 size_t result = region_num * perc / 100; 306 size_t result = region_num * perc / 100;
307 // emulate ceiling 307 // emulate ceiling
308 if (100 * result < region_num * perc) { 308 if (100 * result < region_num * perc) {
309 result += 1; 309 result += 1;
310 } 310 }
311 return result; 311 return (uint) result;
312 } 312 }
313 313
314 void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) { 314 void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
315 assert(!hr->isHumongous(), 315 assert(!hr->isHumongous(),
316 "Humongous regions shouldn't be added to the collection set"); 316 "Humongous regions shouldn't be added to the collection set");
319 _length++; 319 _length++;
320 _remainingReclaimableBytes += hr->reclaimable_bytes(); 320 _remainingReclaimableBytes += hr->reclaimable_bytes();
321 hr->calc_gc_efficiency(); 321 hr->calc_gc_efficiency();
322 } 322 }
323 323
324 void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions, 324 void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions,
325 size_t chunkSize) { 325 uint chunkSize) {
326 _first_par_unreserved_idx = 0; 326 _first_par_unreserved_idx = 0;
327 int n_threads = ParallelGCThreads; 327 uint n_threads = (uint) ParallelGCThreads;
328 if (UseDynamicNumberOfGCThreads) { 328 if (UseDynamicNumberOfGCThreads) {
329 assert(G1CollectedHeap::heap()->workers()->active_workers() > 0, 329 assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
330 "Should have been set earlier"); 330 "Should have been set earlier");
331 // This is defensive code. As the assertion above says, the number 331 // This is defensive code. As the assertion above says, the number
332 // of active threads should be > 0, but in case there is some path 332 // of active threads should be > 0, but in case there is some path
333 // or some improperly initialized variable which leads to no 333 // or some improperly initialized variable which leads to no
334 // active threads, protect against that in a product build. 334 // active threads, protect against that in a product build.
335 n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(), 335 n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
336 1U); 336 1U);
337 } 337 }
338 size_t max_waste = n_threads * chunkSize; 338 uint max_waste = n_threads * chunkSize;
339 // it should be aligned with respect to chunkSize 339 // it should be aligned with respect to chunkSize
340 size_t aligned_n_regions = 340 uint aligned_n_regions = (n_regions + chunkSize - 1) / chunkSize * chunkSize;
341 (n_regions + (chunkSize - 1)) / chunkSize * chunkSize; 341 assert(aligned_n_regions % chunkSize == 0, "should be aligned");
342 assert( aligned_n_regions % chunkSize == 0, "should be aligned" ); 342 _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL);
343 _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
344 } 343 }
345 344
346 jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) { 345 jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
347 // Don't do this assert because this can be called at a point 346 // Don't do this assert because this can be called at a point
348 // where the loop up stream will not execute again but might 347 // where the loop up stream will not execute again but might