Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 20336:6701abbc4441
8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
Summary: Let HeapRegionSeq manage the heap region and auxiliary data to decrease the amount of responsibilities of G1CollectedHeap, and encapsulate this work from other code.
Reviewed-by: jwilhelm, jmasa, mgerdin, brutisso
author | tschatzl |
---|---|
date | Tue, 19 Aug 2014 10:50:27 +0200 |
parents | eec72fa4b108 |
children | 1f1d373cd044 |
comparison
equal
deleted
inserted
replaced
20335:eec72fa4b108 | 20336:6701abbc4441 |
---|---|
242 // The one and only G1CollectedHeap, so static functions can find it. | 242 // The one and only G1CollectedHeap, so static functions can find it. |
243 static G1CollectedHeap* _g1h; | 243 static G1CollectedHeap* _g1h; |
244 | 244 |
245 static size_t _humongous_object_threshold_in_words; | 245 static size_t _humongous_object_threshold_in_words; |
246 | 246 |
247 // Storage for the G1 heap. | |
248 VirtualSpace _g1_storage; | |
249 MemRegion _g1_reserved; | |
250 | |
251 // The part of _g1_storage that is currently committed. | |
252 MemRegion _g1_committed; | |
253 | |
254 // The master free list. It will satisfy all new region allocations. | |
255 FreeRegionList _free_list; | |
256 | |
257 // The secondary free list which contains regions that have been | 247 // The secondary free list which contains regions that have been |
258 // freed up during the cleanup process. This will be appended to the | 248 // freed up during the cleanup process. This will be appended to |
259 // master free list when appropriate. | 249 // the master free list when appropriate. |
260 FreeRegionList _secondary_free_list; | 250 FreeRegionList _secondary_free_list; |
261 | 251 |
262 // It keeps track of the old regions. | 252 // It keeps track of the old regions. |
263 HeapRegionSet _old_set; | 253 HeapRegionSet _old_set; |
264 | 254 |
517 // an allocation of the given word_size. If do_expand is true, | 507 // an allocation of the given word_size. If do_expand is true, |
518 // attempt to expand the heap if necessary to satisfy the allocation | 508 // attempt to expand the heap if necessary to satisfy the allocation |
519 // request. If the region is to be used as an old region or for a | 509 // request. If the region is to be used as an old region or for a |
520 // humongous object, set is_old to true. If not, to false. | 510 // humongous object, set is_old to true. If not, to false. |
521 HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand); | 511 HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand); |
522 | |
523 // Attempt to satisfy a humongous allocation request of the given | |
524 // size by finding a contiguous set of free regions of num_regions | |
525 // length and remove them from the master free list. Return the | |
526 // index of the first region or G1_NULL_HRS_INDEX if the search | |
527 // was unsuccessful. | |
528 uint humongous_obj_allocate_find_first(uint num_regions, | |
529 size_t word_size); | |
530 | 512 |
531 // Initialize a contiguous set of free regions of length num_regions | 513 // Initialize a contiguous set of free regions of length num_regions |
532 // and starting at index first so that they appear as a single | 514 // and starting at index first so that they appear as a single |
533 // humongous region. | 515 // humongous region. |
534 HeapWord* humongous_obj_allocate_initialize_regions(uint first, | 516 HeapWord* humongous_obj_allocate_initialize_regions(uint first, |
1191 // should be treated as an approximation, not a guarantee, for use in | 1173 // should be treated as an approximation, not a guarantee, for use in |
1192 // heuristic resizing decisions. | 1174 // heuristic resizing decisions. |
1193 virtual size_t unsafe_max_alloc(); | 1175 virtual size_t unsafe_max_alloc(); |
1194 | 1176 |
1195 virtual bool is_maximal_no_gc() const { | 1177 virtual bool is_maximal_no_gc() const { |
1196 return _g1_storage.uncommitted_size() == 0; | 1178 return _hrs.available() == 0; |
1197 } | 1179 } |
1198 | 1180 |
1199 // The total number of regions in the heap. | 1181 // The current number of regions in the heap. |
1200 uint n_regions() const { return _hrs.length(); } | 1182 uint num_regions() const { return _hrs.length(); } |
1201 | 1183 |
1202 // The max number of regions in the heap. | 1184 // The max number of regions in the heap. |
1203 uint max_regions() const { return _hrs.max_length(); } | 1185 uint max_regions() const { return _hrs.max_length(); } |
1204 | 1186 |
1205 // The number of regions that are completely free. | 1187 // The number of regions that are completely free. |
1206 uint free_regions() const { return _free_list.length(); } | 1188 uint num_free_regions() const { return _hrs.num_free_regions(); } |
1207 | 1189 |
1208 // The number of regions that are not completely free. | 1190 // The number of regions that are not completely free. |
1209 uint used_regions() const { return n_regions() - free_regions(); } | 1191 uint num_used_regions() const { return num_regions() - num_free_regions(); } |
1210 | |
1211 // The number of regions available for "regular" expansion. | |
1212 uint expansion_regions() const { return _expansion_regions; } | |
1213 | |
1214 // Factory method for HeapRegion instances. It will return NULL if | |
1215 // the allocation fails. | |
1216 HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom); | |
1217 | 1192 |
1218 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; | 1193 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; |
1219 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; | 1194 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; |
1220 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; | 1195 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; |
1221 void verify_dirty_young_regions() PRODUCT_RETURN; | 1196 void verify_dirty_young_regions() PRODUCT_RETURN; |
1260 void verify_region_sets_optional() { } | 1235 void verify_region_sets_optional() { } |
1261 #endif // HEAP_REGION_SET_FORCE_VERIFY | 1236 #endif // HEAP_REGION_SET_FORCE_VERIFY |
1262 | 1237 |
1263 #ifdef ASSERT | 1238 #ifdef ASSERT |
1264 bool is_on_master_free_list(HeapRegion* hr) { | 1239 bool is_on_master_free_list(HeapRegion* hr) { |
1265 return hr->containing_set() == &_free_list; | 1240 return _hrs.is_free(hr); |
1266 } | 1241 } |
1267 #endif // ASSERT | 1242 #endif // ASSERT |
1268 | 1243 |
1269 // Wrapper for the region list operations that can be called from | 1244 // Wrapper for the region list operations that can be called from |
1270 // methods outside this class. | 1245 // methods outside this class. |
1272 void secondary_free_list_add(FreeRegionList* list) { | 1247 void secondary_free_list_add(FreeRegionList* list) { |
1273 _secondary_free_list.add_ordered(list); | 1248 _secondary_free_list.add_ordered(list); |
1274 } | 1249 } |
1275 | 1250 |
1276 void append_secondary_free_list() { | 1251 void append_secondary_free_list() { |
1277 _free_list.add_ordered(&_secondary_free_list); | 1252 _hrs.insert_list_into_free_list(&_secondary_free_list); |
1278 } | 1253 } |
1279 | 1254 |
1280 void append_secondary_free_list_if_not_empty_with_lock() { | 1255 void append_secondary_free_list_if_not_empty_with_lock() { |
1281 // If the secondary free list looks empty there's no reason to | 1256 // If the secondary free list looks empty there's no reason to |
1282 // take the lock and then try to append it. | 1257 // take the lock and then try to append it. |
1378 inline in_cset_state_t in_cset_state(const oop obj); | 1353 inline in_cset_state_t in_cset_state(const oop obj); |
1379 | 1354 |
1380 // Return "TRUE" iff the given object address is in the reserved | 1355 // Return "TRUE" iff the given object address is in the reserved |
1381 // region of g1. | 1356 // region of g1. |
1382 bool is_in_g1_reserved(const void* p) const { | 1357 bool is_in_g1_reserved(const void* p) const { |
1383 return _g1_reserved.contains(p); | 1358 return _hrs.reserved().contains(p); |
1384 } | 1359 } |
1385 | 1360 |
1386 // Returns a MemRegion that corresponds to the space that has been | 1361 // Returns a MemRegion that corresponds to the space that has been |
1387 // reserved for the heap | 1362 // reserved for the heap |
1388 MemRegion g1_reserved() { | 1363 MemRegion g1_reserved() const { |
1389 return _g1_reserved; | 1364 return _hrs.reserved(); |
1390 } | 1365 } |
1391 | 1366 |
1392 // Returns a MemRegion that corresponds to the space that has been | 1367 // Returns a MemRegion that corresponds to the space that has been |
1393 // committed in the heap | 1368 // committed in the heap |
1394 MemRegion g1_committed() { | 1369 MemRegion g1_committed() { |
1395 return _g1_committed; | 1370 return _hrs.committed(); |
1396 } | 1371 } |
1397 | 1372 |
1398 virtual bool is_in_closed_subset(const void* p) const; | 1373 virtual bool is_in_closed_subset(const void* p) const; |
1399 | 1374 |
1400 G1SATBCardTableModRefBS* g1_barrier_set() { | 1375 G1SATBCardTableModRefBS* g1_barrier_set() { |
1429 inline HeapRegion* region_at(uint index) const; | 1404 inline HeapRegion* region_at(uint index) const; |
1430 | 1405 |
1431 // Calculate the region index of the given address. Given address must be | 1406 // Calculate the region index of the given address. Given address must be |
1432 // within the heap. | 1407 // within the heap. |
1433 inline uint addr_to_region(HeapWord* addr) const; | 1408 inline uint addr_to_region(HeapWord* addr) const; |
1409 | |
1410 inline HeapWord* bottom_addr_for_region(uint index) const; | |
1434 | 1411 |
1435 // Divide the heap region sequence into "chunks" of some size (the number | 1412 // Divide the heap region sequence into "chunks" of some size (the number |
1436 // of regions divided by the number of parallel threads times some | 1413 // of regions divided by the number of parallel threads times some |
1437 // overpartition factor, currently 4). Assumes that this will be called | 1414 // overpartition factor, currently 4). Assumes that this will be called |
1438 // in parallel by ParallelGCThreads worker threads with distinct worker | 1415 // in parallel by ParallelGCThreads worker threads with distinct worker
1443 // attempting to claim the first region in each chunk, and, if | 1420 // attempting to claim the first region in each chunk, and, if |
1444 // successful, applying the closure to each region in the chunk (and | 1421 // successful, applying the closure to each region in the chunk (and |
1445 // setting the claim value of the second and subsequent regions of the | 1422 // setting the claim value of the second and subsequent regions of the |
1446 // chunk.) For now requires that "doHeapRegion" always returns "false", | 1423 // chunk.) For now requires that "doHeapRegion" always returns "false", |
1447 // i.e., that a closure never attempts to abort a traversal. | 1424 // i.e., that a closure never attempts to abort a traversal.
1448 void heap_region_par_iterate_chunked(HeapRegionClosure* blk, | 1425 void heap_region_par_iterate_chunked(HeapRegionClosure* cl, |
1449 uint worker, | 1426 uint worker_id, |
1450 uint no_of_par_workers, | 1427 uint num_workers, |
1451 jint claim_value); | 1428 jint claim_value) const; |
1452 | 1429 |
1453 // It resets all the region claim values to the default. | 1430 // It resets all the region claim values to the default. |
1454 void reset_heap_region_claim_values(); | 1431 void reset_heap_region_claim_values(); |
1455 | 1432 |
1456 // Resets the claim values of regions in the current | 1433 // Resets the claim values of regions in the current |
1470 void clear_cset_start_regions(); | 1447 void clear_cset_start_regions(); |
1471 | 1448 |
1472 // Given the id of a worker, obtain or calculate a suitable | 1449 // Given the id of a worker, obtain or calculate a suitable |
1473 // starting region for iterating over the current collection set. | 1450 // starting region for iterating over the current collection set. |
1474 HeapRegion* start_cset_region_for_worker(uint worker_i); | 1451 HeapRegion* start_cset_region_for_worker(uint worker_i); |
1475 | |
1476 // This is a convenience method that is used by the | |
1477 // HeapRegionIterator classes to calculate the starting region for | |
1478 // each worker so that they do not all start from the same region. | |
1479 HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers); | |
1480 | 1452 |
1481 // Iterate over the regions (if any) in the current collection set. | 1453 // Iterate over the regions (if any) in the current collection set. |
1482 void collection_set_iterate(HeapRegionClosure* blk); | 1454 void collection_set_iterate(HeapRegionClosure* blk); |
1483 | 1455 |
1484 // As above but starting from region r | 1456 // As above but starting from region r |