comparison: src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 20804:7848fc12602b
Merge with jdk8u40-b25

author    Gilles Duboscq <gilles.m.duboscq@oracle.com>
date      Tue, 07 Apr 2015 14:58:49 +0200
parents   52b4284cb496 ee10217e3d03
children  d3cec14f33f3
20184:84105dcdb05b | 20804:7848fc12602b |
23 */ | 23 */ |
24 | 24 |
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP | 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP |
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP | 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP |
27 | 27 |
28 #include "gc_implementation/g1/g1AllocationContext.hpp" | |
29 #include "gc_implementation/g1/g1Allocator.hpp" | |
28 #include "gc_implementation/g1/concurrentMark.hpp" | 30 #include "gc_implementation/g1/concurrentMark.hpp" |
29 #include "gc_implementation/g1/evacuationInfo.hpp" | 31 #include "gc_implementation/g1/evacuationInfo.hpp" |
30 #include "gc_implementation/g1/g1AllocRegion.hpp" | 32 #include "gc_implementation/g1/g1AllocRegion.hpp" |
33 #include "gc_implementation/g1/g1BiasedArray.hpp" | |
31 #include "gc_implementation/g1/g1HRPrinter.hpp" | 34 #include "gc_implementation/g1/g1HRPrinter.hpp" |
32 #include "gc_implementation/g1/g1MonitoringSupport.hpp" | 35 #include "gc_implementation/g1/g1MonitoringSupport.hpp" |
33 #include "gc_implementation/g1/g1RemSet.hpp" | |
34 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" | 36 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" |
35 #include "gc_implementation/g1/g1YCTypes.hpp" | 37 #include "gc_implementation/g1/g1YCTypes.hpp" |
36 #include "gc_implementation/g1/heapRegionSeq.hpp" | 38 #include "gc_implementation/g1/heapRegionManager.hpp" |
37 #include "gc_implementation/g1/heapRegionSet.hpp" | 39 #include "gc_implementation/g1/heapRegionSet.hpp" |
38 #include "gc_implementation/shared/hSpaceCounters.hpp" | 40 #include "gc_implementation/shared/hSpaceCounters.hpp" |
39 #include "gc_implementation/shared/parGCAllocBuffer.hpp" | 41 #include "gc_implementation/shared/parGCAllocBuffer.hpp" |
40 #include "memory/barrierSet.hpp" | 42 #include "memory/barrierSet.hpp" |
41 #include "memory/memRegion.hpp" | 43 #include "memory/memRegion.hpp" |
78 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet; | 80 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet; |
79 | 81 |
80 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) | 82 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) |
81 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) | 83 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) |
82 | 84 |
83 enum GCAllocPurpose { | |
84 GCAllocForTenured, | |
85 GCAllocForSurvived, | |
86 GCAllocPurposeCount | |
87 }; | |
88 | |
89 class YoungList : public CHeapObj<mtGC> { | 85 class YoungList : public CHeapObj<mtGC> { |
90 private: | 86 private: |
91 G1CollectedHeap* _g1h; | 87 G1CollectedHeap* _g1h; |
92 | 88 |
93 HeapRegion* _head; | 89 HeapRegion* _head; |
154 | 150 |
155 // debugging | 151 // debugging |
156 bool check_list_well_formed(); | 152 bool check_list_well_formed(); |
157 bool check_list_empty(bool check_sample = true); | 153 bool check_list_empty(bool check_sample = true); |
158 void print(); | 154 void print(); |
159 }; | |
160 | |
161 class MutatorAllocRegion : public G1AllocRegion { | |
162 protected: | |
163 virtual HeapRegion* allocate_new_region(size_t word_size, bool force); | |
164 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); | |
165 public: | |
166 MutatorAllocRegion() | |
167 : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { } | |
168 }; | |
169 | |
170 class SurvivorGCAllocRegion : public G1AllocRegion { | |
171 protected: | |
172 virtual HeapRegion* allocate_new_region(size_t word_size, bool force); | |
173 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); | |
174 public: | |
175 SurvivorGCAllocRegion() | |
176 : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { } | |
177 }; | |
178 | |
179 class OldGCAllocRegion : public G1AllocRegion { | |
180 protected: | |
181 virtual HeapRegion* allocate_new_region(size_t word_size, bool force); | |
182 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); | |
183 public: | |
184 OldGCAllocRegion() | |
185 : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { } | |
186 }; | 155 }; |
187 | 156 |
188 // The G1 STW is alive closure. | 157 // The G1 STW is alive closure. |
189 // An instance is embedded into the G1CH and used as the | 158 // An instance is embedded into the G1CH and used as the |
190 // (optional) _is_alive_non_header closure in the STW | 159 // (optional) _is_alive_non_header closure in the STW |
197 bool do_object_b(oop p); | 166 bool do_object_b(oop p); |
198 }; | 167 }; |
199 | 168 |
200 class RefineCardTableEntryClosure; | 169 class RefineCardTableEntryClosure; |
201 | 170 |
171 class G1RegionMappingChangedListener : public G1MappingChangedListener { | |
172 private: | |
173 void reset_from_card_cache(uint start_idx, size_t num_regions); | |
174 public: | |
175 virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled); | |
176 }; | |
177 | |
202 class G1CollectedHeap : public SharedHeap { | 178 class G1CollectedHeap : public SharedHeap { |
179 friend class VM_CollectForMetadataAllocation; | |
203 friend class VM_G1CollectForAllocation; | 180 friend class VM_G1CollectForAllocation; |
204 friend class VM_G1CollectFull; | 181 friend class VM_G1CollectFull; |
205 friend class VM_G1IncCollectionPause; | 182 friend class VM_G1IncCollectionPause; |
206 friend class VMStructs; | 183 friend class VMStructs; |
207 friend class MutatorAllocRegion; | 184 friend class MutatorAllocRegion; |
208 friend class SurvivorGCAllocRegion; | 185 friend class SurvivorGCAllocRegion; |
209 friend class OldGCAllocRegion; | 186 friend class OldGCAllocRegion; |
187 friend class G1Allocator; | |
188 friend class G1DefaultAllocator; | |
189 friend class G1ResManAllocator; | |
210 | 190 |
211 // Closures used in implementation. | 191 // Closures used in implementation. |
212 template <G1Barrier barrier, bool do_mark_object> | 192 template <G1Barrier barrier, G1Mark do_mark_object> |
213 friend class G1ParCopyClosure; | 193 friend class G1ParCopyClosure; |
214 friend class G1IsAliveClosure; | 194 friend class G1IsAliveClosure; |
215 friend class G1EvacuateFollowersClosure; | 195 friend class G1EvacuateFollowersClosure; |
216 friend class G1ParScanThreadState; | 196 friend class G1ParScanThreadState; |
217 friend class G1ParScanClosureSuper; | 197 friend class G1ParScanClosureSuper; |
218 friend class G1ParEvacuateFollowersClosure; | 198 friend class G1ParEvacuateFollowersClosure; |
219 friend class G1ParTask; | 199 friend class G1ParTask; |
200 friend class G1ParGCAllocator; | |
201 friend class G1DefaultParGCAllocator; | |
220 friend class G1FreeGarbageRegionClosure; | 202 friend class G1FreeGarbageRegionClosure; |
221 friend class RefineCardTableEntryClosure; | 203 friend class RefineCardTableEntryClosure; |
222 friend class G1PrepareCompactClosure; | 204 friend class G1PrepareCompactClosure; |
223 friend class RegionSorter; | 205 friend class RegionSorter; |
224 friend class RegionResetter; | 206 friend class RegionResetter; |
225 friend class CountRCClosure; | 207 friend class CountRCClosure; |
226 friend class EvacPopObjClosure; | 208 friend class EvacPopObjClosure; |
227 friend class G1ParCleanupCTTask; | 209 friend class G1ParCleanupCTTask; |
228 | 210 |
211 friend class G1FreeHumongousRegionClosure; | |
229 // Other related classes. | 212 // Other related classes. |
230 friend class G1MarkSweep; | 213 friend class G1MarkSweep; |
231 | 214 |
232 private: | 215 private: |
233 // The one and only G1CollectedHeap, so static functions can find it. | 216 // The one and only G1CollectedHeap, so static functions can find it. |
234 static G1CollectedHeap* _g1h; | 217 static G1CollectedHeap* _g1h; |
235 | 218 |
236 static size_t _humongous_object_threshold_in_words; | 219 static size_t _humongous_object_threshold_in_words; |
237 | 220 |
238 // Storage for the G1 heap. | |
239 VirtualSpace _g1_storage; | |
240 MemRegion _g1_reserved; | |
241 | |
242 // The part of _g1_storage that is currently committed. | |
243 MemRegion _g1_committed; | |
244 | |
245 // The master free list. It will satisfy all new region allocations. | |
246 FreeRegionList _free_list; | |
247 | |
248 // The secondary free list which contains regions that have been | 221 // The secondary free list which contains regions that have been |
249 // freed up during the cleanup process. This will be appended to the | 222 // freed up during the cleanup process. This will be appended to |
250 // master free list when appropriate. | 223 // the master free list when appropriate. |
251 FreeRegionList _secondary_free_list; | 224 FreeRegionList _secondary_free_list; |
252 | 225 |
253 // It keeps track of the old regions. | 226 // It keeps track of the old regions. |
254 HeapRegionSet _old_set; | 227 HeapRegionSet _old_set; |
255 | 228 |
256 // It keeps track of the humongous regions. | 229 // It keeps track of the humongous regions. |
257 HeapRegionSet _humongous_set; | 230 HeapRegionSet _humongous_set; |
231 | |
232 void clear_humongous_is_live_table(); | |
233 void eagerly_reclaim_humongous_regions(); | |
258 | 234 |
259 // The number of regions we could create by expansion. | 235 // The number of regions we could create by expansion. |
260 uint _expansion_regions; | 236 uint _expansion_regions; |
261 | 237 |
262 // The block offset table for the G1 heap. | 238 // The block offset table for the G1 heap. |
276 // free_list_only is true, it will only rebuild the master free | 252 // free_list_only is true, it will only rebuild the master free |
277 // list. It is called after a Full GC (free_list_only == false) or | 253 // list. It is called after a Full GC (free_list_only == false) or |
278 // after heap shrinking (free_list_only == true). | 254 // after heap shrinking (free_list_only == true). |
279 void rebuild_region_sets(bool free_list_only); | 255 void rebuild_region_sets(bool free_list_only); |
280 | 256 |
257 // Callback for region mapping changed events. | |
258 G1RegionMappingChangedListener _listener; | |
259 | |
281 // The sequence of all heap regions in the heap. | 260 // The sequence of all heap regions in the heap. |
282 HeapRegionSeq _hrs; | 261 HeapRegionManager _hrm; |
283 | 262 |
284 // Alloc region used to satisfy mutator allocation requests. | 263 // Class that handles the different kinds of allocations. |
285 MutatorAllocRegion _mutator_alloc_region; | 264 G1Allocator* _allocator; |
286 | 265 |
287 // Alloc region used to satisfy allocation requests by the GC for | 266 // Statistics for each allocation context |
288 // survivor objects. | 267 AllocationContextStats _allocation_context_stats; |
289 SurvivorGCAllocRegion _survivor_gc_alloc_region; | |
290 | 268 |
291 // PLAB sizing policy for survivors. | 269 // PLAB sizing policy for survivors. |
292 PLABStats _survivor_plab_stats; | 270 PLABStats _survivor_plab_stats; |
293 | 271 |
294 // Alloc region used to satisfy allocation requests by the GC for | |
295 // old objects. | |
296 OldGCAllocRegion _old_gc_alloc_region; | |
297 | |
298 // PLAB sizing policy for tenured objects. | 272 // PLAB sizing policy for tenured objects. |
299 PLABStats _old_plab_stats; | 273 PLABStats _old_plab_stats; |
300 | |
301 PLABStats* stats_for_purpose(GCAllocPurpose purpose) { | |
302 PLABStats* stats = NULL; | |
303 | |
304 switch (purpose) { | |
305 case GCAllocForSurvived: | |
306 stats = &_survivor_plab_stats; | |
307 break; | |
308 case GCAllocForTenured: | |
309 stats = &_old_plab_stats; | |
310 break; | |
311 default: | |
312 assert(false, "unrecognized GCAllocPurpose"); | |
313 } | |
314 | |
315 return stats; | |
316 } | |
317 | |
318 // The last old region we allocated to during the last GC. | |
319 // Typically, it is not full so we should re-use it during the next GC. | |
320 HeapRegion* _retained_old_gc_alloc_region; | |
321 | 274 |
322 // It specifies whether we should attempt to expand the heap after a | 275 // It specifies whether we should attempt to expand the heap after a |
323 // region allocation failure. If heap expansion fails we set this to | 276 // region allocation failure. If heap expansion fails we set this to |
324 // false so that we don't re-attempt the heap expansion (it's likely | 277 // false so that we don't re-attempt the heap expansion (it's likely |
325 // that subsequent expansion attempts will also fail if one fails). | 278 // that subsequent expansion attempts will also fail if one fails). |
344 void abandon_gc_alloc_regions(); | 297 void abandon_gc_alloc_regions(); |
345 | 298 |
346 // Helper for monitoring and management support. | 299 // Helper for monitoring and management support. |
347 G1MonitoringSupport* _g1mm; | 300 G1MonitoringSupport* _g1mm; |
348 | 301 |
349 // Determines PLAB size for a particular allocation purpose. | 302 // Records whether the region at the given index is kept live by roots or |
350 size_t desired_plab_sz(GCAllocPurpose purpose); | 303 // references from the young generation. |
351 | 304 class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> { |
352 // Outside of GC pauses, the number of bytes used in all regions other | 305 protected: |
353 // than the current allocation region. | 306 bool default_value() const { return false; } |
354 size_t _summary_bytes_used; | 307 public: |
355 | 308 void clear() { G1BiasedMappedArray<bool>::clear(); } |
356 // This is used for a quick test on whether a reference points into | 309 void set_live(uint region) { |
357 // the collection set or not. Basically, we have an array, with one | 310 set_by_index(region, true); |
358 // byte per region, and that byte denotes whether the corresponding | 311 } |
359 // region is in the collection set or not. The entry corresponding | 312 bool is_live(uint region) { |
360 // the bottom of the heap, i.e., region 0, is pointed to by | 313 return get_by_index(region); |
361 // _in_cset_fast_test_base. The _in_cset_fast_test field has been | 314 } |
362 // biased so that it actually points to address 0 of the address | 315 }; |
363 // space, to make the test as fast as possible (we can simply shift | 316 |
364 // the address to address into it, instead of having to subtract the | 317 HumongousIsLiveBiasedMappedArray _humongous_is_live; |
365 // bottom of the heap from the address before shifting it; basically | 318 // Stores whether during humongous object registration we found candidate regions. |
366 // it works in the same way the card table works). | 319 // If not, we can skip a few steps. |
367 bool* _in_cset_fast_test; | 320 bool _has_humongous_reclaim_candidates; |
368 | |
369 // The allocated array used for the fast test on whether a reference | |
370 // points into the collection set or not. This field is also used to | |
371 // free the array. | |
372 bool* _in_cset_fast_test_base; | |
373 | |
374 // The length of the _in_cset_fast_test_base array. | |
375 uint _in_cset_fast_test_length; | |
376 | 321 |
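The removed comment above describes the biasing trick that the replacement G1BiasedMappedArray keeps using: with one entry per region, the table's base pointer is offset ("biased") so that a region index obtained by shifting the raw address indexes it directly, with no subtraction of the heap bottom. A minimal self-contained sketch of that idea, with hypothetical names (not the HotSpot implementation):

#include <cstdint>

// One flag per heap region; _biased_base is _base shifted so that
// (address >> log_region_size) indexes it directly, card-table style.
struct BiasedRegionFlags {
  bool*    _base;             // actual storage, one entry per region
  bool*    _biased_base;      // _base - (heap_start >> log_region_size)
  unsigned _log_region_size;  // log2 of the region size in bytes

  void initialize(bool* storage, const void* heap_start, unsigned log_region) {
    _base = storage;
    _log_region_size = log_region;
    _biased_base =
        storage - (reinterpret_cast<uintptr_t>(heap_start) >> log_region);
  }

  // The fast test: one shift and one load, no lower-bound subtraction.
  bool contains(const void* addr) const {
    return _biased_base[reinterpret_cast<uintptr_t>(addr) >> _log_region_size];
  }
};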
377 volatile unsigned _gc_time_stamp; | 322 volatile unsigned _gc_time_stamp; |
378 | 323 |
379 size_t* _surviving_young_words; | 324 size_t* _surviving_young_words; |
380 | 325 |
413 // Clear RSets after a compaction. It also resets the GC time stamps. | 358 // Clear RSets after a compaction. It also resets the GC time stamps. |
414 void clear_rsets_post_compaction(); | 359 void clear_rsets_post_compaction(); |
415 | 360 |
416 // If the HR printer is active, dump the state of the regions in the | 361 // If the HR printer is active, dump the state of the regions in the |
417 // heap after a compaction. | 362 // heap after a compaction. |
418 void print_hrs_post_compaction(); | 363 void print_hrm_post_compaction(); |
419 | 364 |
420 double verify(bool guard, const char* msg); | 365 double verify(bool guard, const char* msg); |
421 void verify_before_gc(); | 366 void verify_before_gc(); |
422 void verify_after_gc(); | 367 void verify_after_gc(); |
423 | 368 |
504 // attempt to expand the heap if necessary to satisfy the allocation | 449 // attempt to expand the heap if necessary to satisfy the allocation |
505 // request. If the region is to be used as an old region or for a | 450 // request. If the region is to be used as an old region or for a |
506 // humongous object, set is_old to true. If not, to false. | 451 // humongous object, set is_old to true. If not, to false. |
507 HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand); | 452 HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand); |
508 | 453 |
509 // Attempt to satisfy a humongous allocation request of the given | |
510 // size by finding a contiguous set of free regions of num_regions | |
511 // length and remove them from the master free list. Return the | |
512 // index of the first region or G1_NULL_HRS_INDEX if the search | |
513 // was unsuccessful. | |
514 uint humongous_obj_allocate_find_first(uint num_regions, | |
515 size_t word_size); | |
516 | |
517 // Initialize a contiguous set of free regions of length num_regions | 454 // Initialize a contiguous set of free regions of length num_regions |
518 // and starting at index first so that they appear as a single | 455 // and starting at index first so that they appear as a single |
519 // humongous region. | 456 // humongous region. |
520 HeapWord* humongous_obj_allocate_initialize_regions(uint first, | 457 HeapWord* humongous_obj_allocate_initialize_regions(uint first, |
521 uint num_regions, | 458 uint num_regions, |
522 size_t word_size); | 459 size_t word_size, |
460 AllocationContext_t context); | |
523 | 461 |
524 // Attempt to allocate a humongous object of the given size. Return | 462 // Attempt to allocate a humongous object of the given size. Return |
525 // NULL if unsuccessful. | 463 // NULL if unsuccessful. |
526 HeapWord* humongous_obj_allocate(size_t word_size); | 464 HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context); |
527 | 465 |
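For orientation: an allocation is treated as humongous once it reaches half a region (_humongous_object_threshold_in_words above), and it then claims a rounded-up number of contiguous regions, which humongous_obj_allocate_initialize_regions presents as a single humongous region. A hedged sketch of the arithmetic, with an assumed region size (illustrative only; G1's region size is configurable):

#include <cstddef>

// Illustrative constants, not G1's actual configuration.
const size_t kRegionBytes = 1024 * 1024;
const size_t kRegionWords = kRegionBytes / sizeof(void*);

// An object is humongous when it is at least half a region...
bool is_humongous(size_t word_size) {
  return word_size >= kRegionWords / 2;
}

// ...and it then occupies this many contiguous regions (rounded up).
size_t regions_needed(size_t word_size) {
  return (word_size + kRegionWords - 1) / kRegionWords;
}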
528 // The following two methods, allocate_new_tlab() and | 466 // The following two methods, allocate_new_tlab() and |
529 // mem_allocate(), are the two main entry points from the runtime | 467 // mem_allocate(), are the two main entry points from the runtime |
530 // into the G1's allocation routines. They have the following | 468 // into the G1's allocation routines. They have the following |
531 // assumptions: | 469 // assumptions: |
577 | 515 |
578 // Second-level mutator allocation attempt: take the Heap_lock and | 516 // Second-level mutator allocation attempt: take the Heap_lock and |
579 // retry the allocation attempt, potentially scheduling a GC | 517 // retry the allocation attempt, potentially scheduling a GC |
580 // pause. This should only be used for non-humongous allocations. | 518 // pause. This should only be used for non-humongous allocations. |
581 HeapWord* attempt_allocation_slow(size_t word_size, | 519 HeapWord* attempt_allocation_slow(size_t word_size, |
520 AllocationContext_t context, | |
582 unsigned int* gc_count_before_ret, | 521 unsigned int* gc_count_before_ret, |
583 int* gclocker_retry_count_ret); | 522 int* gclocker_retry_count_ret); |
584 | 523 |
585 // Takes the Heap_lock and attempts a humongous allocation. It can | 524 // Takes the Heap_lock and attempts a humongous allocation. It can |
586 // potentially schedule a GC pause. | 525 // potentially schedule a GC pause. |
591 // Allocation attempt that should be called during safepoints (e.g., | 530 // Allocation attempt that should be called during safepoints (e.g., |
592 // at the end of a successful GC). expect_null_mutator_alloc_region | 531 // at the end of a successful GC). expect_null_mutator_alloc_region |
593 // specifies whether the mutator alloc region is expected to be NULL | 532 // specifies whether the mutator alloc region is expected to be NULL |
594 // or not. | 533 // or not. |
595 HeapWord* attempt_allocation_at_safepoint(size_t word_size, | 534 HeapWord* attempt_allocation_at_safepoint(size_t word_size, |
596 bool expect_null_mutator_alloc_region); | 535 AllocationContext_t context, |
536 bool expect_null_mutator_alloc_region); | |
597 | 537 |
598 // It dirties the cards that cover the block so that the post | 538 // It dirties the cards that cover the block so that the post |
599 // write barrier never queues anything when updating objects on this | 539 // write barrier never queues anything when updating objects on this |
600 // block. It is assumed (and in fact we assert) that the block | 540 // block. It is assumed (and in fact we assert) that the block |
601 // belongs to a young region. | 541 // belongs to a young region. |
603 | 543 |
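The mechanism behind the comment above: the G1 post write barrier filters out stores whose covering card is already dirty, so pre-dirtying every card that covers a young block guarantees the barrier never enqueues anything for that block. A minimal sketch under assumed values (512-byte cards and 0 as the dirty value are conventional, but the names here are hypothetical):

#include <cstdint>
#include <cstring>

const unsigned kCardShift = 9;  // assume 512-byte cards
const uint8_t  kDirtyCard = 0;  // assume 0 means "dirty"

// Dirty every card covering [start, start + bytes); the table pointer is
// assumed biased so that (address >> kCardShift) indexes it directly.
void dirty_block(uint8_t* biased_card_table, const void* start, size_t bytes) {
  uintptr_t first = reinterpret_cast<uintptr_t>(start) >> kCardShift;
  uintptr_t last  = (reinterpret_cast<uintptr_t>(start) + bytes - 1) >> kCardShift;
  memset(biased_card_table + first, kDirtyCard, last - first + 1);
}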
604 // Allocate blocks during garbage collection. Will ensure an | 544 // Allocate blocks during garbage collection. Will ensure an |
605 // allocation region, either by picking one or expanding the | 545 // allocation region, either by picking one or expanding the |
606 // heap, and then allocate a block of the given size. The block | 546 // heap, and then allocate a block of the given size. The block |
607 // may not be a humongous - it must fit into a single heap region. | 547 // may not be a humongous - it must fit into a single heap region. |
608 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); | 548 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, |
549 size_t word_size, | |
550 AllocationContext_t context); | |
609 | 551 |
610 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, | 552 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, |
611 HeapRegion* alloc_region, | 553 HeapRegion* alloc_region, |
612 bool par, | 554 bool par, |
613 size_t word_size); | 555 size_t word_size); |
615 // Ensure that no further allocations can happen in "r", bearing in mind | 557 // Ensure that no further allocations can happen in "r", bearing in mind |
616 // that parallel threads might be attempting allocations. | 558 // that parallel threads might be attempting allocations. |
617 void par_allocate_remaining_space(HeapRegion* r); | 559 void par_allocate_remaining_space(HeapRegion* r); |
618 | 560 |
619 // Allocation attempt during GC for a survivor object / PLAB. | 561 // Allocation attempt during GC for a survivor object / PLAB. |
620 inline HeapWord* survivor_attempt_allocation(size_t word_size); | 562 inline HeapWord* survivor_attempt_allocation(size_t word_size, |
563 AllocationContext_t context); | |
621 | 564 |
622 // Allocation attempt during GC for an old object / PLAB. | 565 // Allocation attempt during GC for an old object / PLAB. |
623 inline HeapWord* old_attempt_allocation(size_t word_size); | 566 inline HeapWord* old_attempt_allocation(size_t word_size, |
567 AllocationContext_t context); | |
624 | 568 |
625 // These methods are the "callbacks" from the G1AllocRegion class. | 569 // These methods are the "callbacks" from the G1AllocRegion class. |
626 | 570 |
627 // For mutator alloc regions. | 571 // For mutator alloc regions. |
628 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force); | 572 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force); |
657 void resize_if_necessary_after_full_collection(size_t word_size); | 601 void resize_if_necessary_after_full_collection(size_t word_size); |
658 | 602 |
659 // Callback from VM_G1CollectForAllocation operation. | 603 // Callback from VM_G1CollectForAllocation operation. |
660 // This function does everything necessary/possible to satisfy a | 604 // This function does everything necessary/possible to satisfy a |
661 // failed allocation request (including collection, expansion, etc.) | 605 // failed allocation request (including collection, expansion, etc.) |
662 HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded); | 606 HeapWord* satisfy_failed_allocation(size_t word_size, |
607 AllocationContext_t context, | |
608 bool* succeeded); | |
663 | 609 |
664 // Attempt to expand the heap sufficiently | 610 // Attempt to expand the heap sufficiently |
665 // to support an allocation of the given "word_size". If | 611 // to support an allocation of the given "word_size". If |
666 // successful, perform the allocation and return the address of the | 612 // successful, perform the allocation and return the address of the |
667 // allocated block, or else "NULL". | 613 // allocated block, or else "NULL". |
668 HeapWord* expand_and_allocate(size_t word_size); | 614 HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context); |
669 | 615 |
670 // Process any reference objects discovered during | 616 // Process any reference objects discovered during |
671 // an incremental evacuation pause. | 617 // an incremental evacuation pause. |
672 void process_discovered_references(uint no_of_gc_workers); | 618 void process_discovered_references(uint no_of_gc_workers); |
673 | 619 |
674 // Enqueue any remaining discovered references | 620 // Enqueue any remaining discovered references |
675 // after processing. | 621 // after processing. |
676 void enqueue_discovered_references(uint no_of_gc_workers); | 622 void enqueue_discovered_references(uint no_of_gc_workers); |
677 | 623 |
678 public: | 624 public: |
625 | |
626 G1Allocator* allocator() { | |
627 return _allocator; | |
628 } | |
679 | 629 |
680 G1MonitoringSupport* g1mm() { | 630 G1MonitoringSupport* g1mm() { |
681 assert(_g1mm != NULL, "should have been initialized"); | 631 assert(_g1mm != NULL, "should have been initialized"); |
682 return _g1mm; | 632 return _g1mm; |
683 } | 633 } |
686 // Returns true if the heap was expanded by the requested amount; | 636 // Returns true if the heap was expanded by the requested amount; |
687 // false otherwise. | 637 // false otherwise. |
688 // (Rounds up to a HeapRegion boundary.) | 638 // (Rounds up to a HeapRegion boundary.) |
689 bool expand(size_t expand_bytes); | 639 bool expand(size_t expand_bytes); |
690 | 640 |
641 // Returns the PLAB statistics given a purpose. | |
642 PLABStats* stats_for_purpose(GCAllocPurpose purpose) { | |
643 PLABStats* stats = NULL; | |
644 | |
645 switch (purpose) { | |
646 case GCAllocForSurvived: | |
647 stats = &_survivor_plab_stats; | |
648 break; | |
649 case GCAllocForTenured: | |
650 stats = &_old_plab_stats; | |
651 break; | |
652 default: | |
653 assert(false, "unrecognized GCAllocPurpose"); | |
654 } | |
655 | |
656 return stats; | |
657 } | |
658 | |
659 // Determines PLAB size for a particular allocation purpose. | |
660 size_t desired_plab_sz(GCAllocPurpose purpose); | |
661 | |
662 inline AllocationContextStats& allocation_context_stats(); | |
663 | |
691 // Do anything common to GC's. | 664 // Do anything common to GC's. |
692 virtual void gc_prologue(bool full); | 665 virtual void gc_prologue(bool full); |
693 virtual void gc_epilogue(bool full); | 666 virtual void gc_epilogue(bool full); |
694 | 667 |
695 #ifdef GRAAL | 668 #ifdef GRAAL |
696 HeapWord** top_addr() const; | 669 HeapWord** top_addr() const; |
697 HeapWord** end_addr() const; | 670 HeapWord** end_addr() const; |
698 #endif | 671 #endif |
699 | 672 |
673 inline void set_humongous_is_live(oop obj); | |
674 | |
675 bool humongous_is_live(uint region) { | |
676 return _humongous_is_live.is_live(region); | |
677 } | |
678 | |
679 // Returns whether the given region (which must be a humongous (start) region) | |
680 // is to be considered conservatively live regardless of any other conditions. | |
681 bool humongous_region_is_always_live(uint index); | |
682 // Register the given region to be part of the collection set. | |
683 inline void register_humongous_region_with_in_cset_fast_test(uint index); | |
684 // Register regions with humongous objects (actually on the start region) in | |
685 // the in_cset_fast_test table. | |
686 void register_humongous_regions_with_in_cset_fast_test(); | |
700 // We register a region with the fast "in collection set" test. We | 687 // We register a region with the fast "in collection set" test. We |
701 // simply set to true the array slot corresponding to this region. | 688 // simply set to true the array slot corresponding to this region. |
702 void register_region_with_in_cset_fast_test(HeapRegion* r) { | 689 void register_region_with_in_cset_fast_test(HeapRegion* r) { |
703 assert(_in_cset_fast_test_base != NULL, "sanity"); | 690 _in_cset_fast_test.set_in_cset(r->hrm_index()); |
704 assert(r->in_collection_set(), "invariant"); | |
705 uint index = r->hrs_index(); | |
706 assert(index < _in_cset_fast_test_length, "invariant"); | |
707 assert(!_in_cset_fast_test_base[index], "invariant"); | |
708 _in_cset_fast_test_base[index] = true; | |
709 } | 691 } |
710 | 692 |
711 // This is a fast test on whether a reference points into the | 693 // This is a fast test on whether a reference points into the |
712 // collection set or not. Assume that the reference | 694 // collection set or not. Assume that the reference |
713 // points into the heap. | 695 // points into the heap. |
714 inline bool in_cset_fast_test(oop obj); | 696 inline bool in_cset_fast_test(oop obj); |
715 | 697 |
716 void clear_cset_fast_test() { | 698 void clear_cset_fast_test() { |
717 assert(_in_cset_fast_test_base != NULL, "sanity"); | 699 _in_cset_fast_test.clear(); |
718 memset(_in_cset_fast_test_base, false, | |
719 (size_t) _in_cset_fast_test_length * sizeof(bool)); | |
720 } | 700 } |
721 | 701 |
722 // This is called at the start of either a concurrent cycle or a Full | 702 // This is called at the start of either a concurrent cycle or a Full |
723 // GC to update the number of old marking cycles started. | 703 // GC to update the number of old marking cycles started. |
724 void increment_old_marking_cycles_started(); | 704 void increment_old_marking_cycles_started(); |
843 // and does "scan_metadata". If "scan_rs" is | 823 // and does "scan_metadata". If "scan_rs" is |
844 // NULL, then this step is skipped. The "worker_i" | 824 // NULL, then this step is skipped. The "worker_i" |
845 // param is for use with parallel roots processing, and should be | 825 // param is for use with parallel roots processing, and should be |
846 // the "i" of the calling parallel worker thread's work(i) function. | 826 // the "i" of the calling parallel worker thread's work(i) function. |
847 // In the sequential case this param will be ignored. | 827 // In the sequential case this param will be ignored. |
848 void g1_process_strong_roots(bool is_scavenging, | 828 void g1_process_roots(OopClosure* scan_non_heap_roots, |
849 ScanningOption so, | 829 OopClosure* scan_non_heap_weak_roots, |
850 OopClosure* scan_non_heap_roots, | 830 OopsInHeapRegionClosure* scan_rs, |
851 OopsInHeapRegionClosure* scan_rs, | 831 CLDClosure* scan_strong_clds, |
852 G1KlassScanClosure* scan_klasses, | 832 CLDClosure* scan_weak_clds, |
853 uint worker_i); | 833 CodeBlobClosure* scan_strong_code, |
854 | 834 uint worker_i); |
855 // Apply "blk" to all the weak roots of the system. These include | |
856 // JNI weak roots, the code cache, system dictionary, symbol table, | |
857 // string table, and referents of reachable weak refs. | |
858 void g1_process_weak_roots(OopClosure* root_closure); | |
859 | |
860 // Notifies all the necessary spaces that the committed space has | |
861 // been updated (either expanded or shrunk). It should be called | |
862 // after _g1_storage is updated. | |
863 void update_committed_space(HeapWord* old_end, HeapWord* new_end); | |
864 | 835 |
865 // The concurrent marker (and the thread it runs in.) | 836 // The concurrent marker (and the thread it runs in.) |
866 ConcurrentMark* _cm; | 837 ConcurrentMark* _cm; |
867 ConcurrentMarkThread* _cmThread; | 838 ConcurrentMarkThread* _cmThread; |
868 bool _mark_in_progress; | 839 bool _mark_in_progress; |
1046 // The heap region entry for a given worker is valid iff | 1017 // The heap region entry for a given worker is valid iff |
1047 // the associated time stamp value matches the current value | 1018 // the associated time stamp value matches the current value |
1048 // of G1CollectedHeap::_gc_time_stamp. | 1019 // of G1CollectedHeap::_gc_time_stamp. |
1049 unsigned int* _worker_cset_start_region_time_stamp; | 1020 unsigned int* _worker_cset_start_region_time_stamp; |
1050 | 1021 |
1051 enum G1H_process_strong_roots_tasks { | 1022 enum G1H_process_roots_tasks { |
1052 G1H_PS_filter_satb_buffers, | 1023 G1H_PS_filter_satb_buffers, |
1053 G1H_PS_refProcessor_oops_do, | 1024 G1H_PS_refProcessor_oops_do, |
1054 // Leave this one last. | 1025 // Leave this one last. |
1055 G1H_PS_NumElements | 1026 G1H_PS_NumElements |
1056 }; | 1027 }; |
1127 | 1098 |
1128 unsigned get_gc_time_stamp() { | 1099 unsigned get_gc_time_stamp() { |
1129 return _gc_time_stamp; | 1100 return _gc_time_stamp; |
1130 } | 1101 } |
1131 | 1102 |
1132 void reset_gc_time_stamp() { | 1103 inline void reset_gc_time_stamp(); |
1133 _gc_time_stamp = 0; | |
1134 OrderAccess::fence(); | |
1135 // Clear the cached CSet starting regions and time stamps. | |
1136 // Their validity is dependent on the GC timestamp. | |
1137 clear_cset_start_regions(); | |
1138 } | |
1139 | 1104 |
1140 void check_gc_time_stamps() PRODUCT_RETURN; | 1105 void check_gc_time_stamps() PRODUCT_RETURN; |
1141 | 1106 |
1142 void increment_gc_time_stamp() { | 1107 inline void increment_gc_time_stamp(); |
1143 ++_gc_time_stamp; | |
1144 OrderAccess::fence(); | |
1145 } | |
1146 | 1108 |
1147 // Reset the given region's GC timestamp. If it is a starts-humongous | 1109 // Reset the given region's GC timestamp. If it is a starts-humongous |
1148 // region, also reset the GC timestamps of its corresponding | 1110 // region, also reset the GC timestamps of its corresponding |
1149 // continues-humongous regions. | 1111 // continues-humongous regions. |
1150 void reset_gc_time_stamps(HeapRegion* hr); | 1112 void reset_gc_time_stamps(HeapRegion* hr); |
1178 // Some heaps may offer a contiguous region for shared non-blocking | 1140 // Some heaps may offer a contiguous region for shared non-blocking |
1179 // allocation, via inlined code (by exporting the address of the top and | 1141 // allocation, via inlined code (by exporting the address of the top and |
1180 // end fields defining the extent of the contiguous allocation region.) | 1142 // end fields defining the extent of the contiguous allocation region.) |
1181 // But G1CollectedHeap doesn't yet support this. | 1143 // But G1CollectedHeap doesn't yet support this. |
1182 | 1144 |
1183 // Return an estimate of the maximum allocation that could be performed | |
1184 // without triggering any collection or expansion activity. In a | |
1185 // generational collector, for example, this is probably the largest | |
1186 // allocation that could be supported (without expansion) in the youngest | |
1187 // generation. It is "unsafe" because no locks are taken; the result | |
1188 // should be treated as an approximation, not a guarantee, for use in | |
1189 // heuristic resizing decisions. | |
1190 virtual size_t unsafe_max_alloc(); | |
1191 | |
1192 virtual bool is_maximal_no_gc() const { | 1145 virtual bool is_maximal_no_gc() const { |
1193 return _g1_storage.uncommitted_size() == 0; | 1146 return _hrm.available() == 0; |
1194 } | 1147 } |
1195 | 1148 |
1196 // The total number of regions in the heap. | 1149 // The current number of regions in the heap. |
1197 uint n_regions() { return _hrs.length(); } | 1150 uint num_regions() const { return _hrm.length(); } |
1198 | 1151 |
1199 // The max number of regions in the heap. | 1152 // The max number of regions in the heap. |
1200 uint max_regions() { return _hrs.max_length(); } | 1153 uint max_regions() const { return _hrm.max_length(); } |
1201 | 1154 |
1202 // The number of regions that are completely free. | 1155 // The number of regions that are completely free. |
1203 uint free_regions() { return _free_list.length(); } | 1156 uint num_free_regions() const { return _hrm.num_free_regions(); } |
1204 | 1157 |
1205 // The number of regions that are not completely free. | 1158 // The number of regions that are not completely free. |
1206 uint used_regions() { return n_regions() - free_regions(); } | 1159 uint num_used_regions() const { return num_regions() - num_free_regions(); } |
1207 | |
1208 // The number of regions available for "regular" expansion. | |
1209 uint expansion_regions() { return _expansion_regions; } | |
1210 | |
1211 // Factory method for HeapRegion instances. It will return NULL if | |
1212 // the allocation fails. | |
1213 HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom); | |
1214 | 1160 |
1215 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; | 1161 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; |
1216 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; | 1162 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; |
1217 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; | 1163 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; |
1218 void verify_dirty_young_regions() PRODUCT_RETURN; | 1164 void verify_dirty_young_regions() PRODUCT_RETURN; |
1165 | |
1166 #ifndef PRODUCT | |
1167 // Make sure that the given bitmap has no marked objects in the | |
1168 // range [from,limit). If it does, print an error message and return | |
1169 // false. Otherwise, just return true. bitmap_name should be "prev" | |
1170 // or "next". | |
1171 bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap, | |
1172 HeapWord* from, HeapWord* limit); | |
1173 | |
1174 // Verify that the prev / next bitmap range [tams,end) for the given | |
1175 // region has no marks. Return true if all is well, false if errors | |
1176 // are detected. | |
1177 bool verify_bitmaps(const char* caller, HeapRegion* hr); | |
1178 #endif // PRODUCT | |
1179 | |
1180 // If G1VerifyBitmaps is set, verify that the marking bitmaps for | |
1181 // the given region do not have any spurious marks. If errors are | |
1182 // detected, print appropriate error messages and crash. | |
1183 void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN; | |
1184 | |
1185 // If G1VerifyBitmaps is set, verify that the marking bitmaps do not | |
1186 // have any spurious marks. If errors are detected, print | |
1187 // appropriate error messages and crash. | |
1188 void check_bitmaps(const char* caller) PRODUCT_RETURN; | |
1219 | 1189 |
1220 // verify_region_sets() performs verification over the region | 1190 // verify_region_sets() performs verification over the region |
1221 // lists. It will be compiled in the product code to be used when | 1191 // lists. It will be compiled in the product code to be used when |
1222 // necessary (i.e., during heap verification). | 1192 // necessary (i.e., during heap verification). |
1223 void verify_region_sets(); | 1193 void verify_region_sets(); |
1233 void verify_region_sets_optional() { } | 1203 void verify_region_sets_optional() { } |
1234 #endif // HEAP_REGION_SET_FORCE_VERIFY | 1204 #endif // HEAP_REGION_SET_FORCE_VERIFY |
1235 | 1205 |
1236 #ifdef ASSERT | 1206 #ifdef ASSERT |
1237 bool is_on_master_free_list(HeapRegion* hr) { | 1207 bool is_on_master_free_list(HeapRegion* hr) { |
1238 return hr->containing_set() == &_free_list; | 1208 return _hrm.is_free(hr); |
1239 } | 1209 } |
1240 #endif // ASSERT | 1210 #endif // ASSERT |
1241 | 1211 |
1242 // Wrapper for the region list operations that can be called from | 1212 // Wrapper for the region list operations that can be called from |
1243 // methods outside this class. | 1213 // methods outside this class. |
1245 void secondary_free_list_add(FreeRegionList* list) { | 1215 void secondary_free_list_add(FreeRegionList* list) { |
1246 _secondary_free_list.add_ordered(list); | 1216 _secondary_free_list.add_ordered(list); |
1247 } | 1217 } |
1248 | 1218 |
1249 void append_secondary_free_list() { | 1219 void append_secondary_free_list() { |
1250 _free_list.add_ordered(&_secondary_free_list); | 1220 _hrm.insert_list_into_free_list(&_secondary_free_list); |
1251 } | 1221 } |
1252 | 1222 |
1253 void append_secondary_free_list_if_not_empty_with_lock() { | 1223 void append_secondary_free_list_if_not_empty_with_lock() { |
1254 // If the secondary free list looks empty there's no reason to | 1224 // If the secondary free list looks empty there's no reason to |
1255 // take the lock and then try to append it. | 1225 // take the lock and then try to append it. |
1271 void wait_while_free_regions_coming(); | 1241 void wait_while_free_regions_coming(); |
1272 | 1242 |
1273 // Determine whether the given region is one that we are using as an | 1243 // Determine whether the given region is one that we are using as an |
1274 // old GC alloc region. | 1244 // old GC alloc region. |
1275 bool is_old_gc_alloc_region(HeapRegion* hr) { | 1245 bool is_old_gc_alloc_region(HeapRegion* hr) { |
1276 return hr == _retained_old_gc_alloc_region; | 1246 return _allocator->is_retained_old_region(hr); |
1277 } | 1247 } |
1278 | 1248 |
1279 // Perform a collection of the heap; intended for use in implementing | 1249 // Perform a collection of the heap; intended for use in implementing |
1280 // "System.gc". This probably implies as full a collection as the | 1250 // "System.gc". This probably implies as full a collection as the |
1281 // "CollectedHeap" supports. | 1251 // "CollectedHeap" supports. |
1282 virtual void collect(GCCause::Cause cause); | 1252 virtual void collect(GCCause::Cause cause); |
1283 | 1253 |
1284 // The same as above but assume that the caller holds the Heap_lock. | 1254 // The same as above but assume that the caller holds the Heap_lock. |
1285 void collect_locked(GCCause::Cause cause); | 1255 void collect_locked(GCCause::Cause cause); |
1286 | 1256 |
1257 virtual bool copy_allocation_context_stats(const jint* contexts, | |
1258 jlong* totals, | |
1259 jbyte* accuracy, | |
1260 jint len); | |
1261 | |
1287 // True iff an evacuation has failed in the most-recent collection. | 1262 // True iff an evacuation has failed in the most-recent collection. |
1288 bool evacuation_failed() { return _evacuation_failed; } | 1263 bool evacuation_failed() { return _evacuation_failed; } |
1289 | 1264 |
1290 void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed); | 1265 void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed); |
1291 void prepend_to_freelist(FreeRegionList* list); | 1266 void prepend_to_freelist(FreeRegionList* list); |
1292 void decrement_summary_bytes(size_t bytes); | 1267 void decrement_summary_bytes(size_t bytes); |
1293 | 1268 |
1294 // Returns "TRUE" iff "p" points into the committed areas of the heap. | 1269 // Returns "TRUE" iff "p" points into the committed areas of the heap. |
1295 virtual bool is_in(const void* p) const; | 1270 virtual bool is_in(const void* p) const; |
1271 #ifdef ASSERT | |
1272 // Returns whether p is in one of the available areas of the heap. Slow but | |
1273 // extensive version. | |
1274 bool is_in_exact(const void* p) const; | |
1275 #endif | |
1296 | 1276 |
1297 // Return "TRUE" iff the given object address is within the collection | 1277 // Return "TRUE" iff the given object address is within the collection |
1298 // set. | 1278 // set. Slow implementation. |
1299 inline bool obj_in_cs(oop obj); | 1279 inline bool obj_in_cs(oop obj); |
1280 | |
1281 inline bool is_in_cset(oop obj); | |
1282 | |
1283 inline bool is_in_cset_or_humongous(const oop obj); | |
1284 | |
1285 enum in_cset_state_t { | |
1286 InNeither, // neither in collection set nor humongous | |
1287 InCSet, // region is in collection set only | |
1288 IsHumongous // region is a humongous start region | |
1289 }; | |
1290 private: | |
1291 // Instances of this class are used for quick tests on whether a reference points | |
1292 // into the collection set or is a humongous object (points into a humongous | |
1293 // object). | |
1294 // Each of the array's elements denotes whether the corresponding region is in | |
1295 // the collection set or a humongous region. | |
1296 // We use this to quickly reclaim humongous objects: by making a humongous region | |
1297 // succeed this test, we sort-of add it to the collection set. During the reference | |
1298 // iteration closures, when we see a humongous region, we simply mark it as | |
1299 // referenced, i.e. live. | |
1300 class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> { | |
1301 protected: | |
1302 char default_value() const { return G1CollectedHeap::InNeither; } | |
1303 public: | |
1304 void set_humongous(uintptr_t index) { | |
1305 assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values"); | |
1306 set_by_index(index, G1CollectedHeap::IsHumongous); | |
1307 } | |
1308 | |
1309 void clear_humongous(uintptr_t index) { | |
1310 set_by_index(index, G1CollectedHeap::InNeither); | |
1311 } | |
1312 | |
1313 void set_in_cset(uintptr_t index) { | |
1314 assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value"); | |
1315 set_by_index(index, G1CollectedHeap::InCSet); | |
1316 } | |
1317 | |
1318 bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; } | |
1319 bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; } | |
1320 G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); } | |
1321 void clear() { G1BiasedMappedArray<char>::clear(); } | |
1322 }; | |
1323 | |
1324 // This array is used for a quick test on whether a reference points into | |
1325 // the collection set or not. Each of the array's elements denotes whether the | |
1326 // corresponding region is in the collection set or not. | |
1327 G1FastCSetBiasedMappedArray _in_cset_fast_test; | |
1328 | |
1329 public: | |
1330 | |
1331 inline in_cset_state_t in_cset_state(const oop obj); | |
1300 | 1332 |
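A self-contained sketch of how a caller might consume the three-valued state above on the evacuation fast path; the names and helpers are hypothetical, only the dispatch shape comes from the class above:

#include <cstdint>

enum InCSetState { InNeither = 0, InCSet, IsHumongous };

// One byte per region, biased as in G1FastCSetBiasedMappedArray: a single
// lookup distinguishes "evacuate", "keep humongous candidate alive", and
// "nothing to do".
InCSetState state_for(const InCSetState* biased_table, unsigned log_region,
                      const void* addr) {
  return biased_table[reinterpret_cast<uintptr_t>(addr) >> log_region];
}

void handle_reference(const InCSetState* biased_table, unsigned log_region,
                      const void* ref) {
  switch (state_for(biased_table, log_region, ref)) {
    case InCSet:
      // The referenced object is in the collection set: evacuate it.
      break;
    case IsHumongous:
      // Reference into a humongous candidate: record it as live so the
      // region is not eagerly reclaimed at the end of the pause.
      break;
    case InNeither:
    default:
      break;  // fast path: nothing to do
  }
}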
1301 // Return "TRUE" iff the given object address is in the reserved | 1333 // Return "TRUE" iff the given object address is in the reserved |
1302 // region of g1. | 1334 // region of g1. |
1303 bool is_in_g1_reserved(const void* p) const { | 1335 bool is_in_g1_reserved(const void* p) const { |
1304 return _g1_reserved.contains(p); | 1336 return _hrm.reserved().contains(p); |
1305 } | 1337 } |
1306 | 1338 |
1307 // Returns a MemRegion that corresponds to the space that has been | 1339 // Returns a MemRegion that corresponds to the space that has been |
1308 // reserved for the heap | 1340 // reserved for the heap |
1309 MemRegion g1_reserved() { | 1341 MemRegion g1_reserved() const { |
1310 return _g1_reserved; | 1342 return _hrm.reserved(); |
1311 } | |
1312 | |
1313 // Returns a MemRegion that corresponds to the space that has been | |
1314 // committed in the heap | |
1315 MemRegion g1_committed() { | |
1316 return _g1_committed; | |
1317 } | 1343 } |
1318 | 1344 |
1319 virtual bool is_in_closed_subset(const void* p) const; | 1345 virtual bool is_in_closed_subset(const void* p) const; |
1320 | 1346 |
1321 G1SATBCardTableModRefBS* g1_barrier_set() { | 1347 G1SATBCardTableLoggingModRefBS* g1_barrier_set() { |
1322 return (G1SATBCardTableModRefBS*) barrier_set(); | 1348 return (G1SATBCardTableLoggingModRefBS*) barrier_set(); |
1323 } | 1349 } |
1324 | 1350 |
1325 // This resets the card table to all zeros. It is used after | 1351 // This resets the card table to all zeros. It is used after |
1326 // a collection pause which used the card table to claim cards. | 1352 // a collection pause which used the card table to claim cards. |
1327 void cleanUpCardTable(); | 1353 void cleanUpCardTable(); |
1330 | 1356 |
1331 // Iterate over all the ref-containing fields of all objects, calling | 1357 // Iterate over all the ref-containing fields of all objects, calling |
1332 // "cl.do_oop" on each. | 1358 // "cl.do_oop" on each. |
1333 virtual void oop_iterate(ExtendedOopClosure* cl); | 1359 virtual void oop_iterate(ExtendedOopClosure* cl); |
1334 | 1360 |
1335 // Same as above, restricted to a memory region. | |
1336 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); | |
1337 | |
1338 // Iterate over all objects, calling "cl.do_object" on each. | 1361 // Iterate over all objects, calling "cl.do_object" on each. |
1339 virtual void object_iterate(ObjectClosure* cl); | 1362 virtual void object_iterate(ObjectClosure* cl); |
1340 | 1363 |
1341 virtual void safe_object_iterate(ObjectClosure* cl) { | 1364 virtual void safe_object_iterate(ObjectClosure* cl) { |
1342 object_iterate(cl); | 1365 object_iterate(cl); |
1349 // iteration early if the "doHeapRegion" method returns "true". | 1372 // iteration early if the "doHeapRegion" method returns "true". |
1350 void heap_region_iterate(HeapRegionClosure* blk) const; | 1373 void heap_region_iterate(HeapRegionClosure* blk) const; |
1351 | 1374 |
1352 // Return the region with the given index. It assumes the index is valid. | 1375 // Return the region with the given index. It assumes the index is valid. |
1353 inline HeapRegion* region_at(uint index) const; | 1376 inline HeapRegion* region_at(uint index) const; |
1377 | |
1378 // Calculate the region index of the given address. Given address must be | |
1379 // within the heap. | |
1380 inline uint addr_to_region(HeapWord* addr) const; | |
1381 | |
1382 inline HeapWord* bottom_addr_for_region(uint index) const; | |
1354 | 1383 |
1355 // Divide the heap region sequence into "chunks" of some size (the number | 1384 // Divide the heap region sequence into "chunks" of some size (the number |
1356 // of regions divided by the number of parallel threads times some | 1385 // of regions divided by the number of parallel threads times some |
1357 // overpartition factor, currently 4). Assumes that this will be called | 1386 // overpartition factor, currently 4). Assumes that this will be called |
1358 // in parallel by ParallelGCThreads worker threads with distinct worker | 1387 // in parallel by ParallelGCThreads worker threads with distinct worker |
1363 // attempting to claim the first region in each chunk, and, if | 1392 // attempting to claim the first region in each chunk, and, if |
1364 // successful, applying the closure to each region in the chunk (and | 1393 // successful, applying the closure to each region in the chunk (and |
1365 // setting the claim value of the second and subsequent regions of the | 1394 // setting the claim value of the second and subsequent regions of the |
1366 // chunk.) For now requires that "doHeapRegion" always returns "false", | 1395 // chunk.) For now requires that "doHeapRegion" always returns "false", |
1367 // i.e., that a closure never attempt to abort a traversal. | 1396 // i.e., that a closure never attempt to abort a traversal. |
1368 void heap_region_par_iterate_chunked(HeapRegionClosure* blk, | 1397 void heap_region_par_iterate_chunked(HeapRegionClosure* cl, |
1369 uint worker, | 1398 uint worker_id, |
1370 uint no_of_par_workers, | 1399 uint num_workers, |
1371 jint claim_value); | 1400 jint claim_value) const; |
1372 | 1401 |
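The comment above describes the chunking scheme: roughly num_regions / (num_workers * 4) regions per chunk, with each worker claiming whole chunks and then processing every region in a claimed chunk. A simplified sketch of that loop; the real code claims via per-region claim values rather than a shared counter, so this shows the shape, not the implementation:

#include <atomic>
#include <cstddef>

void par_iterate(std::atomic<size_t>& next_chunk, size_t num_regions,
                 size_t num_workers, void (*do_region)(size_t)) {
  const size_t overpartition = 4;  // the "overpartition factor, currently 4"
  size_t chunk = (num_regions + num_workers * overpartition - 1) /
                 (num_workers * overpartition);
  if (chunk == 0) chunk = 1;
  for (;;) {
    size_t start = next_chunk.fetch_add(chunk);  // claim a whole chunk
    if (start >= num_regions) return;
    size_t end = start + chunk < num_regions ? start + chunk : num_regions;
    for (size_t i = start; i < end; i++) {
      do_region(i);  // apply the closure to each region in the chunk
    }
  }
}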
1373 // It resets all the region claim values to the default. | 1402 // It resets all the region claim values to the default. |
1374 void reset_heap_region_claim_values(); | 1403 void reset_heap_region_claim_values(); |
1375 | 1404 |
1376 // Resets the claim values of regions in the current | 1405 // Resets the claim values of regions in the current |
1391 | 1420 |
1392 // Given the id of a worker, obtain or calculate a suitable | 1421 // Given the id of a worker, obtain or calculate a suitable |
1393 // starting region for iterating over the current collection set. | 1422 // starting region for iterating over the current collection set. |
1394 HeapRegion* start_cset_region_for_worker(uint worker_i); | 1423 HeapRegion* start_cset_region_for_worker(uint worker_i); |
1395 | 1424 |
1396 // This is a convenience method that is used by the | |
1397 // HeapRegionIterator classes to calculate the starting region for | |
1398 // each worker so that they do not all start from the same region. | |
1399 HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers); | |
1400 | |
1401 // Iterate over the regions (if any) in the current collection set. | 1425 // Iterate over the regions (if any) in the current collection set. |
1402 void collection_set_iterate(HeapRegionClosure* blk); | 1426 void collection_set_iterate(HeapRegionClosure* blk); |
1403 | 1427 |
1404 // As above but starting from region r | 1428 // As above but starting from region r |
1405 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); | 1429 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); |
1406 | 1430 |
1407 // Returns the first (lowest address) compactible space in the heap. | 1431 HeapRegion* next_compaction_region(const HeapRegion* from) const; |
1408 virtual CompactibleSpace* first_compactible_space(); | |
1409 | 1432 |
1410 // A CollectedHeap will contain some number of spaces. This finds the | 1433 // A CollectedHeap will contain some number of spaces. This finds the |
1411 // space containing a given address, or else returns NULL. | 1434 // space containing a given address, or else returns NULL. |
1412 virtual Space* space_containing(const void* addr) const; | 1435 virtual Space* space_containing(const void* addr) const; |
1413 | 1436 |
1414 // A G1CollectedHeap will contain some number of heap regions. This | 1437 // Returns the HeapRegion that contains addr. addr must not be NULL. |
1415 // finds the region containing a given address, or else returns NULL. | 1438 template <class T> |
1439 inline HeapRegion* heap_region_containing_raw(const T addr) const; | |
1440 | |
1441 // Returns the HeapRegion that contains addr. addr must not be NULL. | |
1442 // If addr is within a humongous continues region, it returns its humongous start region. | |
1416 template <class T> | 1443 template <class T> |
1417 inline HeapRegion* heap_region_containing(const T addr) const; | 1444 inline HeapRegion* heap_region_containing(const T addr) const; |
1418 | |
1419 // Like the above, but requires "addr" to be in the heap (to avoid a | |
1420 // null-check), and unlike the above, may return an continuing humongous | |
1421 // region. | |
1422 template <class T> | |
1423 inline HeapRegion* heap_region_containing_raw(const T addr) const; | |
1424 | 1445 |
1425 // A CollectedHeap is divided into a dense sequence of "blocks"; that is, | 1446 // A CollectedHeap is divided into a dense sequence of "blocks"; that is, |
1426 // each address in the (reserved) heap is a member of exactly | 1447 // each address in the (reserved) heap is a member of exactly |
1427 // one block. The defining characteristic of a block is that it is | 1448 // one block. The defining characteristic of a block is that it is |
1428 // possible to find its size, and thus to progress forward to the next | 1449 // possible to find its size, and thus to progress forward to the next |
1561 | 1582 |
1562 // Determine if an object is dead, given the object and also | 1583 // Determine if an object is dead, given the object and also |
1563 // the region to which the object belongs. An object is dead | 1584 // the region to which the object belongs. An object is dead |
1564 // iff a) it was not allocated since the last mark and b) it | 1585 // iff a) it was not allocated since the last mark and b) it |
1565 // is not marked. | 1586 // is not marked. |
1566 | |
1567 bool is_obj_dead(const oop obj, const HeapRegion* hr) const { | 1587 bool is_obj_dead(const oop obj, const HeapRegion* hr) const { |
1568 return | 1588 return |
1569 !hr->obj_allocated_since_prev_marking(obj) && | 1589 !hr->obj_allocated_since_prev_marking(obj) && |
1570 !isMarkedPrev(obj); | 1590 !isMarkedPrev(obj); |
1571 } | 1591 } |
1572 | 1592 |
1573 // This function returns true when an object has been | 1593 // This function returns true when an object has been |
1574 // around since the previous marking and hasn't yet | 1594 // around since the previous marking and hasn't yet |
1575 // been marked during this marking. | 1595 // been marked during this marking. |
1576 | |
1577 bool is_obj_ill(const oop obj, const HeapRegion* hr) const { | 1596 bool is_obj_ill(const oop obj, const HeapRegion* hr) const { |
1578 return | 1597 return |
1579 !hr->obj_allocated_since_next_marking(obj) && | 1598 !hr->obj_allocated_since_next_marking(obj) && |
1580 !isMarkedNext(obj); | 1599 !isMarkedNext(obj); |
1581 } | 1600 } |
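// Sketch tying the two predicates together (region lookup shown for
// illustration only): is_obj_dead() judges liveness against the completed
// "prev" marking, while is_obj_ill() asks the same question against the
// in-progress "next" marking:
//
//   HeapRegion* hr = heap_region_containing(obj);
//   if (!is_obj_dead(obj, hr)) {
//     // obj was marked live, or was allocated after the previous marking
//   }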
1617 virtual void register_nmethod(nmethod* nm); | 1636 virtual void register_nmethod(nmethod* nm); |
1618 | 1637 |
1619 // Unregister the given nmethod from the G1 heap | 1638 // Unregister the given nmethod from the G1 heap |
1620 virtual void unregister_nmethod(nmethod* nm); | 1639 virtual void unregister_nmethod(nmethod* nm); |
1621 | 1640 |
1622 // Migrate the nmethods in the code root lists of the regions | |
1623 // in the collection set to regions in to-space. In the event | |
1624 // of an evacuation failure, nmethods that reference objects | |
1625 // that were not successfully evacuated are not migrated. | |
1626 void migrate_strong_code_roots(); | |
1627 | |
1628 // Free up superfluous code root memory. | 1641 // Free up superfluous code root memory. |
1629 void purge_code_root_memory(); | 1642 void purge_code_root_memory(); |
1630 | |
1631 // During an initial mark pause, mark all the code roots that | |
1632 // point into regions *not* in the collection set. | |
1633 void mark_strong_code_roots(uint worker_id); | |
1634 | 1643 |
1635 // Rebuild the strong code root lists for each region | 1644 // Rebuild the strong code root lists
1636 // after a full GC | 1645 // after a full GC |
1637 void rebuild_strong_code_roots(); | 1646 void rebuild_strong_code_roots(); |
1638 | 1647 |
1639 // Delete entries for dead interned string and clean up unreferenced symbols | 1648 // Delete entries for dead interned string and clean up unreferenced symbols |
1640 // in symbol table, possibly in parallel. | 1649 // in symbol table, possibly in parallel. |
1641 void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true); | 1650 void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true); |
1651 | |
1652 // Parallel phase of unloading/cleaning after G1 concurrent mark. | |
1653 void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred); | |
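// Call-shape sketch (the is_alive closure and g1h pointer are hypothetical
// here): both entry points take a liveness predicate, and the flags let a
// caller prune only one table or skip class-unloading work:
//
//   g1h->unlink_string_and_symbol_table(&is_alive,
//                                       true /* unlink_strings */,
//                                       true /* unlink_symbols */);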
1642 | 1654 |
1643 // Redirty logged cards in the refinement queue. | 1655 // Redirty logged cards in the refinement queue. |
1644 void redirty_logged_cards(); | 1656 void redirty_logged_cards(); |
1645 // Verification | 1657 // Verification |
1646 | 1658 |
1709 | 1721 |
1710 protected: | 1722 protected: |
1711 size_t _max_heap_capacity; | 1723 size_t _max_heap_capacity; |
1712 }; | 1724 }; |
1713 | 1725 |
1714 class G1ParGCAllocBuffer: public ParGCAllocBuffer { | |
1715 private: | |
1716 bool _retired; | |
1717 | |
1718 public: | |
1719 G1ParGCAllocBuffer(size_t gclab_word_size); | |
1720 | |
1721 void set_buf(HeapWord* buf) { | |
1722 ParGCAllocBuffer::set_buf(buf); | |
1723 _retired = false; | |
1724 } | |
1725 | |
1726 void retire(bool end_of_gc, bool retain) { | |
1727 if (_retired) | |
1728 return; | |
1729 ParGCAllocBuffer::retire(end_of_gc, retain); | |
1730 _retired = true; | |
1731 } | |
1732 }; | |
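// Behavioural sketch for the _retired guard above (the size and chunk are
// hypothetical): retiring twice is harmless because the second call returns
// early:
//
//   G1ParGCAllocBuffer plab(1024 /* gclab_word_size */);
//   plab.set_buf(chunk);          // buffer live again, _retired = false
//   plab.retire(false, false);    // forwards to ParGCAllocBuffer::retire
//   plab.retire(false, false);    // no-op: _retired is already true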
1733 | |
1734 class G1ParScanThreadState : public StackObj { | |
1735 protected: | |
1736 G1CollectedHeap* _g1h; | |
1737 RefToScanQueue* _refs; | |
1738 DirtyCardQueue _dcq; | |
1739 G1SATBCardTableModRefBS* _ct_bs; | |
1740 G1RemSet* _g1_rem; | |
1741 | |
1742 G1ParGCAllocBuffer _surviving_alloc_buffer; | |
1743 G1ParGCAllocBuffer _tenured_alloc_buffer; | |
1744 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount]; | |
1745 ageTable _age_table; | |
1746 | |
1747 G1ParScanClosure _scanner; | |
1748 | |
1749 size_t _alloc_buffer_waste; | |
1750 size_t _undo_waste; | |
1751 | |
1752 OopsInHeapRegionClosure* _evac_failure_cl; | |
1753 | |
1754 int _hash_seed; | |
1755 uint _queue_num; | |
1756 | |
1757 size_t _term_attempts; | |
1758 | |
1759 double _start; | |
1760 double _start_strong_roots; | |
1761 double _strong_roots_time; | |
1762 double _start_term; | |
1763 double _term_time; | |
1764 | |
1765 // Map from young-age-index (0 == not young, 1 is youngest) to | |
1766 // surviving words. base is what we get back from the malloc call | |
1767 size_t* _surviving_young_words_base; | |
1768 // this points into the array, as we use the first few entries for padding | |
1769 size_t* _surviving_young_words; | |
1770 | |
1771 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t)) | |
1772 | |
1773 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } | |
1774 | |
1775 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } | |
1776 | |
1777 DirtyCardQueue& dirty_card_queue() { return _dcq; } | |
1778 G1SATBCardTableModRefBS* ctbs() { return _ct_bs; } | |
1779 | |
1780 template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid); | |
1781 | |
1782 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) { | |
1783 // If the new value of the field points into the same region, or if the | |
1784 // source region is to-space (a survivor), we can skip the RSet update. | |
1785 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) { | |
1786 size_t card_index = ctbs()->index_for(p); | |
1787 // If the card hasn't been added to the buffer, do it. | |
1788 if (ctbs()->mark_card_deferred(card_index)) { | |
1789 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index)); | |
1790 } | |
1791 } | |
1792 } | |
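// Worked case for deferred_rs_update() (the region names and worker id are
// hypothetical): p lives in region A, A is not a survivor, and *p now points
// into region B, so the card covering p is interesting:
//
//   deferred_rs_update(region_A, p, worker_id);
//   // -> index_for(p) names p's card; if mark_card_deferred() newly marked
//   //    it, the card is enqueued on _dcq for later refinement rather than
//   //    being scanned immediately.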
1793 | |
1794 public: | |
1795 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp); | |
1796 | |
1797 ~G1ParScanThreadState() { | |
1798 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC); | |
1799 } | |
1800 | |
1801 RefToScanQueue* refs() { return _refs; } | |
1802 ageTable* age_table() { return &_age_table; } | |
1803 | |
1804 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { | |
1805 return _alloc_buffers[purpose]; | |
1806 } | |
1807 | |
1808 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; } | |
1809 size_t undo_waste() const { return _undo_waste; } | |
1810 | |
1811 #ifdef ASSERT | |
1812 bool verify_ref(narrowOop* ref) const; | |
1813 bool verify_ref(oop* ref) const; | |
1814 bool verify_task(StarTask ref) const; | |
1815 #endif // ASSERT | |
1816 | |
1817 template <class T> void push_on_queue(T* ref) { | |
1818 assert(verify_ref(ref), "sanity"); | |
1819 refs()->push(ref); | |
1820 } | |
1821 | |
1822 template <class T> inline void update_rs(HeapRegion* from, T* p, int tid); | |
1823 | |
1824 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { | |
1825 HeapWord* obj = NULL; | |
1826 size_t gclab_word_size = _g1h->desired_plab_sz(purpose); | |
1827 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { | |
1828 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); | |
1829 add_to_alloc_buffer_waste(alloc_buf->words_remaining()); | |
1830 alloc_buf->retire(false /* end_of_gc */, false /* retain */); | |
1831 | |
1832 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size); | |
1833 if (buf == NULL) return NULL; // Let caller handle allocation failure. | |
1834 // Otherwise. | |
1835 alloc_buf->set_word_size(gclab_word_size); | |
1836 alloc_buf->set_buf(buf); | |
1837 | |
1838 obj = alloc_buf->allocate(word_sz); | |
1839 assert(obj != NULL, "buffer was definitely big enough..."); | |
1840 } else { | |
1841 obj = _g1h->par_allocate_during_gc(purpose, word_sz); | |
1842 } | |
1843 return obj; | |
1844 } | |
1845 | |
1846 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { | |
1847 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); | |
1848 if (obj != NULL) return obj; | |
1849 return allocate_slow(purpose, word_sz); | |
1850 } | |
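// Allocation contract sketch (word_sz hypothetical): the fast path bumps the
// current PLAB; allocate_slow() either retires and refills the PLAB (for
// small requests) or allocates directly in the heap. A NULL result means the
// caller must take the evacuation-failure path:
//
//   HeapWord* dst = allocate(GCAllocForSurvived, word_sz);
//   if (dst == NULL) {
//     // evacuation failure: leave obj in place and self-forward it
//   }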
1851 | |
1852 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { | |
1853 if (alloc_buffer(purpose)->contains(obj)) { | |
1854 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1), | |
1855 "should contain whole object"); | |
1856 alloc_buffer(purpose)->undo_allocation(obj, word_sz); | |
1857 } else { | |
1858 CollectedHeap::fill_with_object(obj, word_sz); | |
1859 add_to_undo_waste(word_sz); | |
1860 } | |
1861 } | |
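// Undo sketch: when a racing thread wins the copy, the space reserved here is
// returned. Inside the PLAB the bump pointer simply moves back; outside it
// the hole is plugged with a filler object and counted as undo waste:
//
//   undo_allocation(GCAllocForSurvived, dst, word_sz);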
1862 | |
1863 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { | |
1864 _evac_failure_cl = evac_failure_cl; | |
1865 } | |
1866 OopsInHeapRegionClosure* evac_failure_closure() { | |
1867 return _evac_failure_cl; | |
1868 } | |
1869 | |
1870 int* hash_seed() { return &_hash_seed; } | |
1871 uint queue_num() { return _queue_num; } | |
1872 | |
1873 size_t term_attempts() const { return _term_attempts; } | |
1874 void note_term_attempt() { _term_attempts++; } | |
1875 | |
1876 void start_strong_roots() { | |
1877 _start_strong_roots = os::elapsedTime(); | |
1878 } | |
1879 void end_strong_roots() { | |
1880 _strong_roots_time += (os::elapsedTime() - _start_strong_roots); | |
1881 } | |
1882 double strong_roots_time() const { return _strong_roots_time; } | |
1883 | |
1884 void start_term_time() { | |
1885 note_term_attempt(); | |
1886 _start_term = os::elapsedTime(); | |
1887 } | |
1888 void end_term_time() { | |
1889 _term_time += (os::elapsedTime() - _start_term); | |
1890 } | |
1891 double term_time() const { return _term_time; } | |
1892 | |
1893 double elapsed_time() const { | |
1894 return os::elapsedTime() - _start; | |
1895 } | |
1896 | |
1897 static void | |
1898 print_termination_stats_hdr(outputStream* const st = gclog_or_tty); | |
1899 void | |
1900 print_termination_stats(int i, outputStream* const st = gclog_or_tty) const; | |
1901 | |
1902 size_t* surviving_young_words() { | |
1903 // We add one to hide entry 0 which accumulates surviving words for | |
1904 // age -1 regions (i.e. non-young ones) | |
1905 return _surviving_young_words; | |
1906 } | |
1907 | |
1908 void retire_alloc_buffers() { | |
1909 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
1910 size_t waste = _alloc_buffers[ap]->words_remaining(); | |
1911 add_to_alloc_buffer_waste(waste); | |
1912 _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap), | |
1913 true /* end_of_gc */, | |
1914 false /* retain */); | |
1915 } | |
1916 } | |
1917 private: | |
1918 #define G1_PARTIAL_ARRAY_MASK 0x2 | |
1919 | |
1920 inline bool has_partial_array_mask(oop* ref) const { | |
1921 return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK; | |
1922 } | |
1923 | |
1924 // We never encode partial array oops as narrowOop*, so return false immediately. | |
1925 // This allows the compiler to create optimized code when popping references from | |
1926 // the work queue. | |
1927 inline bool has_partial_array_mask(narrowOop* ref) const { | |
1928 assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*"); | |
1929 return false; | |
1930 } | |
1931 | |
1932 // Only implement set_partial_array_mask() for regular oops, not for narrowOops. | |
1933 // We always encode partial arrays as regular oops, to allow the | |
1934 // specialization of has_partial_array_mask() for narrowOops above. | |
1935 // This means that unintentional use of this method with narrowOops is | |
1936 // caught by the compiler. | |
1937 inline oop* set_partial_array_mask(oop obj) const { | |
1938 assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!"); | |
1939 return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK); | |
1940 } | |
1941 | |
1942 inline oop clear_partial_array_mask(oop* ref) const { | |
1943 return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK); | |
1944 } | |
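// Worked example of the tag bit (addresses hypothetical): heap oops are at
// least word-aligned, so bit 0x2 of a queued pointer is free to mean
// "partial array task":
//
//   oop obj     = (oop)0x100000;                  // low bits are zero
//   oop* tagged = set_partial_array_mask(obj);    // (oop*)0x100002
//   has_partial_array_mask(tagged);               // true
//   clear_partial_array_mask(tagged);             // (oop)0x100000 again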
1945 | |
1946 inline void do_oop_partial_array(oop* p); | |
1947 | |
1948 // This method is applied to the fields of the objects that have just been copied. | |
1949 template <class T> void do_oop_evac(T* p, HeapRegion* from) { | |
1950 assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)), | |
1951 "Reference should not be NULL here as such are never pushed to the task queue."); | |
1952 oop obj = oopDesc::load_decode_heap_oop_not_null(p); | |
1953 | |
1954 // Although we never intentionally push references outside of the collection | |
1955 // set, (benign) races in the claim mechanism during RSet scanning mean that | |
1956 // more than one thread might claim the same card, so the same card may be | |
1957 // processed multiple times. Hence we redo this check. | |
1958 if (_g1h->in_cset_fast_test(obj)) { | |
1959 oop forwardee; | |
1960 if (obj->is_forwarded()) { | |
1961 forwardee = obj->forwardee(); | |
1962 } else { | |
1963 forwardee = copy_to_survivor_space(obj); | |
1964 } | |
1965 assert(forwardee != NULL, "forwardee should not be NULL"); | |
1966 oopDesc::encode_store_heap_oop(p, forwardee); | |
1967 } | |
1968 | |
1969 assert(obj != NULL, "Must be"); | |
1970 update_rs(from, p, queue_num()); | |
1971 } | |
1972 public: | |
1973 | |
1974 oop copy_to_survivor_space(oop const obj); | |
1975 | |
1976 template <class T> inline void deal_with_reference(T* ref_to_scan); | |
1977 | |
1978 inline void deal_with_reference(StarTask ref); | |
1979 | |
1980 public: | |
1981 void trim_queue(); | |
1982 }; | |
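// Per-worker driver sketch (hypothetical; in this codebase the real loop
// lives in the parallel evacuation task in g1CollectedHeap.cpp):
//
//   G1ParScanThreadState pss(g1h, worker_id, rp);
//   pss.set_evac_failure_closure(&evac_failure_cl);
//   pss.start_strong_roots();
//   // ... root scanning pushes work via pss.push_on_queue(...) ...
//   pss.end_strong_roots();
//   pss.trim_queue();             // drain: deal_with_reference() per task
//   pss.retire_alloc_buffers();   // flush PLAB stats at end of the pause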
1983 | |
1984 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP | 1726 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP |