comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 20404:227a9e5e4b4a

8057536: Refactor G1 to allow context specific allocations Summary: Splitting out a g1 allocator class to simplify specialized allocators which can associate each allocation with a given context. Reviewed-by: mgerdin, brutisso
author sjohanss
date Fri, 05 Sep 2014 09:49:19 +0200
parents a8ea2f110d87
children d35872270666
comparison
equal deleted inserted replaced
20403:8ec8971f511a 20404:227a9e5e4b4a
23 */ 23 */
24 24
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
27 27
28 #include "gc_implementation/g1/g1AllocationContext.hpp"
29 #include "gc_implementation/g1/g1Allocator.hpp"
28 #include "gc_implementation/g1/concurrentMark.hpp" 30 #include "gc_implementation/g1/concurrentMark.hpp"
29 #include "gc_implementation/g1/evacuationInfo.hpp" 31 #include "gc_implementation/g1/evacuationInfo.hpp"
30 #include "gc_implementation/g1/g1AllocRegion.hpp" 32 #include "gc_implementation/g1/g1AllocRegion.hpp"
31 #include "gc_implementation/g1/g1BiasedArray.hpp" 33 #include "gc_implementation/g1/g1BiasedArray.hpp"
32 #include "gc_implementation/g1/g1HRPrinter.hpp" 34 #include "gc_implementation/g1/g1HRPrinter.hpp"
78 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet; 80 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
79 81
80 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) 82 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
81 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) 83 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
82 84
// Purposes for which the GC allocates during an evacuation pause.
// Used to select per-purpose allocation state, e.g. the matching
// PLAB statistics (_survivor_plab_stats / _old_plab_stats) in
// stats_for_purpose() and the per-purpose GC alloc regions.
enum GCAllocPurpose {
  GCAllocForTenured,   // allocations for objects promoted to the old generation
  GCAllocForSurvived,  // allocations for objects copied to survivor regions
  GCAllocPurposeCount  // number of purposes above; not itself a valid purpose
};
88
89 class YoungList : public CHeapObj<mtGC> { 85 class YoungList : public CHeapObj<mtGC> {
90 private: 86 private:
91 G1CollectedHeap* _g1h; 87 G1CollectedHeap* _g1h;
92 88
93 HeapRegion* _head; 89 HeapRegion* _head;
154 150
155 // debugging 151 // debugging
156 bool check_list_well_formed(); 152 bool check_list_well_formed();
157 bool check_list_empty(bool check_sample = true); 153 bool check_list_empty(bool check_sample = true);
158 void print(); 154 void print();
159 };
160
// Alloc region used to satisfy mutator allocation requests.
// Region allocation and retirement are delegated back to
// G1CollectedHeap (a friend) through the two virtual callbacks
// below. Created with bot_updates = false: mutator regions are
// young, so no block-offset-table maintenance is needed.
class MutatorAllocRegion : public G1AllocRegion {
protected:
  // Callback: obtain a new region for this alloc region to use.
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  // Callback: retire the current region once it is (mostly) full.
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  MutatorAllocRegion()
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
169
// Alloc region used to satisfy allocation requests by the GC for
// survivor objects. Region allocation/retirement is delegated to
// G1CollectedHeap via the virtual callbacks. bot_updates = false:
// survivor regions are young, so the block offset table is not
// maintained for them.
class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
  // Callback: obtain a new survivor region to allocate into.
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  // Callback: retire the current survivor region.
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  SurvivorGCAllocRegion()
    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};
178
// Alloc region used to satisfy allocation requests by the GC for
// old (tenured) objects. Unlike the mutator/survivor variants this
// is constructed with bot_updates = true, since old regions require
// block-offset-table updates on allocation.
class OldGCAllocRegion : public G1AllocRegion {
protected:
  // Callback: obtain a new old region to allocate into.
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  // Callback: retire the current old region.
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  OldGCAllocRegion()
    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }

  // This specialization of release() makes sure that the last card that has been
  // allocated into has been completely filled by a dummy object.
  // This avoids races when remembered set scanning wants to update the BOT of the
  // last card in the retained old gc alloc region, and allocation threads
  // allocating into that card at the same time.
  virtual HeapRegion* release();
};
194 156
195 // The G1 STW is alive closure. 157 // The G1 STW is alive closure.
196 // An instance is embedded into the G1CH and used as the 158 // An instance is embedded into the G1CH and used as the
197 // (optional) _is_alive_non_header closure in the STW 159 // (optional) _is_alive_non_header closure in the STW
220 friend class VM_G1IncCollectionPause; 182 friend class VM_G1IncCollectionPause;
221 friend class VMStructs; 183 friend class VMStructs;
222 friend class MutatorAllocRegion; 184 friend class MutatorAllocRegion;
223 friend class SurvivorGCAllocRegion; 185 friend class SurvivorGCAllocRegion;
224 friend class OldGCAllocRegion; 186 friend class OldGCAllocRegion;
187 friend class G1Allocator;
188 friend class G1DefaultAllocator;
189 friend class G1ResManAllocator;
225 190
226 // Closures used in implementation. 191 // Closures used in implementation.
227 template <G1Barrier barrier, G1Mark do_mark_object> 192 template <G1Barrier barrier, G1Mark do_mark_object>
228 friend class G1ParCopyClosure; 193 friend class G1ParCopyClosure;
229 friend class G1IsAliveClosure; 194 friend class G1IsAliveClosure;
230 friend class G1EvacuateFollowersClosure; 195 friend class G1EvacuateFollowersClosure;
231 friend class G1ParScanThreadState; 196 friend class G1ParScanThreadState;
232 friend class G1ParScanClosureSuper; 197 friend class G1ParScanClosureSuper;
233 friend class G1ParEvacuateFollowersClosure; 198 friend class G1ParEvacuateFollowersClosure;
234 friend class G1ParTask; 199 friend class G1ParTask;
200 friend class G1ParGCAllocator;
201 friend class G1DefaultParGCAllocator;
235 friend class G1FreeGarbageRegionClosure; 202 friend class G1FreeGarbageRegionClosure;
236 friend class RefineCardTableEntryClosure; 203 friend class RefineCardTableEntryClosure;
237 friend class G1PrepareCompactClosure; 204 friend class G1PrepareCompactClosure;
238 friend class RegionSorter; 205 friend class RegionSorter;
239 friend class RegionResetter; 206 friend class RegionResetter;
291 G1RegionMappingChangedListener _listener; 258 G1RegionMappingChangedListener _listener;
292 259
293 // The sequence of all heap regions in the heap. 260 // The sequence of all heap regions in the heap.
294 HeapRegionManager _hrm; 261 HeapRegionManager _hrm;
295 262
296 // Alloc region used to satisfy mutator allocation requests. 263 // Class that handles the different kinds of allocations.
297 MutatorAllocRegion _mutator_alloc_region; 264 G1Allocator* _allocator;
298
299 // Alloc region used to satisfy allocation requests by the GC for
300 // survivor objects.
301 SurvivorGCAllocRegion _survivor_gc_alloc_region;
302 265
303 // PLAB sizing policy for survivors. 266 // PLAB sizing policy for survivors.
304 PLABStats _survivor_plab_stats; 267 PLABStats _survivor_plab_stats;
305 268
306 // Alloc region used to satisfy allocation requests by the GC for
307 // old objects.
308 OldGCAllocRegion _old_gc_alloc_region;
309
310 // PLAB sizing policy for tenured objects. 269 // PLAB sizing policy for tenured objects.
311 PLABStats _old_plab_stats; 270 PLABStats _old_plab_stats;
312
313 PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
314 PLABStats* stats = NULL;
315
316 switch (purpose) {
317 case GCAllocForSurvived:
318 stats = &_survivor_plab_stats;
319 break;
320 case GCAllocForTenured:
321 stats = &_old_plab_stats;
322 break;
323 default:
324 assert(false, "unrecognized GCAllocPurpose");
325 }
326
327 return stats;
328 }
329
330 // The last old region we allocated to during the last GC.
331 // Typically, it is not full so we should re-use it during the next GC.
332 HeapRegion* _retained_old_gc_alloc_region;
333 271
334 // It specifies whether we should attempt to expand the heap after a 272 // It specifies whether we should attempt to expand the heap after a
335 // region allocation failure. If heap expansion fails we set this to 273 // region allocation failure. If heap expansion fails we set this to
336 // false so that we don't re-attempt the heap expansion (it's likely 274 // false so that we don't re-attempt the heap expansion (it's likely
337 // that subsequent expansion attempts will also fail if one fails). 275 // that subsequent expansion attempts will also fail if one fails).
346 void release_mutator_alloc_region(); 284 void release_mutator_alloc_region();
347 285
348 // It initializes the GC alloc regions at the start of a GC. 286 // It initializes the GC alloc regions at the start of a GC.
349 void init_gc_alloc_regions(EvacuationInfo& evacuation_info); 287 void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
350 288
351 // Setup the retained old gc alloc region as the currrent old gc alloc region.
352 void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
353
354 // It releases the GC alloc regions at the end of a GC. 289 // It releases the GC alloc regions at the end of a GC.
355 void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info); 290 void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
356 291
357 // It does any cleanup that needs to be done on the GC alloc regions 292 // It does any cleanup that needs to be done on the GC alloc regions
358 // before a Full GC. 293 // before a Full GC.
359 void abandon_gc_alloc_regions(); 294 void abandon_gc_alloc_regions();
360 295
361 // Helper for monitoring and management support. 296 // Helper for monitoring and management support.
362 G1MonitoringSupport* _g1mm; 297 G1MonitoringSupport* _g1mm;
363
364 // Determines PLAB size for a particular allocation purpose.
365 size_t desired_plab_sz(GCAllocPurpose purpose);
366
367 // Outside of GC pauses, the number of bytes used in all regions other
368 // than the current allocation region.
369 size_t _summary_bytes_used;
370 298
371 // Records whether the region at the given index is kept live by roots or 299 // Records whether the region at the given index is kept live by roots or
372 // references from the young generation. 300 // references from the young generation.
373 class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> { 301 class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
374 protected: 302 protected:
523 // Initialize a contiguous set of free regions of length num_regions 451 // Initialize a contiguous set of free regions of length num_regions
524 // and starting at index first so that they appear as a single 452 // and starting at index first so that they appear as a single
525 // humongous region. 453 // humongous region.
526 HeapWord* humongous_obj_allocate_initialize_regions(uint first, 454 HeapWord* humongous_obj_allocate_initialize_regions(uint first,
527 uint num_regions, 455 uint num_regions,
528 size_t word_size); 456 size_t word_size,
457 AllocationContext_t context);
529 458
530 // Attempt to allocate a humongous object of the given size. Return 459 // Attempt to allocate a humongous object of the given size. Return
531 // NULL if unsuccessful. 460 // NULL if unsuccessful.
532 HeapWord* humongous_obj_allocate(size_t word_size); 461 HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
533 462
534 // The following two methods, allocate_new_tlab() and 463 // The following two methods, allocate_new_tlab() and
535 // mem_allocate(), are the two main entry points from the runtime 464 // mem_allocate(), are the two main entry points from the runtime
536 // into the G1's allocation routines. They have the following 465 // into the G1's allocation routines. They have the following
537 // assumptions: 466 // assumptions:
583 512
584 // Second-level mutator allocation attempt: take the Heap_lock and 513 // Second-level mutator allocation attempt: take the Heap_lock and
585 // retry the allocation attempt, potentially scheduling a GC 514 // retry the allocation attempt, potentially scheduling a GC
586 // pause. This should only be used for non-humongous allocations. 515 // pause. This should only be used for non-humongous allocations.
587 HeapWord* attempt_allocation_slow(size_t word_size, 516 HeapWord* attempt_allocation_slow(size_t word_size,
517 AllocationContext_t context,
588 unsigned int* gc_count_before_ret, 518 unsigned int* gc_count_before_ret,
589 int* gclocker_retry_count_ret); 519 int* gclocker_retry_count_ret);
590 520
591 // Takes the Heap_lock and attempts a humongous allocation. It can 521 // Takes the Heap_lock and attempts a humongous allocation. It can
592 // potentially schedule a GC pause. 522 // potentially schedule a GC pause.
597 // Allocation attempt that should be called during safepoints (e.g., 527 // Allocation attempt that should be called during safepoints (e.g.,
598 // at the end of a successful GC). expect_null_mutator_alloc_region 528 // at the end of a successful GC). expect_null_mutator_alloc_region
599 // specifies whether the mutator alloc region is expected to be NULL 529 // specifies whether the mutator alloc region is expected to be NULL
600 // or not. 530 // or not.
601 HeapWord* attempt_allocation_at_safepoint(size_t word_size, 531 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
602 bool expect_null_mutator_alloc_region); 532 AllocationContext_t context,
533 bool expect_null_mutator_alloc_region);
603 534
604 // It dirties the cards that cover the block so that so that the post 535 // It dirties the cards that cover the block so that so that the post
605 // write barrier never queues anything when updating objects on this 536 // write barrier never queues anything when updating objects on this
606 // block. It is assumed (and in fact we assert) that the block 537 // block. It is assumed (and in fact we assert) that the block
607 // belongs to a young region. 538 // belongs to a young region.
609 540
610 // Allocate blocks during garbage collection. Will ensure an 541 // Allocate blocks during garbage collection. Will ensure an
611 // allocation region, either by picking one or expanding the 542 // allocation region, either by picking one or expanding the
612 // heap, and then allocate a block of the given size. The block 543 // heap, and then allocate a block of the given size. The block
613 // may not be a humongous - it must fit into a single heap region. 544 // may not be a humongous - it must fit into a single heap region.
614 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); 545 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
546 size_t word_size,
547 AllocationContext_t context);
615 548
616 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, 549 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
617 HeapRegion* alloc_region, 550 HeapRegion* alloc_region,
618 bool par, 551 bool par,
619 size_t word_size); 552 size_t word_size);
621 // Ensure that no further allocations can happen in "r", bearing in mind 554 // Ensure that no further allocations can happen in "r", bearing in mind
622 // that parallel threads might be attempting allocations. 555 // that parallel threads might be attempting allocations.
623 void par_allocate_remaining_space(HeapRegion* r); 556 void par_allocate_remaining_space(HeapRegion* r);
624 557
625 // Allocation attempt during GC for a survivor object / PLAB. 558 // Allocation attempt during GC for a survivor object / PLAB.
626 inline HeapWord* survivor_attempt_allocation(size_t word_size); 559 inline HeapWord* survivor_attempt_allocation(size_t word_size,
560 AllocationContext_t context);
627 561
628 // Allocation attempt during GC for an old object / PLAB. 562 // Allocation attempt during GC for an old object / PLAB.
629 inline HeapWord* old_attempt_allocation(size_t word_size); 563 inline HeapWord* old_attempt_allocation(size_t word_size,
564 AllocationContext_t context);
630 565
631 // These methods are the "callbacks" from the G1AllocRegion class. 566 // These methods are the "callbacks" from the G1AllocRegion class.
632 567
633 // For mutator alloc regions. 568 // For mutator alloc regions.
634 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force); 569 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
663 void resize_if_necessary_after_full_collection(size_t word_size); 598 void resize_if_necessary_after_full_collection(size_t word_size);
664 599
665 // Callback from VM_G1CollectForAllocation operation. 600 // Callback from VM_G1CollectForAllocation operation.
666 // This function does everything necessary/possible to satisfy a 601 // This function does everything necessary/possible to satisfy a
667 // failed allocation request (including collection, expansion, etc.) 602 // failed allocation request (including collection, expansion, etc.)
668 HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded); 603 HeapWord* satisfy_failed_allocation(size_t word_size,
604 AllocationContext_t context,
605 bool* succeeded);
669 606
670 // Attempting to expand the heap sufficiently 607 // Attempting to expand the heap sufficiently
671 // to support an allocation of the given "word_size". If 608 // to support an allocation of the given "word_size". If
672 // successful, perform the allocation and return the address of the 609 // successful, perform the allocation and return the address of the
673 // allocated block, or else "NULL". 610 // allocated block, or else "NULL".
674 HeapWord* expand_and_allocate(size_t word_size); 611 HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
675 612
676 // Process any reference objects discovered during 613 // Process any reference objects discovered during
677 // an incremental evacuation pause. 614 // an incremental evacuation pause.
678 void process_discovered_references(uint no_of_gc_workers); 615 void process_discovered_references(uint no_of_gc_workers);
679 616
691 // Expand the garbage-first heap by at least the given size (in bytes!). 628 // Expand the garbage-first heap by at least the given size (in bytes!).
692 // Returns true if the heap was expanded by the requested amount; 629 // Returns true if the heap was expanded by the requested amount;
693 // false otherwise. 630 // false otherwise.
694 // (Rounds up to a HeapRegion boundary.) 631 // (Rounds up to a HeapRegion boundary.)
695 bool expand(size_t expand_bytes); 632 bool expand(size_t expand_bytes);
633
  // Returns the PLAB statistics object matching the given allocation
  // purpose: _survivor_plab_stats for survivor copies,
  // _old_plab_stats for promotions to the old generation. Any other
  // value trips the assert, in which case NULL is returned.
  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
    PLABStats* stats = NULL;

    switch (purpose) {
    case GCAllocForSurvived:
      stats = &_survivor_plab_stats;
      break;
    case GCAllocForTenured:
      stats = &_old_plab_stats;
      break;
    default:
      assert(false, "unrecognized GCAllocPurpose");
    }

    return stats;
  }
651
652 // Determines PLAB size for a particular allocation purpose.
653 size_t desired_plab_sz(GCAllocPurpose purpose);
696 654
697 // Do anything common to GC's. 655 // Do anything common to GC's.
698 virtual void gc_prologue(bool full); 656 virtual void gc_prologue(bool full);
699 virtual void gc_epilogue(bool full); 657 virtual void gc_epilogue(bool full);
700 658
1269 void wait_while_free_regions_coming(); 1227 void wait_while_free_regions_coming();
1270 1228
1271 // Determine whether the given region is one that we are using as an 1229 // Determine whether the given region is one that we are using as an
1272 // old GC alloc region. 1230 // old GC alloc region.
1273 bool is_old_gc_alloc_region(HeapRegion* hr) { 1231 bool is_old_gc_alloc_region(HeapRegion* hr) {
1274 return hr == _retained_old_gc_alloc_region; 1232 return _allocator->is_retained_old_region(hr);
1275 } 1233 }
1276 1234
1277 // Perform a collection of the heap; intended for use in implementing 1235 // Perform a collection of the heap; intended for use in implementing
1278 // "System.gc". This probably implies as full a collection as the 1236 // "System.gc". This probably implies as full a collection as the
1279 // "CollectedHeap" supports. 1237 // "CollectedHeap" supports.
1750 1708
1751 protected: 1709 protected:
1752 size_t _max_heap_capacity; 1710 size_t _max_heap_capacity;
1753 }; 1711 };
1754 1712
// A ParGCAllocBuffer that tracks whether it has been retired, making
// retire() idempotent and letting the destructor guarantee that no
// buffer is destroyed without having been retired first.
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  bool _retired;  // true once retire() has run for the current buffer

public:
  G1ParGCAllocBuffer(size_t gclab_word_size);
  virtual ~G1ParGCAllocBuffer() {
    // Destroying an un-retired buffer would lose its unflushed state.
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  // Installing a new backing buffer re-arms the retire obligation.
  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  // Retires the current buffer at most once; subsequent calls are
  // no-ops until a new buffer is installed via set_buf().
  virtual void retire(bool end_of_gc, bool retain) {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};
1778
1779 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP 1713 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP