comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 3830:f44782f04dd4

7039627: G1: avoid BOT updates for survivor allocations and dirty survivor regions incrementally
Summary: Refactor the allocation code during GC to use the G1AllocRegion
abstraction. Use separate subclasses of G1AllocRegion for survivor and old
regions. Avoid BOT updates and dirty survivor cards incrementally for the
former.
Reviewed-by: brutisso, johnc, ysr
author tonyp
date Fri, 12 Aug 2011 11:31:06 -0400
parents 14a2fd14c0db
children ff53346271fe
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	(3829:87e40b34bc2b)
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	(3830:f44782f04dd4)
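A note on what the bot_updates constructor argument below controls. G1's Block
Offset Table (BOT) lets a card scan find the start of the object covering an
arbitrary address, which only matters for regions that will later be scanned
card by card. Survivor regions are young and always evacuated wholesale, so
allocations into them can skip BOT maintenance; old regions cannot. A minimal
sketch of the dispatch this flag implies follows; the allocate_no_bot_updates()
helper name is an illustrative assumption, not a quote from g1AllocRegion.cpp:

// Sketch only: how a G1AllocRegion-style allocation path might branch on
// the bot_updates flag passed to the constructors in this file.
inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region,
                                             size_t word_size,
                                             bool bot_updates) {
  if (!bot_updates) {
    // Survivor path: plain bump-pointer/CAS allocation, no BOT writes.
    return alloc_region->allocate_no_bot_updates(word_size); // assumed helper
  } else {
    // Old path: keep the BOT accurate so later card scans can locate
    // object starts inside the region.
    return alloc_region->par_allocate(word_size);
  }
}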
@@ -153,18 +153,38 @@
 public:
   MutatorAllocRegion()
   : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 };
 
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+};
+
 class RefineCardTableEntryClosure;
 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
   friend class VM_GenCollectForPermanentAllocation;
   friend class VM_G1CollectFull;
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
   friend class MutatorAllocRegion;
+  friend class SurvivorGCAllocRegion;
+  friend class OldGCAllocRegion;
 
   // Closures used in implementation.
   friend class G1ParCopyHelper;
   friend class G1IsAliveClosure;
   friend class G1EvacuateFollowersClosure;
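The two new subclasses only declare their virtuals here; the definitions
would live in g1CollectedHeap.cpp. A plausible wiring, consistent with the
new_gc_alloc_region()/retire_gc_alloc_region() callbacks declared later in
this header, is sketched below. The _g1h back-pointer, the count() accessor,
and the assert on force follow the G1AllocRegion protocol but are assumptions
here, not quotes from the changeset:

// Sketch: the subclasses delegate to the G1CollectedHeap callbacks,
// tagging each request with the matching GCAllocPurpose.
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
                                                       bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
}

void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
                                          size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
                               GCAllocForSurvived);
}

HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
                                                  bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
}

void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
                                     size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
                               GCAllocForTenured);
}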
@@ -223,54 +243,43 @@
   HeapRegionSeq _hrs;
 
   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;
 
+  // Alloc region used to satisfy allocation requests by the GC for
+  // survivor objects.
+  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // old objects.
+  OldGCAllocRegion _old_gc_alloc_region;
+
+  // The last old region we allocated to during the last GC.
+  // Typically, it is not full so we should re-use it during the next GC.
+  HeapRegion* _retained_old_gc_alloc_region;
+
   // It resets the mutator alloc region before new allocations can take place.
   void init_mutator_alloc_region();
 
   // It releases the mutator alloc region.
   void release_mutator_alloc_region();
 
+  // It initializes the GC alloc regions at the start of a GC.
+  void init_gc_alloc_regions();
+
+  // It releases the GC alloc regions at the end of a GC.
+  void release_gc_alloc_regions();
+
+  // It does any cleanup that needs to be done on the GC alloc regions
+  // before a Full GC.
   void abandon_gc_alloc_regions();
-
-  // The to-space memory regions into which objects are being copied during
-  // a GC.
-  HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
-  size_t _gc_alloc_region_counts[GCAllocPurposeCount];
-  // These are the regions, one per GCAllocPurpose, that are half-full
-  // at the end of a collection and that we want to reuse during the
-  // next collection.
-  HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount];
-  // This specifies whether we will keep the last half-full region at
-  // the end of a collection so that it can be reused during the next
-  // collection (this is specified per GCAllocPurpose)
-  bool _retain_gc_alloc_region[GCAllocPurposeCount];
-
-  // A list of the regions that have been set to be alloc regions in the
-  // current collection.
-  HeapRegion* _gc_alloc_region_list;
 
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
   // Determines PLAB size for a particular allocation purpose.
   static size_t desired_plab_sz(GCAllocPurpose purpose);
-
-  // When called by par thread, requires the FreeList_lock to be held.
-  void push_gc_alloc_region(HeapRegion* hr);
-
-  // This should only be called single-threaded. Undeclares all GC alloc
-  // regions.
-  void forget_alloc_region_list();
-
-  // Should be used to set an alloc region, because there's other
-  // associated bookkeeping.
-  void set_gc_alloc_region(int purpose, HeapRegion* r);
-
-  // Check well-formedness of alloc region list.
-  bool check_gc_alloc_regions();
 
   // Outside of GC pauses, the number of bytes used in all regions other
   // than the current allocation region.
   size_t _summary_bytes_used;
 
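The single _retained_old_gc_alloc_region field replaces the per-purpose
_retained_gc_alloc_regions[] array deleted above: only the old region is worth
carrying across GCs, since survivor regions are added to the young list and
will be collected in the next pause anyway. One possible shape of the
lifecycle, with the init()/set()/release() method names assumed from the
G1AllocRegion protocol rather than quoted from this changeset:

// Sketch: GC alloc region lifecycle implied by the fields above.
void G1CollectedHeap::init_gc_alloc_regions() {
  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  HeapRegion* retained = _retained_old_gc_alloc_region;
  _retained_old_gc_alloc_region = NULL;
  if (retained != NULL) {
    // Keep filling the half-full old region from the previous GC instead
    // of taking a fresh region from the free list.
    _old_gc_alloc_region.set(retained); // assumed G1AllocRegion method
  }
}

void G1CollectedHeap::release_gc_alloc_regions() {
  _survivor_gc_alloc_region.release();
  // The current old region is typically not full; remember it so the
  // next GC can re-use it.
  _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
}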
@@ -385,16 +394,10 @@
           heap_locking_asserts_err_msg("should not be at a safepoint"));     \
   } while (0)
 
 protected:
 
-  // Returns "true" iff none of the gc alloc regions have any allocations
-  // since the last call to "save_marks".
-  bool all_alloc_regions_no_allocs_since_save_marks();
-  // Perform finalization stuff on all allocation regions.
-  void retire_all_alloc_regions();
-
   // The young region list.
   YoungList* _young_list;
 
   // The current policy object for the collector.
   G1CollectorPolicy* _g1_policy;
@@ -409,15 +412,10 @@
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
   // request.
   HeapRegion* new_region(size_t word_size, bool do_expand);
-
-  // Try to allocate a new region to be used for allocation by
-  // a GC thread. It will try to expand the heap if no region is
-  // available.
-  HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
 
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
   // index of the first region or G1_NULL_HRS_INDEX if the search
@@ -522,19 +520,28 @@
 
   // Ensure that no further allocations can happen in "r", bearing in mind
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
 
-  // Retires an allocation region when it is full or at the end of a
-  // GC pause.
-  void retire_alloc_region(HeapRegion* alloc_region, bool par);
-
-  // These two methods are the "callbacks" from the G1AllocRegion class.
-
+  // Allocation attempt during GC for a survivor object / PLAB.
+  inline HeapWord* survivor_attempt_allocation(size_t word_size);
+
+  // Allocation attempt during GC for an old object / PLAB.
+  inline HeapWord* old_attempt_allocation(size_t word_size);
+
+  // These methods are the "callbacks" from the G1AllocRegion class.
+
+  // For mutator alloc regions.
   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
   void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes);
+
+  // For GC alloc regions.
+  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
+                                  GCAllocPurpose ap);
+  void retire_gc_alloc_region(HeapRegion* alloc_region,
+                              size_t allocated_bytes, GCAllocPurpose ap);
 
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
   //   cleared during the GC
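The two inline helpers declared in this hunk would be defined in
g1CollectedHeap.inline.hpp. Below is a sketch of the survivor variant under
the usual G1AllocRegion fast-path/slow-path protocol; treating
attempt_allocation()/attempt_allocation_locked() and the FreeList_lock as the
exact pieces used here is an assumption, not a quote:

// Sketch: GC-time allocation helper for survivor objects / PLABs.
inline HeapWord*
G1CollectedHeap::survivor_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size), "we should never get humongous requests here");
  // Fast path: CAS into the current survivor alloc region.
  HeapWord* result =
    _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                 false /* bot_updates */);
  if (result == NULL) {
    // Slow path: take the lock, retry, and replace the region if needed.
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result =
      _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                   false /* bot_updates */);
  }
  return result;
}

old_attempt_allocation() would presumably be the mirror image, going through
_old_gc_alloc_region with true /* bot_updates */.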
@@ -725,13 +732,10 @@
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
   void g1_process_weak_roots(OopClosure* root_closure,
                              OopClosure* non_root_closure);
 
-  // Invoke "save_marks" on all heap regions.
-  void save_marks();
-
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
   // list later). The used bytes of freed regions are accumulated in
   // pre_used. If par is true, the region's RSet will not be freed
@@ -819,28 +823,10 @@
 
   // An attempt to evacuate "obj" has failed; take necessary steps.
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
-  // Ensure that the relevant gc_alloc regions are set.
-  void get_gc_alloc_regions();
-  // We're done with GC alloc regions. We are going to tear down the
-  // gc alloc list and remove the gc alloc tag from all the regions on
-  // that list. However, we will also retain the last (i.e., the one
-  // that is half-full) GC alloc region, per GCAllocPurpose, for
-  // possible reuse during the next collection, provided
-  // _retain_gc_alloc_region[] indicates that it should be the
-  // case. Said regions are kept in the _retained_gc_alloc_regions[]
-  // array. If the parameter totally is set, we will not retain any
-  // regions, irrespective of what _retain_gc_alloc_region[]
-  // indicates.
-  void release_gc_alloc_regions(bool totally);
-#ifndef PRODUCT
-  // Useful for debugging.
-  void print_gc_alloc_regions();
-#endif // !PRODUCT
-
   // Instance of the concurrent mark is_alive closure for embedding
   // into the reference processor as the is_alive_non_header. This
   // prevents unnecessary additions to the discovered lists during
   // concurrent discovery.
   G1CMIsAliveClosure _is_alive_closure;
@@ -945,13 +931,10 @@
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The
   // result might be a bit inaccurate.
   size_t used_unlocked() const;
   size_t recalculate_used() const;
-#ifndef PRODUCT
-  size_t recalculate_used_regions() const;
-#endif // PRODUCT
 
   // These virtual functions do the actual allocation.
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
   // end fields defining the extent of the contiguous allocation region.)
@@ -1106,13 +1089,10 @@
   MemRegion g1_committed() {
     return _g1_committed;
   }
 
   virtual bool is_in_closed_subset(const void* p) const;
-
-  // Dirty card table entries covering a list of young regions.
-  void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
 
   // This resets the card table to all zeros. It is used after
   // a collection pause which used the card table to claim cards.
   void cleanUpCardTable();
 
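The deleted dirtyCardsForYoungRegions() is the bulk half of "dirty survivor
cards incrementally" from the summary: instead of one pass over the young list
after evacuation, the cards covering survivor allocations can be dirtied as
each survivor region fills up and is retired. Conceptually it might look like
the sketch below; the dirty_survivor_cards() name and the barrier-set cast
are illustrative assumptions, not the changeset's actual mechanism:

// Sketch: dirty the cards covering a retired survivor region's used range
// right away, so no bulk pass over the young list is needed at pause end.
void G1CollectedHeap::dirty_survivor_cards(HeapRegion* hr) { // assumed helper
  // Survivors may hold references into the next collection set; dirty
  // cards ensure the next pause scans them.
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
  ct_bs->dirty(MemRegion(hr->bottom(), hr->top()));
}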