comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 2181:d25d4ca69222
Merge.
author | Thomas Wuerthinger <wuerthinger@ssw.jku.at> |
date | Wed, 16 Feb 2011 13:47:20 +0100 |
parents | 97ba643ea3ed |
children | c33825b68624 |
2108:50b45e2d9725 | 2181:d25d4ca69222 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP | 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP |
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP | 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP |
27 | 27 |
28 #include "gc_implementation/g1/concurrentMark.hpp" | 28 #include "gc_implementation/g1/concurrentMark.hpp" |
29 #include "gc_implementation/g1/g1RemSet.hpp" | 29 #include "gc_implementation/g1/g1RemSet.hpp" |
30 #include "gc_implementation/g1/heapRegion.hpp" | 30 #include "gc_implementation/g1/heapRegionSets.hpp" |
31 #include "gc_implementation/parNew/parGCAllocBuffer.hpp" | 31 #include "gc_implementation/parNew/parGCAllocBuffer.hpp" |
32 #include "memory/barrierSet.hpp" | 32 #include "memory/barrierSet.hpp" |
33 #include "memory/memRegion.hpp" | 33 #include "memory/memRegion.hpp" |
34 #include "memory/sharedHeap.hpp" | 34 #include "memory/sharedHeap.hpp" |
35 | 35 |
38 // may combine concurrent marking with parallel, incremental compaction of | 38 // may combine concurrent marking with parallel, incremental compaction of |
39 // heap subsets that will yield large amounts of garbage. | 39 // heap subsets that will yield large amounts of garbage. |
40 | 40 |
41 class HeapRegion; | 41 class HeapRegion; |
42 class HeapRegionSeq; | 42 class HeapRegionSeq; |
| 43 class HRRSCleanupTask; |
43 class PermanentGenerationSpec; | 44 class PermanentGenerationSpec; |
44 class GenerationSpec; | 45 class GenerationSpec; |
45 class OopsInHeapRegionClosure; | 46 class OopsInHeapRegionClosure; |
46 class G1ScanHeapEvacClosure; | 47 class G1ScanHeapEvacClosure; |
47 class ObjectClosure; | 48 class ObjectClosure; |
64 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) | 65 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) |
65 | 66 |
66 enum G1GCThreadGroups { | 67 enum G1GCThreadGroups { |
67 G1CRGroup = 0, | 68 G1CRGroup = 0, |
68 G1ZFGroup = 1, | 69 G1ZFGroup = 1, |
69 G1CMGroup = 2, | 70 G1CMGroup = 2 |
70 G1CLGroup = 3 | |
71 }; | 71 }; |
72 | 72 |
73 enum GCAllocPurpose { | 73 enum GCAllocPurpose { |
74 GCAllocForTenured, | 74 GCAllocForTenured, |
75 GCAllocForSurvived, | 75 GCAllocForSurvived, |
153 friend class G1ParTask; | 153 friend class G1ParTask; |
154 friend class G1FreeGarbageRegionClosure; | 154 friend class G1FreeGarbageRegionClosure; |
155 friend class RefineCardTableEntryClosure; | 155 friend class RefineCardTableEntryClosure; |
156 friend class G1PrepareCompactClosure; | 156 friend class G1PrepareCompactClosure; |
157 friend class RegionSorter; | 157 friend class RegionSorter; |
158 friend class RegionResetter; | |
158 friend class CountRCClosure; | 159 friend class CountRCClosure; |
159 friend class EvacPopObjClosure; | 160 friend class EvacPopObjClosure; |
160 friend class G1ParCleanupCTTask; | 161 friend class G1ParCleanupCTTask; |
161 | 162 |
162 // Other related classes. | 163 // Other related classes. |
176 MemRegion _g1_committed; | 177 MemRegion _g1_committed; |
177 | 178 |
178 // The maximum part of _g1_storage that has ever been committed. | 179 // The maximum part of _g1_storage that has ever been committed. |
179 MemRegion _g1_max_committed; | 180 MemRegion _g1_max_committed; |
180 | 181 |
181 // The number of regions that are completely free. | 182 // The master free list. It will satisfy all new region allocations. |
182 size_t _free_regions; | 183 MasterFreeRegionList _free_list; |
| 184 |
| 185 // The secondary free list which contains regions that have been |
| 186 // freed up during the cleanup process. This will be appended to the |
| 187 // master free list when appropriate. |
| 188 SecondaryFreeRegionList _secondary_free_list; |
| 189 |
| 190 // It keeps track of the humongous regions. |
| 191 MasterHumongousRegionSet _humongous_set; |
183 | 192 |
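The pair of lists above is a producer/consumer split: allocation pops regions off the master free list, while concurrent cleanup accumulates freed regions on the secondary list and splices them over in bulk. Below is a minimal sketch of that splice-friendly list design; the types are illustrative stand-ins, not the MasterFreeRegionList/SecondaryFreeRegionList implementation from heapRegionSets.hpp.

```cpp
#include <cstddef>

struct Region { Region* next; };

// Toy singly-linked region list with O(1) head removal and O(1) splice.
class RegionList {
  Region* _head;
  Region* _tail;
  size_t  _length;
public:
  RegionList() : _head(NULL), _tail(NULL), _length(0) { }

  bool   is_empty() const { return _length == 0; }
  size_t length()   const { return _length; }

  // Pop a region off the head; this is the shape of a free-list allocation.
  Region* remove_head() {
    if (_head == NULL) return NULL;
    Region* hr = _head;
    _head = hr->next;
    if (_head == NULL) _tail = NULL;
    _length--;
    hr->next = NULL;
    return hr;
  }

  // Splice another list onto the tail in O(1). This is what makes
  // appending the secondary list to the master list cheap: no
  // per-region work, just pointer surgery and a length update.
  void add_as_tail(RegionList* from) {
    if (from->is_empty()) return;
    if (_tail != NULL) {
      _tail->next = from->_head;
    } else {
      _head = from->_head;
    }
    _tail = from->_tail;
    _length += from->_length;
    from->_head = from->_tail = NULL;
    from->_length = 0;
  }
};
```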
184 // The number of regions we could create by expansion. | 193 // The number of regions we could create by expansion. |
185 size_t _expansion_regions; | 194 size_t _expansion_regions; |
186 | |
187 // Return the number of free regions in the heap (by direct counting.) | |
188 size_t count_free_regions(); | |
189 // Return the number of free regions on the free and unclean lists. | |
190 size_t count_free_regions_list(); | |
191 | 195 |
192 // The block offset table for the G1 heap. | 196 // The block offset table for the G1 heap. |
193 G1BlockOffsetSharedArray* _bot_shared; | 197 G1BlockOffsetSharedArray* _bot_shared; |
194 | 198 |
195 // Move all of the regions off the free lists, then rebuild those free | 199 // Move all of the regions off the free lists, then rebuild those free |
196 // lists, before and after full GC. | 200 // lists, before and after full GC. |
197 void tear_down_region_lists(); | 201 void tear_down_region_lists(); |
198 void rebuild_region_lists(); | 202 void rebuild_region_lists(); |
199 // This sets all non-empty regions to need zero-fill (which they will if | |
200 // they are empty after full collection.) | |
201 void set_used_regions_to_need_zero_fill(); | |
202 | 203 |
203 // The sequence of all heap regions in the heap. | 204 // The sequence of all heap regions in the heap. |
204 HeapRegionSeq* _hrs; | 205 HeapRegionSeq* _hrs; |
205 | 206 |
206 // The region from which normal-sized objects are currently being | 207 // The region from which normal-sized objects are currently being |
229 HeapRegion* _gc_alloc_region_list; | 230 HeapRegion* _gc_alloc_region_list; |
230 | 231 |
231 // Determines PLAB size for a particular allocation purpose. | 232 // Determines PLAB size for a particular allocation purpose. |
232 static size_t desired_plab_sz(GCAllocPurpose purpose); | 233 static size_t desired_plab_sz(GCAllocPurpose purpose); |
233 | 234 |
234 // When called by par thread, require par_alloc_during_gc_lock() to be held. | 235 // When called by par thread, requires the FreeList_lock to be held. |
235 void push_gc_alloc_region(HeapRegion* hr); | 236 void push_gc_alloc_region(HeapRegion* hr); |
236 | 237 |
237 // This should only be called single-threaded. Undeclares all GC alloc | 238 // This should only be called single-threaded. Undeclares all GC alloc |
238 // regions. | 239 // regions. |
239 void forget_alloc_region_list(); | 240 void forget_alloc_region_list(); |
292 | 293 |
293 // These are macros so that, if the assert fires, we get the correct | 294 // These are macros so that, if the assert fires, we get the correct |
294 // line number, file, etc. | 295 // line number, file, etc. |
295 | 296 |
296 #define heap_locking_asserts_err_msg(__extra_message) \ | 297 #define heap_locking_asserts_err_msg(__extra_message) \ |
297 err_msg("%s : Heap_lock %slocked, %sat a safepoint", \ | 298 err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \ |
298 (__extra_message), \ | 299 (__extra_message), \ |
299 (!Heap_lock->owned_by_self()) ? "NOT " : "", \ | 300 BOOL_TO_STR(Heap_lock->owned_by_self()), \ |
300 (!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "") | 301 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \ |
| 302 BOOL_TO_STR(Thread::current()->is_VM_thread())) |
301 | 303 |
302 #define assert_heap_locked() \ | 304 #define assert_heap_locked() \ |
303 do { \ | 305 do { \ |
304 assert(Heap_lock->owned_by_self(), \ | 306 assert(Heap_lock->owned_by_self(), \ |
305 heap_locking_asserts_err_msg("should be holding the Heap_lock")); \ | 307 heap_locking_asserts_err_msg("should be holding the Heap_lock")); \ |
306 } while (0) | 308 } while (0) |
307 | 309 |
308 #define assert_heap_locked_or_at_safepoint() \ | 310 #define assert_heap_locked_or_at_safepoint(__should_be_vm_thread) \ |
309 do { \ | 311 do { \ |
310 assert(Heap_lock->owned_by_self() || \ | 312 assert(Heap_lock->owned_by_self() || \ |
311 SafepointSynchronize::is_at_safepoint(), \ | 313 (SafepointSynchronize::is_at_safepoint() && \ |
| 314 ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \ |
312 heap_locking_asserts_err_msg("should be holding the Heap_lock or " \ | 315 heap_locking_asserts_err_msg("should be holding the Heap_lock or " \ |
313 "should be at a safepoint")); \ | 316 "should be at a safepoint")); \ |
314 } while (0) | 317 } while (0) |
315 | 318 |
316 #define assert_heap_locked_and_not_at_safepoint() \ | 319 #define assert_heap_locked_and_not_at_safepoint() \ |
333 !SafepointSynchronize::is_at_safepoint(), \ | 336 !SafepointSynchronize::is_at_safepoint(), \ |
334 heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \ | 337 heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \ |
335 "should not be at a safepoint")); \ | 338 "should not be at a safepoint")); \ |
336 } while (0) | 339 } while (0) |
337 | 340 |
338 #define assert_at_safepoint() \ | 341 #define assert_at_safepoint(__should_be_vm_thread) \ |
339 do { \ | 342 do { \ |
340 assert(SafepointSynchronize::is_at_safepoint(), \ | 343 assert(SafepointSynchronize::is_at_safepoint() && \ |
| 344 ((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \ |
341 heap_locking_asserts_err_msg("should be at a safepoint")); \ | 345 heap_locking_asserts_err_msg("should be at a safepoint")); \ |
342 } while (0) | 346 } while (0) |
343 | 347 |
344 #define assert_not_at_safepoint() \ | 348 #define assert_not_at_safepoint() \ |
345 do { \ | 349 do { \ |
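The reworked macros now carry a `__should_be_vm_thread` expectation into the assert, and the err_msg above prints all three conditions via BOOL_TO_STR when one fires. Hypothetical call sites (not taken from this file) showing how the parameter distinguishes VM-thread work from worker-thread work:

```cpp
// Hypothetical safepoint operations; the boolean states who is
// expected to be executing the code, so a mismatch trips the assert.
void vm_thread_only_operation() {
  assert_at_safepoint(true /* should_be_vm_thread */);
  // ... work performed only by the VM thread inside a safepoint ...
}

void parallel_worker_step() {
  assert_at_safepoint(false /* should_be_vm_thread */);
  // ... work performed by GC worker threads inside a safepoint ...
}
```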
360 YoungList* _young_list; | 364 YoungList* _young_list; |
361 | 365 |
362 // The current policy object for the collector. | 366 // The current policy object for the collector. |
363 G1CollectorPolicy* _g1_policy; | 367 G1CollectorPolicy* _g1_policy; |
364 | 368 |
365 // Parallel allocation lock to protect the current allocation region. | 369 // This is the second level of trying to allocate a new region. If |
366 Mutex _par_alloc_during_gc_lock; | 370 // new_region_work didn't find a region in the free_list, this call |
367 Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; } | 371 // will check whether there's anything available in the |
368 | 372 // secondary_free_list and/or wait for more regions to appear in that |
369 // If possible/desirable, allocate a new HeapRegion for normal object | 373 // list, if _free_regions_coming is set. |
370 // allocation sufficient for an allocation of the given "word_size". | 374 HeapRegion* new_region_try_secondary_free_list(size_t word_size); |
371 // If "do_expand" is true, will attempt to expand the heap if necessary | 375 |
372 // to satisfy the request. If "zero_filled" is true, requires a | 376 // It will try to allocate a single non-humongous HeapRegion |
373 // zero-filled region. | 377 // sufficient for an allocation of the given word_size. If |
374 // (Returning NULL will trigger a GC.) | 378 // do_expand is true, it will attempt to expand the heap if |
375 virtual HeapRegion* newAllocRegion_work(size_t word_size, | 379 // necessary to satisfy the allocation request. Note that word_size |
376 bool do_expand, | 380 // is only used to make sure that we expand sufficiently but, given |
377 bool zero_filled); | 381 // that the allocation request is assumed not to be humongous, |
378 | 382 // having word_size is not strictly necessary (expanding by a single |
379 virtual HeapRegion* newAllocRegion(size_t word_size, | 383 // region will always be sufficient). But let's keep that parameter |
380 bool zero_filled = true) { | 384 // in case we need it in the future. |
381 return newAllocRegion_work(word_size, false, zero_filled); | 385 HeapRegion* new_region_work(size_t word_size, bool do_expand); |
382 } | 386 |
383 virtual HeapRegion* newAllocRegionWithExpansion(int purpose, | 387 // It will try to allocate a new region to be used for allocation by |
384 size_t word_size, | 388 // mutator threads. It will not try to expand the heap if no region |
385 bool zero_filled = true); | 389 // is available. |
| 390 HeapRegion* new_alloc_region(size_t word_size) { |
| 391 return new_region_work(word_size, false /* do_expand */); |
| 392 } |
| 393 |
| 394 // It will try to allocate a new region to be used for allocation by |
| 395 // a GC thread. It will try to expand the heap if no region is |
| 396 // available. |
| 397 HeapRegion* new_gc_alloc_region(int purpose, size_t word_size); |
| 398 |
| 399 int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size); |
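Taken together, the new-region path is layered: the master free list first, then the secondary free list (possibly waiting while regions are still coming), then heap expansion. A hedged sketch of that control flow, reusing the toy `RegionList` from the earlier sketch; the real `new_region_work()` also handles locking, logging, and expansion policy:

```cpp
// Illustrative only: the levels correspond to the comments above, not
// to the exact structure of new_region_work().
Region* new_region_sketch(RegionList* free_list,
                          RegionList* secondary_free_list,
                          bool free_regions_coming,
                          bool do_expand) {
  // Level 1: the master free list satisfies most requests.
  Region* hr = free_list->remove_head();
  if (hr != NULL) return hr;

  // Level 2: regions freed by concurrent cleanup. When cleanup is still
  // publishing (free_regions_coming), the real code waits on
  // SecondaryFreeList_lock for more regions to appear.
  if (!secondary_free_list->is_empty() || free_regions_coming) {
    free_list->add_as_tail(secondary_free_list);
    hr = free_list->remove_head();
    if (hr != NULL) return hr;
  }

  // Level 3: expand the heap by at least one region (elided); a single
  // region is always enough for a non-humongous request.
  if (do_expand) {
    // hr = expand_and_allocate_region();  // hypothetical helper
  }
  return hr;  // NULL lets the caller fall back to GC
}
```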
386 | 400 |
387 // Attempt to allocate an object of the given (very large) "word_size". | 401 // Attempt to allocate an object of the given (very large) "word_size". |
388 // Returns "NULL" on failure. | 402 // Returns "NULL" on failure. |
389 virtual HeapWord* humongous_obj_allocate(size_t word_size); | 403 HeapWord* humongous_obj_allocate(size_t word_size); |
390 | 404 |
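A humongous allocation needs `num_regions` adjacent heap regions, so the find-first helper amounts to scanning the region sequence for a long-enough run of empty slots. A simplified, hypothetical version over a plain boolean occupancy map (the real helper works against the HeapRegionSeq):

```cpp
// Returns the index of the first run of num_needed consecutive empty
// regions, or -1 if no such run exists.
int find_first_contiguous_empty(const bool* is_empty,
                                int n_regions,
                                int num_needed) {
  int run_start = -1;
  int run_len = 0;
  for (int i = 0; i < n_regions; i++) {
    if (is_empty[i]) {
      if (run_len == 0) run_start = i;  // a new run begins here
      run_len++;
      if (run_len == num_needed) return run_start;
    } else {
      run_len = 0;  // run broken by an occupied region
    }
  }
  return -1;
}
```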
391 // The following two methods, allocate_new_tlab() and | 405 // The following two methods, allocate_new_tlab() and |
392 // mem_allocate(), are the two main entry points from the runtime | 406 // mem_allocate(), are the two main entry points from the runtime |
393 // into the G1's allocation routines. They have the following | 407 // into the G1's allocation routines. They have the following |
394 // assumptions: | 408 // assumptions: |
428 bool is_noref, | 442 bool is_noref, |
429 bool is_tlab, /* expected to be false */ | 443 bool is_tlab, /* expected to be false */ |
430 bool* gc_overhead_limit_was_exceeded); | 444 bool* gc_overhead_limit_was_exceeded); |
431 | 445 |
432 // The following methods, allocate_from_cur_allocation_region(), | 446 // The following methods, allocate_from_cur_allocation_region(), |
433 // attempt_allocation(), replace_cur_alloc_region_and_allocate(), | 447 // attempt_allocation(), attempt_allocation_locked(), |
| 448 // replace_cur_alloc_region_and_allocate(), |
434 // attempt_allocation_slow(), and attempt_allocation_humongous() | 449 // attempt_allocation_slow(), and attempt_allocation_humongous() |
435 // have very awkward pre- and post-conditions with respect to | 450 // have very awkward pre- and post-conditions with respect to |
436 // locking: | 451 // locking: |
437 // | 452 // |
438 // If they are called outside a safepoint they assume the caller | 453 // If they are called outside a safepoint they assume the caller |
479 // | 494 // |
480 // They all return either the address of the block, if they | 495 // They all return either the address of the block, if they |
481 // successfully manage to allocate it, or NULL. | 496 // successfully manage to allocate it, or NULL. |
482 | 497 |
483 // It tries to satisfy an allocation request out of the current | 498 // It tries to satisfy an allocation request out of the current |
484 // allocating region, which is passed as a parameter. It assumes | 499 // alloc region, which is passed as a parameter. It assumes that the |
485 // that the caller has checked that the current allocating region is | 500 // caller has checked that the current alloc region is not NULL. |
486 // not NULL. Given that the caller has to check the current | 501 // Given that the caller has to check the current alloc region for |
487 // allocating region for at least NULL, it might as well pass it as | 502 // at least NULL, it might as well pass it as the first parameter so |
488 // the first parameter so that the method doesn't have to read it | 503 // that the method doesn't have to read it from the |
489 // from the _cur_alloc_region field again. | 504 // _cur_alloc_region field again. It is called from both |
| 505 // attempt_allocation() and attempt_allocation_locked() and the |
| 506 // with_heap_lock parameter indicates whether the caller was holding |
| 507 // the heap lock when it called it or not. |
490 inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region, | 508 inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region, |
491 size_t word_size); | 509 size_t word_size, |
492 | 510 bool with_heap_lock); |
493 // It attempts to allocate out of the current alloc region. If that | 511 |
494 // fails, it retires the current alloc region (if there is one), | 512 // First-level of allocation slow path: it attempts to allocate out |
495 // tries to get a new one and retries the allocation. | 513 // of the current alloc region in a lock-free manner using a CAS. If |
| 514 // that fails it takes the Heap_lock and calls |
| 515 // attempt_allocation_locked() for the second-level slow path. |
496 inline HeapWord* attempt_allocation(size_t word_size); | 516 inline HeapWord* attempt_allocation(size_t word_size); |
| 517 |
| 518 // Second-level of allocation slow path: while holding the Heap_lock |
| 519 // it tries to allocate out of the current alloc region and, if that |
| 520 // fails, tries to allocate out of a new current alloc region. |
| 521 inline HeapWord* attempt_allocation_locked(size_t word_size); |
497 | 522 |
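The first level above is classic lock-free bump-the-pointer allocation: CAS the region's top forward and only take the Heap_lock when the CAS path cannot succeed. A sketch with std::atomic; HotSpot itself uses its own Atomic primitives, and the names here are illustrative:

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>

typedef uintptr_t HeapWord;  // stand-in for HotSpot's HeapWord

struct AllocRegionSketch {
  std::atomic<HeapWord*> top;  // next free word in the region
  HeapWord* end;               // one past the last usable word
};

// Lock-free fast path: returns the old top on success, NULL when the
// request does not fit (the caller then takes Heap_lock and goes
// through the locked second level).
HeapWord* par_allocate_sketch(AllocRegionSketch* r, size_t word_size) {
  HeapWord* old_top = r->top.load(std::memory_order_relaxed);
  do {
    if (old_top + word_size > r->end) {
      return NULL;  // no room: fall back to the locked slow path
    }
    // On failure, compare_exchange_weak reloads old_top, so the bounds
    // check above is re-run before every retry.
  } while (!r->top.compare_exchange_weak(old_top, old_top + word_size));
  return old_top;  // this thread owns [old_top, old_top + word_size)
}
```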
498 // It assumes that the current alloc region has been retired and | 523 // It assumes that the current alloc region has been retired and |
499 // tries to allocate a new one. If it's successful, it performs the | 524 // tries to allocate a new one. If it's successful, it performs the |
500 // allocation out of the new current alloc region and updates | 525 // allocation out of the new current alloc region and updates |
501 // _cur_alloc_region. Normally, it would try to allocate a new | 526 // _cur_alloc_region. Normally, it would try to allocate a new |
504 HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size, | 529 HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size, |
505 bool at_safepoint, | 530 bool at_safepoint, |
506 bool do_dirtying, | 531 bool do_dirtying, |
507 bool can_expand); | 532 bool can_expand); |
508 | 533 |
509 // The slow path when we are unable to allocate a new current alloc | 534 // Third-level of allocation slow path: when we are unable to |
510 // region to satisfy an allocation request (i.e., when | 535 // allocate a new current alloc region to satisfy an allocation |
511 // attempt_allocation() fails). It will try to do an evacuation | 536 // request (i.e., when attempt_allocation_locked() fails). It will |
512 // pause, which might stall due to the GC locker, and retry the | 537 // try to do an evacuation pause, which might stall due to the GC |
513 // allocation attempt when appropriate. | 538 // locker, and retry the allocation attempt when appropriate. |
514 HeapWord* attempt_allocation_slow(size_t word_size); | 539 HeapWord* attempt_allocation_slow(size_t word_size); |
515 | 540 |
516 // The method that tries to satisfy a humongous allocation | 541 // The method that tries to satisfy a humongous allocation |
517 // request. If it cannot satisfy it, it will try to do an evacuation | 542 // request. If it cannot satisfy it, it will try to do an evacuation |
518 // pause to perhaps reclaim enough space to be able to satisfy the | 543 // pause to perhaps reclaim enough space to be able to satisfy the |
747 OopClosure* non_root_closure); | 772 OopClosure* non_root_closure); |
748 | 773 |
749 // Invoke "save_marks" on all heap regions. | 774 // Invoke "save_marks" on all heap regions. |
750 void save_marks(); | 775 void save_marks(); |
751 | 776 |
752 // Free a heap region. | 777 // It frees a non-humongous region by initializing its contents and |
753 void free_region(HeapRegion* hr); | 778 // adding it to the free list that's passed as a parameter (this is |
754 // A component of "free_region", exposed for 'batching'. | 779 // usually a local list which will be appended to the master free |
755 // All the params after "hr" are out params: the used bytes of the freed | 780 // list later). The used bytes of freed regions are accumulated in |
756 // region(s), the number of H regions cleared, the number of regions | 781 // pre_used. If par is true, the region's RSet will not be freed |
757 // freed, and pointers to the head and tail of a list of freed contig | 782 // up. The assumption is that this will be done later. |
758 // regions, linked through the "next_on_unclean_list" field. | 782 // up. The assumption is that this will be done later. |
759 void free_region_work(HeapRegion* hr, | 784 size_t* pre_used, |
760 size_t& pre_used, | 785 FreeRegionList* free_list, |
761 size_t& cleared_h, | 786 bool par); |
762 size_t& freed_regions, | 787 |
763 UncleanRegionList* list, | 788 // It frees a humongous region by collapsing it into individual |
764 bool par = false); | 789 // regions and calling free_region() for each of them. The freed |
765 | 790 // regions will be added to the free list that's passed as a parameter |
| 791 // (this is usually a local list which will be appended to the |
| 792 // master free list later). The used bytes of freed regions are |
| 793 // accumulated in pre_used. If par is true, the region's RSet will |
| 794 // not be freed up. The assumption is that this will be done later. |
| 795 void free_humongous_region(HeapRegion* hr, |
| 796 size_t* pre_used, |
| 797 FreeRegionList* free_list, |
| 798 HumongousRegionSet* humongous_proxy_set, |
| 799 bool par); |
766 | 800 |
767 // The concurrent marker (and the thread it runs in.) | 801 // The concurrent marker (and the thread it runs in.) |
768 ConcurrentMark* _cm; | 802 ConcurrentMark* _cm; |
769 ConcurrentMarkThread* _cmThread; | 803 ConcurrentMarkThread* _cmThread; |
770 bool _mark_in_progress; | 804 bool _mark_in_progress; |
771 | 805 |
772 // The concurrent refiner. | 806 // The concurrent refiner. |
773 ConcurrentG1Refine* _cg1r; | 807 ConcurrentG1Refine* _cg1r; |
774 | |
775 // The concurrent zero-fill thread. | |
776 ConcurrentZFThread* _czft; | |
777 | 808 |
778 // The parallel task queues | 809 // The parallel task queues |
779 RefToScanQueueSet *_task_queues; | 810 RefToScanQueueSet *_task_queues; |
780 | 811 |
781 // True iff an evacuation has failed in the current collection. | 812 // True iff an evacuation has failed in the current collection. |
824 // Do any necessary cleanup for evacuation-failure handling data | 855 // Do any necessary cleanup for evacuation-failure handling data |
825 // structures. | 856 // structures. |
826 void finalize_for_evac_failure(); | 857 void finalize_for_evac_failure(); |
827 | 858 |
828 // An attempt to evacuate "obj" has failed; take necessary steps. | 859 // An attempt to evacuate "obj" has failed; take necessary steps. |
829 void handle_evacuation_failure(oop obj); | |
830 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); | 860 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); |
831 void handle_evacuation_failure_common(oop obj, markOop m); | 861 void handle_evacuation_failure_common(oop obj, markOop m); |
832 | 862 |
833 | 863 |
834 // Ensure that the relevant gc_alloc regions are set. | 864 // Ensure that the relevant gc_alloc regions are set. |
865 G1H_PS_NumElements | 895 G1H_PS_NumElements |
866 }; | 896 }; |
867 | 897 |
868 SubTasksDone* _process_strong_tasks; | 898 SubTasksDone* _process_strong_tasks; |
869 | 899 |
870 // List of regions which require zero filling. | 900 volatile bool _free_regions_coming; |
871 UncleanRegionList _unclean_region_list; | |
872 bool _unclean_regions_coming; | |
873 | 901 |
874 public: | 902 public: |
875 | 903 |
876 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; } | 904 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; } |
877 | 905 |
990 | 1018 |
991 // The maximum number of regions in the heap. | 1019 // The maximum number of regions in the heap. |
992 size_t max_regions(); | 1020 size_t max_regions(); |
993 | 1021 |
994 // The number of regions that are completely free. | 1022 // The number of regions that are completely free. |
995 size_t free_regions(); | 1023 size_t free_regions() { |
| 1024 return _free_list.length(); |
| 1025 } |
996 | 1026 |
997 // The number of regions that are not completely free. | 1027 // The number of regions that are not completely free. |
998 size_t used_regions() { return n_regions() - free_regions(); } | 1028 size_t used_regions() { return n_regions() - free_regions(); } |
999 | 1029 |
1000 // True iff the ZF thread should run. | |
1001 bool should_zf(); | |
1002 | |
1003 // The number of regions available for "regular" expansion. | 1030 // The number of regions available for "regular" expansion. |
1004 size_t expansion_regions() { return _expansion_regions; } | 1031 size_t expansion_regions() { return _expansion_regions; } |
1005 | 1032 |
1006 #ifndef PRODUCT | 1033 // verify_region_sets() performs verification over the region |
1007 bool regions_accounted_for(); | 1034 // lists. It will be compiled in the product code to be used when |
1008 bool print_region_accounting_info(); | 1035 // necessary (i.e., during heap verification). |
1009 void print_region_counts(); | 1036 void verify_region_sets(); |
1010 #endif | 1037 |
1011 | 1038 // verify_region_sets_optional() is planted in the code for |
1012 HeapRegion* alloc_region_from_unclean_list(bool zero_filled); | 1039 // list verification in non-product builds (and it can be enabled in |
1013 HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled); | 1040 // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1). |
1014 | 1041 #if HEAP_REGION_SET_FORCE_VERIFY |
1015 void put_region_on_unclean_list(HeapRegion* r); | 1042 void verify_region_sets_optional() { |
1016 void put_region_on_unclean_list_locked(HeapRegion* r); | 1043 verify_region_sets(); |
1017 | 1044 } |
1018 void prepend_region_list_on_unclean_list(UncleanRegionList* list); | 1045 #else // HEAP_REGION_SET_FORCE_VERIFY |
1019 void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list); | 1046 void verify_region_sets_optional() { } |
1020 | 1047 #endif // HEAP_REGION_SET_FORCE_VERIFY |
1021 void set_unclean_regions_coming(bool b); | 1048 |
1022 void set_unclean_regions_coming_locked(bool b); | 1049 #ifdef ASSERT |
1023 // Wait for cleanup to be complete. | 1050 bool is_on_free_list(HeapRegion* hr) { |
1024 void wait_for_cleanup_complete(); | 1051 return hr->containing_set() == &_free_list; |
1025 // Like above, but assumes that the calling thread owns the Heap_lock. | 1052 } |
1026 void wait_for_cleanup_complete_locked(); | 1053 |
1027 | 1054 bool is_on_humongous_set(HeapRegion* hr) { |
1028 // Return the head of the unclean list. | 1055 return hr->containing_set() == &_humongous_set; |
1029 HeapRegion* peek_unclean_region_list_locked(); | 1056 } |
1030 // Remove and return the head of the unclean list. | 1057 #endif // ASSERT |
1031 HeapRegion* pop_unclean_region_list_locked(); | 1058 |
1032 | 1059 // Wrapper for the region list operations that can be called from |
1033 // List of regions which are zero filled and ready for allocation. | 1060 // methods outside this class. |
1034 HeapRegion* _free_region_list; | 1061 |
1035 // Number of elements on the free list. | 1062 void secondary_free_list_add_as_tail(FreeRegionList* list) { |
1036 size_t _free_region_list_size; | 1063 _secondary_free_list.add_as_tail(list); |
1037 | 1064 } |
1038 // If the head of the unclean list is ZeroFilled, move it to the free | 1065 |
1039 // list. | 1066 void append_secondary_free_list() { |
1040 bool move_cleaned_region_to_free_list_locked(); | 1067 _free_list.add_as_tail(&_secondary_free_list); |
1041 bool move_cleaned_region_to_free_list(); | 1068 } |
1042 | 1069 |
1043 void put_free_region_on_list_locked(HeapRegion* r); | 1070 void append_secondary_free_list_if_not_empty() { |
1044 void put_free_region_on_list(HeapRegion* r); | 1071 if (!_secondary_free_list.is_empty()) { |
1045 | 1072 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
1046 // Remove and return the head element of the free list. | 1073 append_secondary_free_list(); |
1047 HeapRegion* pop_free_region_list_locked(); | 1074 } |
1048 | 1075 } |
1049 // If "zero_filled" is true, we first try the free list, then we try the | 1076 |
1050 // unclean list, zero-filling the result. If "zero_filled" is false, we | 1077 void set_free_regions_coming(); |
1051 // first try the unclean list, then the zero-filled list. | 1078 void reset_free_regions_coming(); |
1052 HeapRegion* alloc_free_region_from_lists(bool zero_filled); | 1079 bool free_regions_coming() { return _free_regions_coming; } |
1053 | 1080 void wait_while_free_regions_coming(); |
1054 // Verify the integrity of the region lists. | |
1055 void remove_allocated_regions_from_lists(); | |
1056 bool verify_region_lists(); | |
1057 bool verify_region_lists_locked(); | |
1058 size_t unclean_region_list_length(); | |
1059 size_t free_region_list_length(); | |
1060 | 1081 |
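These wrappers let concurrent cleanup publish freed regions in batches: raise the `_free_regions_coming` flag up front, add regions to the secondary list as they accumulate, then clear the flag so `wait_while_free_regions_coming()` unblocks. A hedged sketch of that ordering; the call site and locking discipline are assumptions, not taken from this file:

```cpp
// Hypothetical cleanup-side publication sequence. A real caller would
// coordinate with SecondaryFreeList_lock and notify any waiters when
// the flag is reset.
void publish_cleanup_regions(G1CollectedHeap* g1h, FreeRegionList* local_list) {
  g1h->set_free_regions_coming();    // allocators know more regions may appear
  // ... concurrent cleanup fills *local_list region by region ...
  g1h->secondary_free_list_add_as_tail(local_list);  // batch publication
  g1h->reset_free_regions_coming();  // unblocks wait_while_free_regions_coming()
}
```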
1061 // Perform a collection of the heap; intended for use in implementing | 1082 // Perform a collection of the heap; intended for use in implementing |
1062 // "System.gc". This probably implies as full a collection as the | 1083 // "System.gc". This probably implies as full a collection as the |
1063 // "CollectedHeap" supports. | 1084 // "CollectedHeap" supports. |
1064 virtual void collect(GCCause::Cause cause); | 1085 virtual void collect(GCCause::Cause cause); |
1073 virtual void collect_as_vm_thread(GCCause::Cause cause); | 1094 virtual void collect_as_vm_thread(GCCause::Cause cause); |
1074 | 1095 |
1075 // True iff an evacuation has failed in the most-recent collection. | 1096 // True iff an evacuation has failed in the most-recent collection. |
1076 bool evacuation_failed() { return _evacuation_failed; } | 1097 bool evacuation_failed() { return _evacuation_failed; } |
1077 | 1098 |
1078 // Free a region if it is totally full of garbage. Returns the number of | 1099 // It will free a region if it has allocated objects in it that are |
1079 // bytes freed (0 ==> didn't free it). | 1100 // all dead. It calls either free_region() or |
1080 size_t free_region_if_totally_empty(HeapRegion *hr); | 1101 // free_humongous_region() depending on the type of the region that |
1081 void free_region_if_totally_empty_work(HeapRegion *hr, | 1102 // is passed to it. |
1082 size_t& pre_used, | 1103 void free_region_if_empty(HeapRegion* hr, |
1083 size_t& cleared_h_regions, | 1104 size_t* pre_used, |
1084 size_t& freed_regions, | 1105 FreeRegionList* free_list, |
1085 UncleanRegionList* list, | 1106 HumongousRegionSet* humongous_proxy_set, |
1086 bool par = false); | 1107 HRRSCleanupTask* hrrs_cleanup_task, |
1087 | 1108 bool par); |
1088 // If we've done free region work that yields the given changes, update | 1109 |
1089 // the relevant global variables. | 1110 // It appends the free list to the master free list and updates the |
1090 void finish_free_region_work(size_t pre_used, | 1111 // master humongous list according to the contents of the proxy |
1091 size_t cleared_h_regions, | 1112 // list. It also adjusts the total used bytes according to pre_used |
1092 size_t freed_regions, | 1113 // (if par is true, it will do so by taking the ParGCRareEvent_lock). |
1093 UncleanRegionList* list); | 1114 void update_sets_after_freeing_regions(size_t pre_used, |
1094 | 1115 FreeRegionList* free_list, |
| 1116 HumongousRegionSet* humongous_proxy_set, |
| 1117 bool par); |
1095 | 1118 |
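free_region_if_empty() and update_sets_after_freeing_regions() are meant to be used together from parallel cleanup: each worker frees into thread-local accumulators and performs a single global update at the end, so the master lists and the used-bytes counter are touched once per worker rather than once per region. A hedged sketch; the FreeRegionList constructor argument and the candidate iteration are assumptions:

```cpp
// Hypothetical parallel cleanup worker built on the batching API above.
void cleanup_worker_sketch(G1CollectedHeap* g1h,
                           HeapRegion** candidates, int n,
                           HumongousRegionSet* humongous_proxy_set,
                           HRRSCleanupTask* hrrs_cleanup_task) {
  size_t local_pre_used = 0;
  FreeRegionList local_free_list("local cleanup list");  // assumed ctor

  for (int i = 0; i < n; i++) {
    // Frees the region only if everything in it is dead; used bytes and
    // the region itself go into the local accumulators (par == true, so
    // remembered-set work is deferred).
    g1h->free_region_if_empty(candidates[i], &local_pre_used,
                              &local_free_list, humongous_proxy_set,
                              hrrs_cleanup_task, true /* par */);
  }

  // One global update per worker: append the local list to the master
  // free list and adjust used bytes (under ParGCRareEvent_lock when par).
  g1h->update_sets_after_freeing_regions(local_pre_used, &local_free_list,
                                         humongous_proxy_set, true /* par */);
}
```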
1096 // Returns "TRUE" iff "p" points into the allocated area of the heap. | 1119 // Returns "TRUE" iff "p" points into the allocated area of the heap. |
1097 virtual bool is_in(const void* p) const; | 1120 virtual bool is_in(const void* p) const; |
1098 | 1121 |
1099 // Return "TRUE" iff the given object address is within the collection | 1122 // Return "TRUE" iff the given object address is within the collection |
1302 // At least until perm gen collection is also G1-ified, at | 1325 // At least until perm gen collection is also G1-ified, at |
1303 // which point this should return false. | 1326 // which point this should return false. |
1304 return true; | 1327 return true; |
1305 } | 1328 } |
1306 | 1329 |
1307 virtual bool allocs_are_zero_filled(); | |
1308 | |
1309 // The boundary between a "large" and "small" array of primitives, in | 1330 // The boundary between a "large" and "small" array of primitives, in |
1310 // words. | 1331 // words. |
1311 virtual size_t large_typearray_limit(); | 1332 virtual size_t large_typearray_limit(); |
1312 | 1333 |
1313 // Returns "true" iff the given word_size is "very large". | 1334 // Returns "true" iff the given word_size is "very large". |
1534 | 1555 |
1535 // </NEW PREDICTION> | 1556 // </NEW PREDICTION> |
1536 | 1557 |
1537 protected: | 1558 protected: |
1538 size_t _max_heap_capacity; | 1559 size_t _max_heap_capacity; |
1539 | |
1540 public: | |
1541 // Temporary: call to mark things unimplemented for the G1 heap (e.g., | |
1542 // MemoryService). In productization, we can make this assert false | |
1543 // to catch such places (as well as searching for calls to this...) | |
1544 static void g1_unimplemented(); | |
1545 | |
1546 }; | 1560 }; |
1547 | 1561 |
1548 #define use_local_bitmaps 1 | 1562 #define use_local_bitmaps 1 |
1549 #define verify_local_bitmaps 0 | 1563 #define verify_local_bitmaps 0 |
1550 #define oop_buffer_length 256 | 1564 #define oop_buffer_length 256 |