comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 23286:dd9cc155639c

Merge with jdk8u66-b17
author:  Gilles Duboscq <gilles.m.duboscq@oracle.com>
date:    Thu, 07 Jan 2016 17:28:46 +0100
parents: d3cec14f33f3 ea47136e6ea4
--- 22786:ac649db7fec4
+++ 23286:dd9cc155639c
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -30,10 +30,11 @@
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1BiasedArray.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/g1InCSetState.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionManager.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
@@ -210,10 +211,13 @@
 
   friend class G1FreeHumongousRegionClosure;
   // Other related classes.
   friend class G1MarkSweep;
 
+  // Testing classes.
+  friend class G1CheckCSetFastTableClosure;
+
 private:
   // The one and only G1CollectedHeap, so static functions can find it.
   static G1CollectedHeap* _g1h;
 
   static size_t _humongous_object_threshold_in_words;
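Note: `_humongous_object_threshold_in_words` is the cut-off above which an allocation is treated as humongous. Its definition is not part of this diff; as a hedged sketch of G1's usual rule (an object is humongous once it is larger than half a region), with illustrative numbers:

    #include <cstddef>

    // Illustrative values only; the real threshold is derived from
    // HeapRegion::GrainWords, which does not appear in this diff.
    const size_t region_words = (1024 * 1024) / 8;             // e.g. a 1 MB region, in words
    const size_t humongous_threshold_words = region_words / 2; // "more than half a region"

    bool is_humongous(size_t word_size) {
      return word_size > humongous_threshold_words;
    }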
@@ -227,11 +231,10 @@
   HeapRegionSet _old_set;
 
   // It keeps track of the humongous regions.
   HeapRegionSet _humongous_set;
 
-  void clear_humongous_is_live_table();
   void eagerly_reclaim_humongous_regions();
 
   // The number of regions we could create by expansion.
   uint _expansion_regions;
 
@@ -297,26 +300,30 @@
   void abandon_gc_alloc_regions();
 
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
-  // Records whether the region at the given index is kept live by roots or
-  // references from the young generation.
-  class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
+  // Records whether the region at the given index is (still) a
+  // candidate for eager reclaim. Only valid for humongous start
+  // regions; other regions have unspecified values. Humongous start
+  // regions are initialized at start of collection pause, with
+  // candidates removed from the set as they are found reachable from
+  // roots or the young generation.
+  class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
   protected:
     bool default_value() const { return false; }
   public:
     void clear() { G1BiasedMappedArray<bool>::clear(); }
-    void set_live(uint region) {
-      set_by_index(region, true);
+    void set_candidate(uint region, bool value) {
+      set_by_index(region, value);
     }
-    bool is_live(uint region) {
+    bool is_candidate(uint region) {
       return get_by_index(region);
     }
   };
 
-  HumongousIsLiveBiasedMappedArray _humongous_is_live;
+  HumongousReclaimCandidates _humongous_reclaim_candidates;
   // Stores whether during humongous object registration we found candidate regions.
   // If not, we can skip a few steps.
   bool _has_humongous_reclaim_candidates;
 
   volatile unsigned _gc_time_stamp;
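Note: the new comment block describes the candidate protocol: flags are set for humongous start regions at the start of a pause and cleared as regions are found reachable. A minimal standalone sketch of that pattern (plain C++ with illustrative names, not the HotSpot types):

    #include <cstdint>
    #include <vector>

    class ReclaimCandidates {
      std::vector<bool> _flags;  // one entry per heap region
    public:
      explicit ReclaimCandidates(uint32_t num_regions) : _flags(num_regions, false) {}
      void set_candidate(uint32_t region, bool value) { _flags[region] = value; }
      bool is_candidate(uint32_t region) const { return _flags[region]; }
    };

    // Pause-time protocol sketched by the comment above:
    //   1. set_candidate(r, true) for each humongous start region r;
    //   2. during root and young-gen scanning, set_candidate(r, false)
    //      whenever a reference into r is found;
    //   3. regions still flagged afterwards are eagerly reclaimed.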
@@ -337,17 +344,18 @@
   // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have started.
-  volatile unsigned int _old_marking_cycles_started;
+  volatile uint _old_marking_cycles_started;
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have completed.
-  volatile unsigned int _old_marking_cycles_completed;
+  volatile uint _old_marking_cycles_completed;
 
   bool _concurrent_cycle_started;
+  bool _heap_summary_sent;
 
   // This is a non-product method that is helpful for testing. It is
   // called at the end of a GC and artificially expands the heap by
   // allocating a number of dead regions. This way we can induce very
   // frequent marking cycles and stress the cleanup / concurrent
@@ -359,10 +367,16 @@
   void clear_rsets_post_compaction();
 
   // If the HR printer is active, dump the state of the regions in the
   // heap after a compaction.
   void print_hrm_post_compaction();
+
+  // Create a memory mapper for auxiliary data structures of the given size and
+  // translation factor.
+  static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
+                                                         size_t size,
+                                                         size_t translation_factor);
 
   double verify(bool guard, const char* msg);
   void verify_before_gc();
   void verify_after_gc();
 
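Note: the `translation_factor` parameter expresses how many bytes of heap one byte of the auxiliary structure covers, so the mapper can size its backing store proportionally. Illustrative arithmetic only (the 512-byte card size is the usual HotSpot card-table granularity, not something stated in this diff):

    #include <cstddef>

    // One byte of auxiliary data per `translation_factor` bytes of heap.
    size_t aux_size_for(size_t heap_bytes, size_t translation_factor) {
      return heap_bytes / translation_factor;
    }

    // e.g. aux_size_for(heap_bytes, 512) would size a card table
    // with 512-byte cards.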
@@ -508,26 +522,26 @@
 
   // First-level mutator allocation attempt: try to allocate out of
   // the mutator alloc region without taking the Heap_lock. This
   // should only be used for non-humongous allocations.
   inline HeapWord* attempt_allocation(size_t word_size,
-                                      unsigned int* gc_count_before_ret,
-                                      int* gclocker_retry_count_ret);
+                                      uint* gc_count_before_ret,
+                                      uint* gclocker_retry_count_ret);
 
   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
                                     AllocationContext_t context,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret);
+                                    uint* gc_count_before_ret,
+                                    uint* gclocker_retry_count_ret);
 
   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.
   HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         unsigned int* gc_count_before_ret,
-                                         int* gclocker_retry_count_ret);
+                                         uint* gc_count_before_ret,
+                                         uint* gclocker_retry_count_ret);
 
   // Allocation attempt that should be called during safepoints (e.g.,
   // at the end of a successful GC). expect_null_mutator_alloc_region
   // specifies whether the mutator alloc region is expected to be NULL
   // or not.
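Note: these declarations describe a layered allocation path: an inline, lock-free first attempt, then a slow path that takes the Heap_lock and may schedule a pause. A self-contained sketch of that layering with a simple bump-pointer stand-in (hypothetical code, not the VM's; the slow path here merely "expands" instead of scheduling a GC):

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    struct Heap {
      std::atomic<size_t> _top{0};
      size_t _end = 1024;        // words available in the current "region" (simplified)
      std::mutex _heap_lock;     // stand-in for Heap_lock

      // First level: lock-free bump allocation via CAS on the top pointer.
      bool attempt_allocation_fast(size_t word_size, size_t* result) {
        size_t old_top = _top.load();
        while (old_top + word_size <= _end) {
          if (_top.compare_exchange_weak(old_top, old_top + word_size)) {
            *result = old_top;
            return true;         // common case: no lock taken
          }
        }
        return false;
      }

      // Second level: take the lock, make room, retry.
      bool attempt_allocation_slow(size_t word_size, size_t* result) {
        std::lock_guard<std::mutex> guard(_heap_lock);
        _end += 1024;            // sketch: "expand"; the real code may schedule a GC pause
        return attempt_allocation_fast(word_size, result);
      }
    };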
@@ -543,19 +557,13 @@
 
   // Allocate blocks during garbage collection. Will ensure an
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
-  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
-                                   size_t word_size,
-                                   AllocationContext_t context);
-
-  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
-                                    HeapRegion* alloc_region,
-                                    bool par,
-                                    size_t word_size);
+  inline HeapWord* par_allocate_during_gc(InCSetState dest,
+                                          size_t word_size,
+                                          AllocationContext_t context);
 
   // Ensure that no further allocations can happen in "r", bearing in mind
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
 
   // Allocation attempt during GC for a survivor object / PLAB.
@@ -573,13 +581,13 @@
   void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes);
 
   // For GC alloc regions.
   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
-                                  GCAllocPurpose ap);
+                                  InCSetState dest);
   void retire_gc_alloc_region(HeapRegion* alloc_region,
-                              size_t allocated_bytes, GCAllocPurpose ap);
+                              size_t allocated_bytes, InCSetState dest);
 
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
   //   cleared during the GC
@@ -636,31 +644,16 @@
   // Returns true if the heap was expanded by the requested amount;
   // false otherwise.
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes);
 
-  // Returns the PLAB statistics given a purpose.
-  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
-    PLABStats* stats = NULL;
-
-    switch (purpose) {
-    case GCAllocForSurvived:
-      stats = &_survivor_plab_stats;
-      break;
-    case GCAllocForTenured:
-      stats = &_old_plab_stats;
-      break;
-    default:
-      assert(false, "unrecognized GCAllocPurpose");
-    }
-
-    return stats;
-  }
-
-  // Determines PLAB size for a particular allocation purpose.
-  size_t desired_plab_sz(GCAllocPurpose purpose);
+  // Returns the PLAB statistics for a given destination.
+  inline PLABStats* alloc_buffer_stats(InCSetState dest);
+
+  // Determines PLAB size for a given destination.
+  inline size_t desired_plab_sz(InCSetState dest);
 
   inline AllocationContextStats& allocation_context_stats();
 
   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
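Note: the deleted `stats_for_purpose` switch is replaced by inline accessors keyed on `InCSetState`; the definitions live elsewhere, but the visible direction is a table lookup instead of a branch per purpose. A hedged standalone sketch of that shape (illustrative names only):

    #include <cassert>
    #include <cstddef>

    struct PLABStats { size_t desired_plab_sz = 0; };

    enum class Dest : int { Young = 0, Old = 1, Count = 2 };

    struct AllocStats {
      PLABStats _stats[static_cast<int>(Dest::Count)];

      // Index into a small array instead of switching on the purpose:
      // adding a destination needs no new case.
      PLABStats* alloc_buffer_stats(Dest dest) {
        int i = static_cast<int>(dest);
        assert(i >= 0 && i < static_cast<int>(Dest::Count) && "invalid destination");
        return &_stats[i];
      }
    };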
@@ -667,24 +660,27 @@
 
+  // Modify the reclaim candidate set and test for presence.
+  // These are only valid for starts_humongous regions.
+  inline void set_humongous_reclaim_candidate(uint region, bool value);
+  inline bool is_humongous_reclaim_candidate(uint region);
+
+  // Remove from the reclaim candidate set. Also remove from the
+  // collection set so that later encounters avoid the slow path.
   inline void set_humongous_is_live(oop obj);
 
-  bool humongous_is_live(uint region) {
-    return _humongous_is_live.is_live(region);
-  }
-
-  // Returns whether the given region (which must be a humongous (start) region)
-  // is to be considered conservatively live regardless of any other conditions.
-  bool humongous_region_is_always_live(uint index);
   // Register the given region to be part of the collection set.
   inline void register_humongous_region_with_in_cset_fast_test(uint index);
   // Register regions with humongous objects (actually on the start region) in
   // the in_cset_fast_test table.
   void register_humongous_regions_with_in_cset_fast_test();
   // We register a region with the fast "in collection set" test. We
   // simply set to true the array slot corresponding to this region.
-  void register_region_with_in_cset_fast_test(HeapRegion* r) {
-    _in_cset_fast_test.set_in_cset(r->hrm_index());
+  void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
+    _in_cset_fast_test.set_in_young(r->hrm_index());
+  }
+  void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
+    _in_cset_fast_test.set_in_old(r->hrm_index());
   }
 
   // This is a fast test on whether a reference points into the
   // collection set or not. Assume that the reference
   // points into the heap.
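Note: the renamed `register_young_region_...` / `register_old_region_...` pair writes a per-region value into `_in_cset_fast_test`, which is indexed by region number so a single load answers the in-cset question. A standalone sketch of such a table (constants, names, and the state values are illustrative, not the VM's):

    #include <cstdint>
    #include <vector>

    enum class State : int8_t { NotInCSet = 0, Young = 1, Old = 2, Humongous = 3 };

    struct FastCSetTest {
      uintptr_t _heap_base;
      int _region_shift;            // log2(region size in bytes)
      std::vector<State> _table;    // one entry per region

      uint32_t region_index(const void* addr) const {
        return static_cast<uint32_t>(
            (reinterpret_cast<uintptr_t>(addr) - _heap_base) >> _region_shift);
      }
      void set_in_young(uint32_t r) { _table[r] = State::Young; }
      void set_in_old(uint32_t r)   { _table[r] = State::Old; }
      // One table load answers "does this reference point into the cset?"
      bool is_in_cset(const void* addr) const {
        State s = _table[region_index(addr)];
        return s == State::Young || s == State::Old;
      }
    };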
@@ -712,11 +708,11 @@
   // the FullGCCount_lock in case a Java thread is waiting for a full
   // GC to happen (e.g., it called System.gc() with
   // +ExplicitGCInvokesConcurrent).
   void increment_old_marking_cycles_completed(bool concurrent);
 
-  unsigned int old_marking_cycles_completed() {
+  uint old_marking_cycles_completed() {
     return _old_marking_cycles_completed;
   }
 
   void register_concurrent_cycle_start(const Ticks& start_time);
   void register_concurrent_cycle_end();
@@ -771,11 +767,11 @@
   // gc_count_before (i.e., total_collections()) as a parameter since
   // it has to be read while holding the Heap_lock. Currently, both
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
   HeapWord* do_collection_pause(size_t word_size,
-                                unsigned int gc_count_before,
+                                uint gc_count_before,
                                 bool* succeeded,
                                 GCCause::Cause gc_cause);
 
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
@@ -809,26 +805,10 @@
   void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
 
   // Abandon the current collection set without recording policy
   // statistics or updating free lists.
   void abandon_collection_set(HeapRegion* cs_head);
-
-  // Applies "scan_non_heap_roots" to roots outside the heap,
-  // "scan_rs" to roots inside the heap (having done "set_region" to
-  // indicate the region in which the root resides),
-  // and does "scan_metadata" If "scan_rs" is
-  // NULL, then this step is skipped. The "worker_i"
-  // param is for use with parallel roots processing, and should be
-  // the "i" of the calling parallel worker thread's work(i) function.
-  // In the sequential case this param will be ignored.
-  void g1_process_roots(OopClosure* scan_non_heap_roots,
-                        OopClosure* scan_non_heap_weak_roots,
-                        OopsInHeapRegionClosure* scan_rs,
-                        CLDClosure* scan_strong_clds,
-                        CLDClosure* scan_weak_clds,
-                        CodeBlobClosure* scan_strong_code,
-                        uint worker_i);
 
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;
   bool _mark_in_progress;
@@ -1010,26 +990,15 @@
   // Time stamp to validate the regions recorded in the cache
   // used by G1CollectedHeap::start_cset_region_for_worker().
   // The heap region entry for a given worker is valid iff
   // the associated time stamp value matches the current value
   // of G1CollectedHeap::_gc_time_stamp.
-  unsigned int* _worker_cset_start_region_time_stamp;
-
-  enum G1H_process_roots_tasks {
-    G1H_PS_filter_satb_buffers,
-    G1H_PS_refProcessor_oops_do,
-    // Leave this one last.
-    G1H_PS_NumElements
-  };
-
-  SubTasksDone* _process_strong_tasks;
+  uint* _worker_cset_start_region_time_stamp;
 
   volatile bool _free_regions_coming;
 
 public:
-
-  SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
 
   void set_refine_cte_cl_concurrency(bool concurrent);
 
   RefToScanQueue *task_queue(int i) const;
 
@@ -1059,24 +1028,14 @@
   static size_t conservative_max_heap_alignment();
 
   // Initialize weak reference processing.
   virtual void ref_processing_init();
 
-  void set_par_threads(uint t) {
-    SharedHeap::set_par_threads(t);
-    // Done in SharedHeap but oddly there are
-    // two _process_strong_tasks's in a G1CollectedHeap
-    // so do it here too.
-    _process_strong_tasks->set_n_threads(t);
-  }
-
+  // Explicitly import set_par_threads into this scope
+  using SharedHeap::set_par_threads;
   // Set _n_par_threads according to a policy TBD.
   void set_par_threads();
-
-  void set_n_termination(int t) {
-    _process_strong_tasks->set_n_threads(t);
-  }
 
   virtual CollectedHeap::Name kind() const {
     return CollectedHeap::G1CollectedHeap;
   }
 
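Note: the using-declaration is needed because of standard C++ name lookup, not anything G1-specific: declaring `set_par_threads()` in the derived class hides every base-class overload of that name, and `using` re-exposes them. A minimal self-contained illustration:

    struct Base {
      void set_par_threads(unsigned t) { (void)t; /* configure t workers */ }
    };

    struct Derived : Base {
      // Without this line, the no-arg declaration below would hide
      // Base::set_par_threads(unsigned) and the calls in main() would fail.
      using Base::set_par_threads;
      void set_par_threads() { set_par_threads(4u); }  // picks the unsigned overload
    };

    int main() {
      Derived d;
      d.set_par_threads();    // no-arg overload declared in Derived
      d.set_par_threads(8u);  // Base overload made visible by the using-declaration
    }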
@@ -1147,10 +1106,14 @@
   // The max number of regions in the heap.
   uint max_regions() const { return _hrm.max_length(); }
 
   // The number of regions that are completely free.
   uint num_free_regions() const { return _hrm.num_free_regions(); }
+
+  MemoryUsage get_auxiliary_data_memory_usage() const {
+    return _hrm.get_auxiliary_data_memory_usage();
+  }
 
   // The number of regions that are not completely free.
   uint num_used_regions() const { return num_regions() - num_free_regions(); }
 
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
@@ -1180,10 +1143,13 @@
   // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
   // have any spurious marks. If errors are detected, print
   // appropriate error messages and crash.
   void check_bitmaps(const char* caller) PRODUCT_RETURN;
 
+  // Do sanity check on the contents of the in-cset fast test table.
+  bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
+
   // verify_region_sets() performs verification over the region
   // lists. It will be compiled in the product code to be used when
   // necessary (i.e., during heap verification).
   void verify_region_sets();
 
@@ -1275,57 +1241,19 @@
 
   inline bool is_in_cset(oop obj);
 
   inline bool is_in_cset_or_humongous(const oop obj);
 
-  enum in_cset_state_t {
-    InNeither,   // neither in collection set nor humongous
-    InCSet,      // region is in collection set only
-    IsHumongous  // region is a humongous start region
-  };
 private:
-  // Instances of this class are used for quick tests on whether a reference points
-  // into the collection set or is a humongous object (points into a humongous
-  // object).
-  // Each of the array's elements denotes whether the corresponding region is in
-  // the collection set or a humongous region.
-  // We use this to quickly reclaim humongous objects: by making a humongous region
-  // succeed this test, we sort-of add it to the collection set. During the reference
-  // iteration closures, when we see a humongous region, we simply mark it as
-  // referenced, i.e. live.
-  class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
-  protected:
-    char default_value() const { return G1CollectedHeap::InNeither; }
-  public:
-    void set_humongous(uintptr_t index) {
-      assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
-      set_by_index(index, G1CollectedHeap::IsHumongous);
-    }
-
-    void clear_humongous(uintptr_t index) {
-      set_by_index(index, G1CollectedHeap::InNeither);
-    }
-
-    void set_in_cset(uintptr_t index) {
-      assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
-      set_by_index(index, G1CollectedHeap::InCSet);
-    }
-
-    bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
-    bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
-    G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
-    void clear() { G1BiasedMappedArray<char>::clear(); }
-  };
-
   // This array is used for a quick test on whether a reference points into
   // the collection set or not. Each of the array's elements denotes whether the
   // corresponding region is in the collection set or not.
-  G1FastCSetBiasedMappedArray _in_cset_fast_test;
+  G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
 
 public:
 
-  inline in_cset_state_t in_cset_state(const oop obj);
+  inline InCSetState in_cset_state(const oop obj);
 
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
   bool is_in_g1_reserved(const void* p) const {
     return _hrm.reserved().contains(p);
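Note: the three-valued `in_cset_state_t` enum and its `char`-based array give way to `InCSetState`, defined in the newly included g1InCSetState.hpp, which is not part of this diff. The likely benefit of a dedicated state type is an ordered encoding where the common queries collapse to single comparisons; a hedged sketch of such an encoding (values are illustrative, not taken from the real header):

    #include <cstdint>

    struct InCSetState {
      // Illustrative encoding: Humongous = -1, NotInCSet = 0, Young = 1, Old = 2.
      int8_t _value;

      bool is_humongous() const            { return _value < 0; }
      bool is_in_cset() const              { return _value > 0; }  // Young or Old
      bool is_in_cset_or_humongous() const { return _value != 0; }
    };

With an ordering like this, `is_in_cset` is one signed comparison rather than two equality tests against enum members, which matters on a path exercised for every scanned reference.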