comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 3830:f44782f04dd4

7039627: G1: avoid BOT updates for survivor allocations and dirty survivor regions incrementally
Summary: Refactor the allocation code during GC to use the G1AllocRegion abstraction. Use separate subclasses of G1AllocRegion for survivor and old regions. Avoid BOT updates and dirty survivor cards incrementally for the former.
Reviewed-by: brutisso, johnc, ysr
author tonyp
date Fri, 12 Aug 2011 11:31:06 -0400
parents 6aa4feb8a366
children ff53346271fe
comparing 3829:87e40b34bc2b (left) with 3830:f44782f04dd4 (right)
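For orientation: the "avoid BOT updates for survivor allocations" part of the title concerns the block offset table (BOT), which card scanning uses to locate the first object on a dirty card. Survivor regions should never be card-scanned during a pause (see the "should never scan survivors" comment later in this change), so maintaining their BOT entries is wasted work. The toy model below only illustrates the idea; the names are made up, and the real HotSpot BOT stores encoded offsets rather than absolute object starts.

// Toy model: bump-pointer allocation in a region, with BOT updates that can
// be switched off (as this change does for survivor regions).
#include <cstddef>
#include <vector>

static const size_t kCardWords = 64;    // words per card in this toy model

struct ToyRegion {
  size_t top;                // next free word offset in the region
  std::vector<size_t> bot;   // per card: start of the object covering its first word
  bool update_bot;           // false for survivor regions after this change

  ToyRegion(size_t num_cards, bool bot_updates)
    : top(0), bot(num_cards, 0), update_bot(bot_updates) {}

  size_t allocate(size_t words) {
    size_t obj = top;
    top += words;
    if (update_bot) {
      // Every card whose first word now lies inside the new object records
      // the object's start, so a card scanner can walk from a card boundary.
      size_t first = (obj % kCardWords == 0) ? obj / kCardWords
                                             : obj / kCardWords + 1;
      for (size_t c = first; c < bot.size() && c * kCardWords < top; ++c) {
        bot[c] = obj;
      }
    }
    return obj;
  }
};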
585 } 585 }
586 } 586 }
587 return res; 587 return res;
588 } 588 }
589 589
590 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
591 size_t word_size) {
592 HeapRegion* alloc_region = NULL;
593 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
594 alloc_region = new_region(word_size, true /* do_expand */);
595 if (alloc_region != NULL) {
596 if (purpose == GCAllocForSurvived) {
597 _hr_printer.alloc(alloc_region, G1HRPrinter::Survivor);
598 alloc_region->set_survivor();
599 } else {
600 _hr_printer.alloc(alloc_region, G1HRPrinter::Old);
601 }
602 ++_gc_alloc_region_counts[purpose];
603 }
604 } else {
605 g1_policy()->note_alloc_region_limit_reached(purpose);
606 }
607 return alloc_region;
608 }
609
610 size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, 590 size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
611 size_t word_size) { 591 size_t word_size) {
612 assert(isHumongous(word_size), "word_size should be humongous"); 592 assert(isHumongous(word_size), "word_size should be humongous");
613 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 593 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
614 594
1089 } 1069 }
1090 1070
1091 ShouldNotReachHere(); 1071 ShouldNotReachHere();
1092 } 1072 }
1093 1073
1094 void G1CollectedHeap::abandon_gc_alloc_regions() {
1095 // first, make sure that the GC alloc region list is empty (it should!)
1096 assert(_gc_alloc_region_list == NULL, "invariant");
1097 release_gc_alloc_regions(true /* totally */);
1098 }
1099
1100 class PostMCRemSetClearClosure: public HeapRegionClosure { 1074 class PostMCRemSetClearClosure: public HeapRegionClosure {
1101 ModRefBarrierSet* _mr_bs; 1075 ModRefBarrierSet* _mr_bs;
1102 public: 1076 public:
1103 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} 1077 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
1104 bool doHeapRegion(HeapRegion* r) { 1078 bool doHeapRegion(HeapRegion* r) {
1779 } 1753 }
1780 1754
1781 void G1CollectedHeap::shrink(size_t shrink_bytes) { 1755 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1782 verify_region_sets_optional(); 1756 verify_region_sets_optional();
1783 1757
1784 release_gc_alloc_regions(true /* totally */); 1758 // We should only reach here at the end of a Full GC which means we
1759 // should not be holding on to any GC alloc regions. The method
1760 // below will make sure of that and do any remaining cleanup.
1761 abandon_gc_alloc_regions();
1762
1785 // Instead of tearing down / rebuilding the free lists here, we 1763 // Instead of tearing down / rebuilding the free lists here, we
1786 // could instead use the remove_all_pending() method on free_list to 1764 // could instead use the remove_all_pending() method on free_list to
1787 // remove only the ones that we need to remove. 1765 // remove only the ones that we need to remove.
1788 tear_down_region_lists(); // We will rebuild them in a moment. 1766 tear_down_region_lists(); // We will rebuild them in a moment.
1789 shrink_helper(shrink_bytes); 1767 shrink_helper(shrink_bytes);
1819 _secondary_free_list("Secondary Free List"), 1797 _secondary_free_list("Secondary Free List"),
1820 _humongous_set("Master Humongous Set"), 1798 _humongous_set("Master Humongous Set"),
1821 _free_regions_coming(false), 1799 _free_regions_coming(false),
1822 _young_list(new YoungList(this)), 1800 _young_list(new YoungList(this)),
1823 _gc_time_stamp(0), 1801 _gc_time_stamp(0),
1802 _retained_old_gc_alloc_region(NULL),
1824 _surviving_young_words(NULL), 1803 _surviving_young_words(NULL),
1825 _full_collections_completed(0), 1804 _full_collections_completed(0),
1826 _in_cset_fast_test(NULL), 1805 _in_cset_fast_test(NULL),
1827 _in_cset_fast_test_base(NULL), 1806 _in_cset_fast_test_base(NULL),
1828 _dirty_cards_region_list(NULL) { 1807 _dirty_cards_region_list(NULL) {
1849 for (int i = 0; i < n_queues; i++) { 1828 for (int i = 0; i < n_queues; i++) {
1850 RefToScanQueue* q = new RefToScanQueue(); 1829 RefToScanQueue* q = new RefToScanQueue();
1851 q->initialize(); 1830 q->initialize();
1852 _task_queues->register_queue(i, q); 1831 _task_queues->register_queue(i, q);
1853 } 1832 }
1854
1855 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1856 _gc_alloc_regions[ap] = NULL;
1857 _gc_alloc_region_counts[ap] = 0;
1858 _retained_gc_alloc_regions[ap] = NULL;
1859 // by default, we do not retain a GC alloc region for each ap;
1860 // we'll override this, when appropriate, below
1861 _retain_gc_alloc_region[ap] = false;
1862 }
1863
1864 // We will try to remember the last half-full tenured region we
1865 // allocated to at the end of a collection so that we can re-use it
1866 // during the next collection.
1867 _retain_gc_alloc_region[GCAllocForTenured] = true;
1868 1833
1869 guarantee(_task_queues != NULL, "task_queues allocation failure."); 1834 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1870 } 1835 }
1871 1836
1872 jint G1CollectedHeap::initialize() { 1837 jint G1CollectedHeap::initialize() {
2080 &JavaThread::dirty_card_queue_set()); 2045 &JavaThread::dirty_card_queue_set());
2081 2046
2082 // In case we're keeping closure specialization stats, initialize those 2047 // In case we're keeping closure specialization stats, initialize those
2083 // counts and that mechanism. 2048 // counts and that mechanism.
2084 SpecializationStats::clear(); 2049 SpecializationStats::clear();
2085
2086 _gc_alloc_region_list = NULL;
2087 2050
2088 // Do later initialization work for concurrent refinement. 2051 // Do later initialization work for concurrent refinement.
2089 _cg1r->init(); 2052 _cg1r->init();
2090 2053
2091 // Here we allocate the dummy full region that is required by the 2054 // Here we allocate the dummy full region that is required by the
2202 size_t G1CollectedHeap::recalculate_used() const { 2165 size_t G1CollectedHeap::recalculate_used() const {
2203 SumUsedClosure blk; 2166 SumUsedClosure blk;
2204 heap_region_iterate(&blk); 2167 heap_region_iterate(&blk);
2205 return blk.result(); 2168 return blk.result();
2206 } 2169 }
2207
2208 #ifndef PRODUCT
2209 class SumUsedRegionsClosure: public HeapRegionClosure {
2210 size_t _num;
2211 public:
2212 SumUsedRegionsClosure() : _num(0) {}
2213 bool doHeapRegion(HeapRegion* r) {
2214 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
2215 _num += 1;
2216 }
2217 return false;
2218 }
2219 size_t result() { return _num; }
2220 };
2221
2222 size_t G1CollectedHeap::recalculate_used_regions() const {
2223 SumUsedRegionsClosure blk;
2224 heap_region_iterate(&blk);
2225 return blk.result();
2226 }
2227 #endif // PRODUCT
2228 2170
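The surviving recalculate_used() above, and the SumUsedRegionsClosure being deleted here, both use the same visitor idiom: heap_region_iterate() applies a HeapRegionClosure to every region and the closure accumulates a result. A self-contained sketch of that idiom with stand-in types (not the HotSpot classes):

#include <cstddef>
#include <vector>

struct Region { size_t used; };   // stand-in for HeapRegion

struct RegionClosure {            // stand-in for HeapRegionClosure
  virtual ~RegionClosure() {}
  // Returning true stops the iteration early, like doHeapRegion().
  virtual bool do_region(Region* r) = 0;
};

struct SumUsedClosure : RegionClosure {
  size_t total;
  SumUsedClosure() : total(0) {}
  virtual bool do_region(Region* r) { total += r->used; return false; }
};

// Stand-in for G1CollectedHeap::heap_region_iterate().
void region_iterate(std::vector<Region>& heap, RegionClosure* cl) {
  for (size_t i = 0; i < heap.size(); ++i) {
    if (cl->do_region(&heap[i])) break;
  }
}

size_t recalculate_used(std::vector<Region>& heap) {
  SumUsedClosure blk;
  region_iterate(heap, &blk);
  return blk.total;
}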
2229 size_t G1CollectedHeap::unsafe_max_alloc() { 2171 size_t G1CollectedHeap::unsafe_max_alloc() {
2230 if (free_regions() > 0) return HeapRegion::GrainBytes; 2172 if (free_regions() > 0) return HeapRegion::GrainBytes;
2231 // otherwise, is there space in the current allocation region? 2173 // otherwise, is there space in the current allocation region?
2232 2174
3406 // get entries from the secondary_free_list. 3348 // get entries from the secondary_free_list.
3407 if (!G1StressConcRegionFreeing) { 3349 if (!G1StressConcRegionFreeing) {
3408 append_secondary_free_list_if_not_empty_with_lock(); 3350 append_secondary_free_list_if_not_empty_with_lock();
3409 } 3351 }
3410 3352
3411 increment_gc_time_stamp();
3412
3413 if (g1_policy()->in_young_gc_mode()) { 3353 if (g1_policy()->in_young_gc_mode()) {
3414 assert(check_young_list_well_formed(), 3354 assert(check_young_list_well_formed(),
3415 "young list should be well formed"); 3355 "young list should be well formed");
3416 } 3356 }
3417 3357
3418 { // Call to jvmpi::post_class_unload_events must occur outside of active GC 3358 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3419 IsGCActiveMark x; 3359 IsGCActiveMark x;
3420 3360
3421 gc_prologue(false); 3361 gc_prologue(false);
3422 increment_total_collections(false /* full gc */); 3362 increment_total_collections(false /* full gc */);
3363 increment_gc_time_stamp();
3423 3364
3424 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { 3365 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
3425 HandleMark hm; // Discard invalid handles created during verification 3366 HandleMark hm; // Discard invalid handles created during verification
3426 gclog_or_tty->print(" VerifyBeforeGC:"); 3367 gclog_or_tty->print(" VerifyBeforeGC:");
3427 prepare_for_verify(); 3368 prepare_for_verify();
3470 #endif // YOUNG_LIST_VERBOSE 3411 #endif // YOUNG_LIST_VERBOSE
3471 3412
3472 if (g1_policy()->during_initial_mark_pause()) { 3413 if (g1_policy()->during_initial_mark_pause()) {
3473 concurrent_mark()->checkpointRootsInitialPre(); 3414 concurrent_mark()->checkpointRootsInitialPre();
3474 } 3415 }
3475 save_marks(); 3416 perm_gen()->save_marks();
3476 3417
3477 // We must do this before any possible evacuation that should propagate 3418 // We must do this before any possible evacuation that should propagate
3478 // marks. 3419 // marks.
3479 if (mark_in_progress()) { 3420 if (mark_in_progress()) {
3480 double start_time_sec = os::elapsedTime(); 3421 double start_time_sec = os::elapsedTime();
3532 collection_set_iterate(&cl); 3473 collection_set_iterate(&cl);
3533 #endif // ASSERT 3474 #endif // ASSERT
3534 3475
3535 setup_surviving_young_words(); 3476 setup_surviving_young_words();
3536 3477
3537 // Set up the gc allocation regions. 3478 // Initialize the GC alloc regions.
3538 get_gc_alloc_regions(); 3479 init_gc_alloc_regions();
3539 3480
3540 // Actually do the work... 3481 // Actually do the work...
3541 evacuate_collection_set(); 3482 evacuate_collection_set();
3542 3483
3543 free_collection_set(g1_policy()->collection_set()); 3484 free_collection_set(g1_policy()->collection_set());
3578 if (evacuation_failed()) { 3519 if (evacuation_failed()) {
3579 _summary_bytes_used = recalculate_used(); 3520 _summary_bytes_used = recalculate_used();
3580 } else { 3521 } else {
3581 // The "used" of the the collection set have already been subtracted 3522 // The "used" of the the collection set have already been subtracted
3582 // when they were freed. Add in the bytes evacuated. 3523 // when they were freed. Add in the bytes evacuated.
3583 _summary_bytes_used += g1_policy()->bytes_in_to_space(); 3524 _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
3584 } 3525 }
3585 3526
3586 if (g1_policy()->in_young_gc_mode() && 3527 if (g1_policy()->in_young_gc_mode() &&
3587 g1_policy()->during_initial_mark_pause()) { 3528 g1_policy()->during_initial_mark_pause()) {
3588 concurrent_mark()->checkpointRootsInitialPost(); 3529 concurrent_mark()->checkpointRootsInitialPost();
3611 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; 3552 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
3612 g1_policy()->record_pause_time_ms(pause_time_ms); 3553 g1_policy()->record_pause_time_ms(pause_time_ms);
3613 g1_policy()->record_collection_pause_end(); 3554 g1_policy()->record_collection_pause_end();
3614 3555
3615 MemoryService::track_memory_usage(); 3556 MemoryService::track_memory_usage();
3557
3558 // In prepare_for_verify() below we'll need to scan the deferred
3559 // update buffers to bring the RSets up-to-date if
3560 // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3561 // the update buffers we'll probably need to scan cards on the
3562 // regions we just allocated to (i.e., the GC alloc
3563 // regions). However, during the last GC we called
3564 // set_saved_mark() on all the GC alloc regions, so card
3565 // scanning might skip the [saved_mark_word()...top()] area of
3566 // those regions (i.e., the area we allocated objects into
3567 // during the last GC). But it shouldn't. Given that
3568 // saved_mark_word() is conditional on whether the GC time stamp
3569 // on the region is current or not, by incrementing the GC time
3570 // stamp here we invalidate all the GC time stamps on all the
3571 // regions and saved_mark_word() will simply return top() for
3572 // all the regions. This is a nicer way of ensuring this rather
3573 // than iterating over the regions and fixing them. In fact, the
3574 // GC time stamp increment here also ensures that
3575 // saved_mark_word() will return top() between pauses, i.e.,
3576 // during concurrent refinement. So we don't need the
3577 // is_gc_active() check to decide which top to use when
3578 // scanning cards (see CR 7039627).
3579 increment_gc_time_stamp();
3616 3580
3617 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { 3581 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
3618 HandleMark hm; // Discard invalid handles created during verification 3582 HandleMark hm; // Discard invalid handles created during verification
3619 gclog_or_tty->print(" VerifyAfterGC:"); 3583 gclog_or_tty->print(" VerifyAfterGC:");
3620 prepare_for_verify(); 3584 prepare_for_verify();
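The long comment above relies on a property of saved_mark_word(): a region's saved mark counts only while the region's recorded GC time stamp matches the global one, so a single increment_gc_time_stamp() call makes every region report top() instead. A minimal standalone model of that conditional (illustrative names; not the actual HeapRegion code):

#include <cstddef>

typedef size_t HeapWordIdx;              // stand-in for HeapWord*

static unsigned global_gc_time_stamp = 0;

// Bumping the global stamp "invalidates" every region's saved mark at once,
// which is exactly what the pause does right before verification.
inline void increment_gc_time_stamp() { ++global_gc_time_stamp; }

struct ModelRegion {
  HeapWordIdx _top;
  HeapWordIdx _saved_mark;
  unsigned    _time_stamp;

  ModelRegion() : _top(0), _saved_mark(0), _time_stamp(0) {}

  void set_saved_mark() {
    _saved_mark = _top;
    _time_stamp = global_gc_time_stamp;  // mark is valid for this stamp only
  }

  HeapWordIdx saved_mark_word() const {
    // A stale stamp means the saved mark is from an earlier pause: treat the
    // whole allocated part of the region, up to top(), as unscanned.
    if (_time_stamp < global_gc_time_stamp) {
      return _top;
    }
    return _saved_mark;
  }
};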
3711 void G1CollectedHeap::release_mutator_alloc_region() { 3675 void G1CollectedHeap::release_mutator_alloc_region() {
3712 _mutator_alloc_region.release(); 3676 _mutator_alloc_region.release();
3713 assert(_mutator_alloc_region.get() == NULL, "post-condition"); 3677 assert(_mutator_alloc_region.get() == NULL, "post-condition");
3714 } 3678 }
3715 3679
3716 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { 3680 void G1CollectedHeap::init_gc_alloc_regions() {
3717 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
3718 // make sure we don't call set_gc_alloc_region() multiple times on
3719 // the same region
3720 assert(r == NULL || !r->is_gc_alloc_region(),
3721 "shouldn't already be a GC alloc region");
3722 assert(r == NULL || !r->isHumongous(),
3723 "humongous regions shouldn't be used as GC alloc regions");
3724
3725 HeapWord* original_top = NULL;
3726 if (r != NULL)
3727 original_top = r->top();
3728
3729 // We will want to record the used space in r as being there before gc.
3730 // Once we install it as a GC alloc region, it's eligible for allocation.
3731 // So record it now and use it later.
3732 size_t r_used = 0;
3733 if (r != NULL) {
3734 r_used = r->used();
3735
3736 if (G1CollectedHeap::use_parallel_gc_threads()) {
3737 // need to take the lock to guard against two threads calling
3738 // get_gc_alloc_region concurrently (very unlikely but...)
3739 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3740 r->save_marks();
3741 }
3742 }
3743 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
3744 _gc_alloc_regions[purpose] = r;
3745 if (old_alloc_region != NULL) {
3746 // Replace aliases too.
3747 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
3748 if (_gc_alloc_regions[ap] == old_alloc_region) {
3749 _gc_alloc_regions[ap] = r;
3750 }
3751 }
3752 }
3753 if (r != NULL) {
3754 push_gc_alloc_region(r);
3755 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
3756 // We are using a region as a GC alloc region after it has been used
3757 // as a mutator allocation region during the current marking cycle.
3758 // The mutator-allocated objects are currently implicitly marked, but
3759 // when we move hr->next_top_at_mark_start() forward at the end
3760 // of the GC pause, they won't be. We therefore mark all objects in
3761 // the "gap". We do this object-by-object, since marking densely
3762 // does not currently work right with marking bitmap iteration. This
3763 // means we rely on TLAB filling at the start of pauses, and no
3764 // "resuscitation" of filled TLAB's. If we want to do this, we need
3765 // to fix the marking bitmap iteration.
3766 HeapWord* curhw = r->next_top_at_mark_start();
3767 HeapWord* t = original_top;
3768
3769 while (curhw < t) {
3770 oop cur = (oop)curhw;
3771 // We'll assume parallel for generality. This is rare code.
3772 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
3773 curhw = curhw + cur->size();
3774 }
3775 assert(curhw == t, "Should have parsed correctly.");
3776 }
3777 if (G1PolicyVerbose > 1) {
3778 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
3779 "for survivors:", r->bottom(), original_top, r->end());
3780 r->print();
3781 }
3782 g1_policy()->record_before_bytes(r_used);
3783 }
3784 }
3785
3786 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
3787 assert(Thread::current()->is_VM_thread() ||
3788 FreeList_lock->owned_by_self(), "Precondition");
3789 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
3790 "Precondition.");
3791 hr->set_is_gc_alloc_region(true);
3792 hr->set_next_gc_alloc_region(_gc_alloc_region_list);
3793 _gc_alloc_region_list = hr;
3794 }
3795
3796 #ifdef G1_DEBUG
3797 class FindGCAllocRegion: public HeapRegionClosure {
3798 public:
3799 bool doHeapRegion(HeapRegion* r) {
3800 if (r->is_gc_alloc_region()) {
3801 gclog_or_tty->print_cr("Region "HR_FORMAT" is still a GC alloc region",
3802 HR_FORMAT_PARAMS(r));
3803 }
3804 return false;
3805 }
3806 };
3807 #endif // G1_DEBUG
3808
3809 void G1CollectedHeap::forget_alloc_region_list() {
3810 assert_at_safepoint(true /* should_be_vm_thread */); 3681 assert_at_safepoint(true /* should_be_vm_thread */);
3811 while (_gc_alloc_region_list != NULL) { 3682
3812 HeapRegion* r = _gc_alloc_region_list; 3683 _survivor_gc_alloc_region.init();
3813 assert(r->is_gc_alloc_region(), "Invariant."); 3684 _old_gc_alloc_region.init();
3814 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on 3685 HeapRegion* retained_region = _retained_old_gc_alloc_region;
3815 // newly allocated data in order to be able to apply deferred updates 3686 _retained_old_gc_alloc_region = NULL;
3816 // before the GC is done for verification purposes (i.e to allow 3687
3817 // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the 3688 // We will discard the current GC alloc region if:
3818 // collection. 3689 // a) it's in the collection set (it can happen!),
3819 r->ContiguousSpace::set_saved_mark(); 3690 // b) it's already full (no point in using it),
3820 _gc_alloc_region_list = r->next_gc_alloc_region(); 3691 // c) it's empty (this means that it was emptied during
3821 r->set_next_gc_alloc_region(NULL); 3692 // a cleanup and it should be on the free list now), or
3822 r->set_is_gc_alloc_region(false); 3693 // d) it's humongous (this means that it was emptied
3823 if (r->is_survivor()) { 3694 // during a cleanup and was added to the free list, but
3824 if (r->is_empty()) { 3695 // has been subseqently used to allocate a humongous
3825 r->set_not_young(); 3696 // object that may be less than the region size).
3826 } else { 3697 if (retained_region != NULL &&
3827 _young_list->add_survivor_region(r); 3698 !retained_region->in_collection_set() &&
3828 } 3699 !(retained_region->top() == retained_region->end()) &&
3829 } 3700 !retained_region->is_empty() &&
3830 } 3701 !retained_region->isHumongous()) {
3831 #ifdef G1_DEBUG 3702 retained_region->set_saved_mark();
3832 FindGCAllocRegion fa; 3703 _old_gc_alloc_region.set(retained_region);
3833 heap_region_iterate(&fa); 3704 _hr_printer.reuse(retained_region);
3834 #endif // G1_DEBUG 3705 }
3835 } 3706 }
3836 3707
3837 3708 void G1CollectedHeap::release_gc_alloc_regions() {
3838 bool G1CollectedHeap::check_gc_alloc_regions() { 3709 _survivor_gc_alloc_region.release();
3839 // TODO: allocation regions check 3710 // If we have an old GC alloc region to release, we'll save it in
3840 return true; 3711 // _retained_old_gc_alloc_region. If we don't
3841 } 3712 // _retained_old_gc_alloc_region will become NULL. This is what we
3842 3713 // want either way so no reason to check explicitly for either
3843 void G1CollectedHeap::get_gc_alloc_regions() { 3714 // condition.
3844 // First, let's check that the GC alloc region list is empty (it should) 3715 _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
3845 assert(_gc_alloc_region_list == NULL, "invariant"); 3716 }
3846 3717
3847 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3718 void G1CollectedHeap::abandon_gc_alloc_regions() {
3848 assert(_gc_alloc_regions[ap] == NULL, "invariant"); 3719 assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
3849 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); 3720 assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
3850 3721 _retained_old_gc_alloc_region = NULL;
3851 // Create new GC alloc regions. 3722 }
3852 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
3853 _retained_gc_alloc_regions[ap] = NULL;
3854
3855 if (alloc_region != NULL) {
3856 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
3857
3858 // let's make sure that the GC alloc region is not tagged as such
3859 // outside a GC operation
3860 assert(!alloc_region->is_gc_alloc_region(), "sanity");
3861
3862 if (alloc_region->in_collection_set() ||
3863 alloc_region->top() == alloc_region->end() ||
3864 alloc_region->top() == alloc_region->bottom() ||
3865 alloc_region->isHumongous()) {
3866 // we will discard the current GC alloc region if
3867 // * it's in the collection set (it can happen!),
3868 // * it's already full (no point in using it),
3869 // * it's empty (this means that it was emptied during
3870 // a cleanup and it should be on the free list now), or
3871 // * it's humongous (this means that it was emptied
3872 // during a cleanup and was added to the free list, but
3873 // has been subsequently used to allocate a humongous
3874 // object that may be less than the region size).
3875
3876 alloc_region = NULL;
3877 }
3878 }
3879
3880 if (alloc_region == NULL) {
3881 // we will get a new GC alloc region
3882 alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
3883 } else {
3884 // the region was retained from the last collection
3885 ++_gc_alloc_region_counts[ap];
3886
3887 _hr_printer.reuse(alloc_region);
3888 }
3889
3890 if (alloc_region != NULL) {
3891 assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
3892 set_gc_alloc_region(ap, alloc_region);
3893 }
3894
3895 assert(_gc_alloc_regions[ap] == NULL ||
3896 _gc_alloc_regions[ap]->is_gc_alloc_region(),
3897 "the GC alloc region should be tagged as such");
3898 assert(_gc_alloc_regions[ap] == NULL ||
3899 _gc_alloc_regions[ap] == _gc_alloc_region_list,
3900 "the GC alloc region should be the same as the GC alloc list head");
3901 }
3902 // Set alternative regions for allocation purposes that have reached
3903 // their limit.
3904 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
3905 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
3906 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
3907 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
3908 }
3909 }
3910 assert(check_gc_alloc_regions(), "alloc regions messed up");
3911 }
3912
3913 void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
3914 // We keep a separate list of all regions that have been alloc regions in
3915 // the current collection pause. Forget that now. This method will
3916 // untag the GC alloc regions and tear down the GC alloc region
3917 // list. It's desirable that no regions are tagged as GC alloc
3918 // outside GCs.
3919
3920 forget_alloc_region_list();
3921
3922 // The current alloc regions contain objs that have survived
3923 // collection. Make them no longer GC alloc regions.
3924 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
3925 HeapRegion* r = _gc_alloc_regions[ap];
3926 _retained_gc_alloc_regions[ap] = NULL;
3927 _gc_alloc_region_counts[ap] = 0;
3928
3929 if (r != NULL) {
3930 // we retain nothing on _gc_alloc_regions between GCs
3931 set_gc_alloc_region(ap, NULL);
3932
3933 if (r->is_empty()) {
3934 // We didn't actually allocate anything in it; let's just put
3935 // it back on the free list.
3936 _free_list.add_as_head(r);
3937 } else if (_retain_gc_alloc_region[ap] && !totally) {
3938 // retain it so that we can use it at the beginning of the next GC
3939 _retained_gc_alloc_regions[ap] = r;
3940 }
3941 }
3942 }
3943 }
3944
3945 #ifndef PRODUCT
3946 // Useful for debugging
3947
3948 void G1CollectedHeap::print_gc_alloc_regions() {
3949 gclog_or_tty->print_cr("GC alloc regions");
3950 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
3951 HeapRegion* r = _gc_alloc_regions[ap];
3952 if (r == NULL) {
3953 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL);
3954 } else {
3955 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT,
3956 ap, r->bottom(), r->used());
3957 }
3958 }
3959 }
3960 #endif // PRODUCT
3961 3723
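The new init_gc_alloc_regions() above only reuses the old GC alloc region retained from the previous pause when none of the four discard conditions (a) through (d) listed in its comment apply. The same filter, restated as a self-contained sketch with a stand-in Region type:

#include <cstddef>

struct Region {
  size_t top;
  size_t bottom;
  size_t end;
  bool   in_collection_set;
  bool   humongous;

  bool is_empty() const { return top == bottom; }
  bool is_full()  const { return top == end; }
};

// Returns the region to install as the old GC alloc region, or NULL if the
// retained one must be discarded and a fresh region allocated later.
Region* filter_retained_old_region(Region* retained) {
  if (retained != NULL &&
      !retained->in_collection_set &&   // (a) not being evacuated
      !retained->is_full() &&           // (b) still has room
      !retained->is_empty() &&          // (c) not freed by a cleanup
      !retained->humongous) {           // (d) not reused for a humongous obj
    return retained;
  }
  return NULL;
}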
3962 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { 3724 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
3963 _drain_in_progress = false; 3725 _drain_in_progress = false;
3964 set_evac_failure_closure(cl); 3726 set_evac_failure_closure(cl);
3965 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); 3727 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
3971 "Postcondition"); 3733 "Postcondition");
3972 assert(!_drain_in_progress, "Postcondition"); 3734 assert(!_drain_in_progress, "Postcondition");
3973 delete _evac_failure_scan_stack; 3735 delete _evac_failure_scan_stack;
3974 _evac_failure_scan_stack = NULL; 3736 _evac_failure_scan_stack = NULL;
3975 } 3737 }
3976
3977
3978 3738
3979 // *** Sequential G1 Evacuation 3739 // *** Sequential G1 Evacuation
3980 3740
3981 class G1IsAliveClosure: public BoolObjectClosure { 3741 class G1IsAliveClosure: public BoolObjectClosure {
3982 G1CollectedHeap* _g1; 3742 G1CollectedHeap* _g1;
4285 _objs_with_preserved_marks->push(obj); 4045 _objs_with_preserved_marks->push(obj);
4286 _preserved_marks_of_objs->push(m); 4046 _preserved_marks_of_objs->push(m);
4287 } 4047 }
4288 } 4048 }
4289 4049
4290 // *** Parallel G1 Evacuation
4291
4292 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, 4050 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4293 size_t word_size) { 4051 size_t word_size) {
4294 assert(!isHumongous(word_size), 4052 if (purpose == GCAllocForSurvived) {
4295 err_msg("we should not be seeing humongous allocation requests " 4053 HeapWord* result = survivor_attempt_allocation(word_size);
4296 "during GC, word_size = "SIZE_FORMAT, word_size)); 4054 if (result != NULL) {
4297 4055 return result;
4298 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
4299 // let the caller handle alloc failure
4300 if (alloc_region == NULL) return NULL;
4301
4302 HeapWord* block = alloc_region->par_allocate(word_size);
4303 if (block == NULL) {
4304 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
4305 }
4306 return block;
4307 }
4308
4309 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
4310 bool par) {
4311 // Another thread might have obtained alloc_region for the given
4312 // purpose, and might be attempting to allocate in it, and might
4313 // succeed. Therefore, we can't do the "finalization" stuff on the
4314 // region below until we're sure the last allocation has happened.
4315 // We ensure this by allocating the remaining space with a garbage
4316 // object.
4317 if (par) par_allocate_remaining_space(alloc_region);
4318 // Now we can do the post-GC stuff on the region.
4319 alloc_region->note_end_of_copying();
4320 g1_policy()->record_after_bytes(alloc_region->used());
4321 _hr_printer.retire(alloc_region);
4322 }
4323
4324 HeapWord*
4325 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
4326 HeapRegion* alloc_region,
4327 bool par,
4328 size_t word_size) {
4329 assert(!isHumongous(word_size),
4330 err_msg("we should not be seeing humongous allocation requests "
4331 "during GC, word_size = "SIZE_FORMAT, word_size));
4332
4333 // We need to make sure we serialize calls to this method. Given
4334 // that the FreeList_lock guards accesses to the free_list anyway,
4335 // and we need to potentially remove a region from it, we'll use it
4336 // to protect the whole call.
4337 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4338
4339 HeapWord* block = NULL;
4340 // In the parallel case, a previous thread to obtain the lock may have
4341 // already assigned a new gc_alloc_region.
4342 if (alloc_region != _gc_alloc_regions[purpose]) {
4343 assert(par, "But should only happen in parallel case.");
4344 alloc_region = _gc_alloc_regions[purpose];
4345 if (alloc_region == NULL) return NULL;
4346 block = alloc_region->par_allocate(word_size);
4347 if (block != NULL) return block;
4348 // Otherwise, continue; this new region is empty, too.
4349 }
4350 assert(alloc_region != NULL, "We better have an allocation region");
4351 retire_alloc_region(alloc_region, par);
4352
4353 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
4354 // Cannot allocate more regions for the given purpose.
4355 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
4356 // Is there an alternative?
4357 if (purpose != alt_purpose) {
4358 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
4359 // Hasn't the alternative region been aliased?
4360 if (alloc_region != alt_region && alt_region != NULL) {
4361 // Try to allocate in the alternative region.
4362 if (par) {
4363 block = alt_region->par_allocate(word_size);
4364 } else {
4365 block = alt_region->allocate(word_size);
4366 }
4367 // Make an alias.
4368 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
4369 if (block != NULL) {
4370 return block;
4371 }
4372 retire_alloc_region(alt_region, par);
4373 }
4374 // Both the allocation region and the alternative one are full
4375 // and aliased, replace them with a new allocation region.
4376 purpose = alt_purpose;
4377 } else { 4056 } else {
4378 set_gc_alloc_region(purpose, NULL); 4057 // Let's try to allocate in the old gen in case we can fit the
4379 return NULL; 4058 // object there.
4380 } 4059 return old_attempt_allocation(word_size);
4381 } 4060 }
4382 4061 } else {
4383 // Now allocate a new region for allocation. 4062 assert(purpose == GCAllocForTenured, "sanity");
4384 alloc_region = new_gc_alloc_region(purpose, word_size); 4063 HeapWord* result = old_attempt_allocation(word_size);
4385 4064 if (result != NULL) {
4386 // let the caller handle alloc failure 4065 return result;
4387 if (alloc_region != NULL) {
4388
4389 assert(check_gc_alloc_regions(), "alloc regions messed up");
4390 assert(alloc_region->saved_mark_at_top(),
4391 "Mark should have been saved already.");
4392 // This must be done last: once it's installed, other threads may
4393 // allocate in it (without holding the lock).
4394 set_gc_alloc_region(purpose, alloc_region);
4395
4396 if (par) {
4397 block = alloc_region->par_allocate(word_size);
4398 } else { 4066 } else {
4399 block = alloc_region->allocate(word_size); 4067 // Let's try to allocate in the survivors in case we can fit the
4400 } 4068 // object there.
4401 // Caller handles alloc failure. 4069 return survivor_attempt_allocation(word_size);
4402 } else { 4070 }
4403 // This sets other apis using the same old alloc region to NULL, also. 4071 }
4404 set_gc_alloc_region(purpose, NULL); 4072
4405 } 4073 ShouldNotReachHere();
4406 return block; // May be NULL. 4074 // Trying to keep some compilers happy.
4407 } 4075 return NULL;
4408
4409 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
4410 HeapWord* block = NULL;
4411 size_t free_words;
4412 do {
4413 free_words = r->free()/HeapWordSize;
4414 // If there's too little space, no one can allocate, so we're done.
4415 if (free_words < CollectedHeap::min_fill_size()) return;
4416 // Otherwise, try to claim it.
4417 block = r->par_allocate(free_words);
4418 } while (block == NULL);
4419 fill_with_object(block, free_words);
4420 } 4076 }
4421 4077
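Because the two-column view makes the rewritten par_allocate_during_gc() above hard to follow, here is its new control flow restated as a compact, compilable sketch: a survivor allocation that fails falls back to the old region, and a tenured allocation that fails falls back to the survivor region. The two attempt functions are stubs standing in for the survivor/old G1AllocRegion wrappers.

#include <cstddef>

enum GCAllocPurpose { GCAllocForSurvived, GCAllocForTenured };
typedef void* HeapWordPtr;               // stand-in for HeapWord*

// Stubs standing in for the real per-purpose allocators.
static HeapWordPtr survivor_attempt_allocation(size_t /*word_size*/) { return NULL; }
static HeapWordPtr old_attempt_allocation(size_t /*word_size*/)      { return NULL; }

static HeapWordPtr par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
  if (purpose == GCAllocForSurvived) {
    HeapWordPtr result = survivor_attempt_allocation(word_size);
    if (result != NULL) return result;
    // The survivor region is full: try to fit the object in the old gen.
    return old_attempt_allocation(word_size);
  }
  // purpose == GCAllocForTenured
  HeapWordPtr result = old_attempt_allocation(word_size);
  if (result != NULL) return result;
  // The old region is full: try the survivor region instead.
  return survivor_attempt_allocation(word_size);
}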
4422 #ifndef PRODUCT 4078 #ifndef PRODUCT
4423 bool GCLabBitMapClosure::do_bit(size_t offset) { 4079 bool GCLabBitMapClosure::do_bit(size_t offset) {
4424 HeapWord* addr = _bitmap->offsetToHeapWord(offset); 4080 HeapWord* addr = _bitmap->offsetToHeapWord(offset);
4954 OopClosure* non_root_closure) { 4610 OopClosure* non_root_closure) {
4955 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); 4611 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
4956 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); 4612 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
4957 } 4613 }
4958 4614
4959
4960 class SaveMarksClosure: public HeapRegionClosure {
4961 public:
4962 bool doHeapRegion(HeapRegion* r) {
4963 r->save_marks();
4964 return false;
4965 }
4966 };
4967
4968 void G1CollectedHeap::save_marks() {
4969 if (!CollectedHeap::use_parallel_gc_threads()) {
4970 SaveMarksClosure sm;
4971 heap_region_iterate(&sm);
4972 }
4973 // We do this even in the parallel case
4974 perm_gen()->save_marks();
4975 }
4976
4977 void G1CollectedHeap::evacuate_collection_set() { 4615 void G1CollectedHeap::evacuate_collection_set() {
4978 set_evacuation_failed(false); 4616 set_evacuation_failed(false);
4979 4617
4980 g1_rem_set()->prepare_for_oops_into_collection_set_do(); 4618 g1_rem_set()->prepare_for_oops_into_collection_set_do();
4981 concurrent_g1_refine()->set_use_cache(false); 4619 concurrent_g1_refine()->set_use_cache(false);
5002 } 4640 }
5003 4641
5004 double par_time = (os::elapsedTime() - start_par) * 1000.0; 4642 double par_time = (os::elapsedTime() - start_par) * 1000.0;
5005 g1_policy()->record_par_time(par_time); 4643 g1_policy()->record_par_time(par_time);
5006 set_par_threads(0); 4644 set_par_threads(0);
5007 // Is this the right thing to do here? We don't save marks
5008 // on individual heap regions when we allocate from
5009 // them in parallel, so this seems like the correct place for this.
5010 retire_all_alloc_regions();
5011 4645
5012 // Weak root processing. 4646 // Weak root processing.
5013 // Note: when JSR 292 is enabled and code blobs can contain 4647 // Note: when JSR 292 is enabled and code blobs can contain
5014 // non-perm oops then we will need to process the code blobs 4648 // non-perm oops then we will need to process the code blobs
5015 // here too. 4649 // here too.
5016 { 4650 {
5017 G1IsAliveClosure is_alive(this); 4651 G1IsAliveClosure is_alive(this);
5018 G1KeepAliveClosure keep_alive(this); 4652 G1KeepAliveClosure keep_alive(this);
5019 JNIHandles::weak_oops_do(&is_alive, &keep_alive); 4653 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5020 } 4654 }
5021 release_gc_alloc_regions(false /* totally */); 4655 release_gc_alloc_regions();
5022 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); 4656 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5023 4657
5024 concurrent_g1_refine()->clear_hot_cache(); 4658 concurrent_g1_refine()->clear_hot_cache();
5025 concurrent_g1_refine()->set_use_cache(true); 4659 concurrent_g1_refine()->set_use_cache(true);
5026 4660
5137 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); 4771 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5138 _humongous_set.update_from_proxy(humongous_proxy_set); 4772 _humongous_set.update_from_proxy(humongous_proxy_set);
5139 } 4773 }
5140 } 4774 }
5141 4775
5142 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
5143 while (list != NULL) {
5144 guarantee( list->is_young(), "invariant" );
5145
5146 HeapWord* bottom = list->bottom();
5147 HeapWord* end = list->end();
5148 MemRegion mr(bottom, end);
5149 ct_bs->dirty(mr);
5150
5151 list = list->get_next_young_region();
5152 }
5153 }
5154
5155
5156 class G1ParCleanupCTTask : public AbstractGangTask { 4776 class G1ParCleanupCTTask : public AbstractGangTask {
5157 CardTableModRefBS* _ct_bs; 4777 CardTableModRefBS* _ct_bs;
5158 G1CollectedHeap* _g1h; 4778 G1CollectedHeap* _g1h;
5159 HeapRegion* volatile _su_head; 4779 HeapRegion* volatile _su_head;
5160 public: 4780 public:
5161 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, 4781 G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
5162 G1CollectedHeap* g1h, 4782 G1CollectedHeap* g1h) :
5163 HeapRegion* survivor_list) :
5164 AbstractGangTask("G1 Par Cleanup CT Task"), 4783 AbstractGangTask("G1 Par Cleanup CT Task"),
5165 _ct_bs(ct_bs), 4784 _ct_bs(ct_bs), _g1h(g1h) { }
5166 _g1h(g1h),
5167 _su_head(survivor_list)
5168 { }
5169 4785
5170 void work(int i) { 4786 void work(int i) {
5171 HeapRegion* r; 4787 HeapRegion* r;
5172 while (r = _g1h->pop_dirty_cards_region()) { 4788 while (r = _g1h->pop_dirty_cards_region()) {
5173 clear_cards(r); 4789 clear_cards(r);
5174 } 4790 }
5175 // Redirty the cards of the survivor regions.
5176 dirty_list(&this->_su_head);
5177 } 4791 }
5178 4792
5179 void clear_cards(HeapRegion* r) { 4793 void clear_cards(HeapRegion* r) {
5180 // Cards for Survivor regions will be dirtied later. 4794 // Cards of the survivors should have already been dirtied.
5181 if (!r->is_survivor()) { 4795 if (!r->is_survivor()) {
5182 _ct_bs->clear(MemRegion(r->bottom(), r->end())); 4796 _ct_bs->clear(MemRegion(r->bottom(), r->end()));
5183 } 4797 }
5184 } 4798 }
5185
5186 void dirty_list(HeapRegion* volatile * head_ptr) {
5187 HeapRegion* head;
5188 do {
5189 // Pop region off the list.
5190 head = *head_ptr;
5191 if (head != NULL) {
5192 HeapRegion* r = (HeapRegion*)
5193 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
5194 if (r == head) {
5195 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
5196 _ct_bs->dirty(MemRegion(r->bottom(), r->end()));
5197 }
5198 }
5199 } while (*head_ptr != NULL);
5200 }
5201 }; 4799 };
5202
5203 4800
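The deleted dirty_list() above (and dirtyCardsForYoungRegions() further down) implemented the old scheme: clear the whole card table, then re-dirty every survivor card in bulk. With this change, survivor cards are assumed to have been dirtied incrementally as objects were copied into them, so clear_cards() simply skips survivor regions. A toy card-table model of the new behaviour (stand-in types; byte-addressed and card-aligned for simplicity):

#include <cstddef>
#include <vector>

static const int kCardShift = 9;          // 512-byte cards, as in HotSpot
enum CardValue { kClean = 0, kDirty = 1 };

struct ToyCardTable {
  std::vector<unsigned char> cards;
  explicit ToyCardTable(size_t heap_bytes) : cards(heap_bytes >> kCardShift, kClean) {}
  // [from, to) are byte addresses, assumed card-aligned in this toy model.
  void dirty(size_t from, size_t to) {
    for (size_t c = from >> kCardShift; c < (to >> kCardShift); ++c) cards[c] = kDirty;
  }
  void clear(size_t from, size_t to) {
    for (size_t c = from >> kCardShift; c < (to >> kCardShift); ++c) cards[c] = kClean;
  }
};

struct ToyRegion { size_t bottom, end; bool survivor; };

// New behaviour: survivor cards were dirtied when objects were copied in,
// so the cleanup pass leaves those regions untouched.
void clear_cards(ToyCardTable& ct, const ToyRegion& r) {
  if (!r.survivor) {
    ct.clear(r.bottom, r.end);
  }
}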
5204 #ifndef PRODUCT 4801 #ifndef PRODUCT
5205 class G1VerifyCardTableCleanup: public HeapRegionClosure { 4802 class G1VerifyCardTableCleanup: public HeapRegionClosure {
5206 G1CollectedHeap* _g1h; 4803 G1CollectedHeap* _g1h;
5207 CardTableModRefBS* _ct_bs; 4804 CardTableModRefBS* _ct_bs;
5254 void G1CollectedHeap::cleanUpCardTable() { 4851 void G1CollectedHeap::cleanUpCardTable() {
5255 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); 4852 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
5256 double start = os::elapsedTime(); 4853 double start = os::elapsedTime();
5257 4854
5258 // Iterate over the dirty cards region list. 4855 // Iterate over the dirty cards region list.
5259 G1ParCleanupCTTask cleanup_task(ct_bs, this, 4856 G1ParCleanupCTTask cleanup_task(ct_bs, this);
5260 _young_list->first_survivor_region());
5261 4857
5262 if (ParallelGCThreads > 0) { 4858 if (ParallelGCThreads > 0) {
5263 set_par_threads(workers()->total_workers()); 4859 set_par_threads(workers()->total_workers());
5264 workers()->run_task(&cleanup_task); 4860 workers()->run_task(&cleanup_task);
5265 set_par_threads(0); 4861 set_par_threads(0);
5272 // The last region. 4868 // The last region.
5273 _dirty_cards_region_list = NULL; 4869 _dirty_cards_region_list = NULL;
5274 } 4870 }
5275 r->set_next_dirty_cards_region(NULL); 4871 r->set_next_dirty_cards_region(NULL);
5276 } 4872 }
5277 // now, redirty the cards of the survivor regions
5278 // (it seemed faster to do it this way, instead of iterating over
5279 // all regions and then clearing / dirtying as appropriate)
5280 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
5281 } 4873 }
5282 4874
5283 double elapsed = os::elapsedTime() - start; 4875 double elapsed = os::elapsedTime() - start;
5284 g1_policy()->record_clear_ct_time( elapsed * 1000.0); 4876 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
5285 #ifndef PRODUCT 4877 #ifndef PRODUCT
5502 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); 5094 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
5503 5095
5504 _young_list->empty_list(); 5096 _young_list->empty_list();
5505 } 5097 }
5506 5098
5507 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
5508 bool no_allocs = true;
5509 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
5510 HeapRegion* r = _gc_alloc_regions[ap];
5511 no_allocs = r == NULL || r->saved_mark_at_top();
5512 }
5513 return no_allocs;
5514 }
5515
5516 void G1CollectedHeap::retire_all_alloc_regions() {
5517 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
5518 HeapRegion* r = _gc_alloc_regions[ap];
5519 if (r != NULL) {
5520 // Check for aliases.
5521 bool has_processed_alias = false;
5522 for (int i = 0; i < ap; ++i) {
5523 if (_gc_alloc_regions[i] == r) {
5524 has_processed_alias = true;
5525 break;
5526 }
5527 }
5528 if (!has_processed_alias) {
5529 retire_alloc_region(r, false /* par */);
5530 }
5531 }
5532 }
5533 }
5534
5535 // Done at the start of full GC. 5099 // Done at the start of full GC.
5536 void G1CollectedHeap::tear_down_region_lists() { 5100 void G1CollectedHeap::tear_down_region_lists() {
5537 _free_list.remove_all(); 5101 _free_list.remove_all();
5538 } 5102 }
5539 5103
5583 return is_in_permanent(p); 5147 return is_in_permanent(p);
5584 } else { 5148 } else {
5585 return hr->is_in(p); 5149 return hr->is_in(p);
5586 } 5150 }
5587 } 5151 }
5152
5153 // Methods for the mutator alloc region
5588 5154
5589 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, 5155 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5590 bool force) { 5156 bool force) {
5591 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 5157 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5592 assert(!force || g1_policy()->can_expand_young_list(), 5158 assert(!force || g1_policy()->can_expand_young_list(),
5624 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, 5190 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
5625 size_t allocated_bytes) { 5191 size_t allocated_bytes) {
5626 _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); 5192 _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
5627 } 5193 }
5628 5194
5195 // Methods for the GC alloc regions
5196
5197 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
5198 size_t count,
5199 GCAllocPurpose ap) {
5200 assert(FreeList_lock->owned_by_self(), "pre-condition");
5201
5202 if (count < g1_policy()->max_regions(ap)) {
5203 HeapRegion* new_alloc_region = new_region(word_size,
5204 true /* do_expand */);
5205 if (new_alloc_region != NULL) {
5206 // We really only need to do this for old regions given that we
5207 // should never scan survivors. But it doesn't hurt to do it
5208 // for survivors too.
5209 new_alloc_region->set_saved_mark();
5210 if (ap == GCAllocForSurvived) {
5211 new_alloc_region->set_survivor();
5212 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
5213 } else {
5214 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
5215 }
5216 return new_alloc_region;
5217 } else {
5218 g1_policy()->note_alloc_region_limit_reached(ap);
5219 }
5220 }
5221 return NULL;
5222 }
5223
5224 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
5225 size_t allocated_bytes,
5226 GCAllocPurpose ap) {
5227 alloc_region->note_end_of_copying();
5228 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
5229 if (ap == GCAllocForSurvived) {
5230 young_list()->add_survivor_region(alloc_region);
5231 }
5232 _hr_printer.retire(alloc_region);
5233 }
5234
5235 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
5236 bool force) {
5237 assert(!force, "not supported for GC alloc regions");
5238 return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
5239 }
5240
5241 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
5242 size_t allocated_bytes) {
5243 _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
5244 GCAllocForSurvived);
5245 }
5246
5247 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
5248 bool force) {
5249 assert(!force, "not supported for GC alloc regions");
5250 return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
5251 }
5252
5253 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
5254 size_t allocated_bytes) {
5255 _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
5256 GCAllocForTenured);
5257 }
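The SurvivorGCAllocRegion and OldGCAllocRegion methods just added delegate to shared heap helpers through two virtual hooks, allocate_new_region() and retire_region(); that is the whole G1AllocRegion pattern this changeset adopts. A self-contained sketch of the pattern with made-up names (not the real G1AllocRegion API):

#include <cstddef>
#include <cstdio>

// A toy region: a fixed capacity and a bump pointer.
struct ToyRegion {
  size_t used;
  size_t capacity;
  explicit ToyRegion(size_t cap) : used(0), capacity(cap) {}
  bool allocate(size_t words) {
    if (used + words > capacity) return false;
    used += words;
    return true;
  }
};

// The wrapper: hands out space from a current region and, when that fails,
// retires it and asks a subclass-specific hook for a replacement.
class ToyAllocRegion {
  ToyRegion* _current;
protected:
  virtual ToyRegion* allocate_new_region(size_t word_size) = 0;
  virtual void retire_region(ToyRegion* r, size_t allocated_words) = 0;
public:
  ToyAllocRegion() : _current(NULL) {}
  virtual ~ToyAllocRegion() {}

  bool attempt_allocation(size_t words) {
    if (_current != NULL && _current->allocate(words)) return true;
    release();                                   // retire the full region
    _current = allocate_new_region(words);       // and get a fresh one
    return _current != NULL && _current->allocate(words);
  }

  void release() {
    if (_current != NULL) {
      retire_region(_current, _current->used);
      _current = NULL;
    }
  }
};

// One concrete flavour, standing in for the survivor or old variant.
class DemoAllocRegion : public ToyAllocRegion {
protected:
  virtual ToyRegion* allocate_new_region(size_t) { return new ToyRegion(1024); }
  virtual void retire_region(ToyRegion* r, size_t allocated_words) {
    std::printf("retired region, %lu words used\n", (unsigned long)allocated_words);
    delete r;
  }
};

int main() {
  DemoAllocRegion ar;
  for (int i = 0; i < 5; ++i) ar.attempt_allocation(400);
  ar.release();
  return 0;
}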
5629 // Heap region set verification 5258 // Heap region set verification
5630 5259
5631 class VerifyRegionListsClosure : public HeapRegionClosure { 5260 class VerifyRegionListsClosure : public HeapRegionClosure {
5632 private: 5261 private:
5633 HumongousRegionSet* _humongous_set; 5262 HumongousRegionSet* _humongous_set;