Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 20305:755930f931e3
8027959: Early reclamation of large objects in G1
Summary: Try to reclaim humongous objects at every young collection after doing a conservative estimate of their liveness.
Reviewed-by: brutisso, mgerdin
author | tschatzl |
---|---|
date | Wed, 23 Jul 2014 09:03:32 +0200 |
parents | a22acf6d7598 |
children | 04d77ac27223 |
comparison
equal
deleted
inserted
replaced
20304:a22acf6d7598 | 20305:755930f931e3 |
---|---|
1912 _full_collection(false), | 1912 _full_collection(false), |
1913 _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()), | 1913 _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()), |
1914 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()), | 1914 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()), |
1915 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()), | 1915 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()), |
1916 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()), | 1916 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()), |
1917 _humongous_is_live(), | |
1918 _has_humongous_reclaim_candidates(false), | |
1917 _free_regions_coming(false), | 1919 _free_regions_coming(false), |
1918 _young_list(new YoungList(this)), | 1920 _young_list(new YoungList(this)), |
1919 _gc_time_stamp(0), | 1921 _gc_time_stamp(0), |
1920 _retained_old_gc_alloc_region(NULL), | 1922 _retained_old_gc_alloc_region(NULL), |
1921 _survivor_plab_stats(YoungPLABSize, PLABWeight), | 1923 _survivor_plab_stats(YoungPLABSize, PLABWeight), |
2068 heap_word_size(init_byte_size)); | 2070 heap_word_size(init_byte_size)); |
2069 | 2071 |
2070 _g1h = this; | 2072 _g1h = this; |
2071 | 2073 |
2072 _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes); | 2074 _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes); |
2075 _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes); | |
2073 | 2076 |
2074 // Create the ConcurrentMark data structure and thread. | 2077 // Create the ConcurrentMark data structure and thread. |
2075 // (Must do this late, so that "max_regions" is defined.) | 2078 // (Must do this late, so that "max_regions" is defined.) |
2076 _cm = new ConcurrentMark(this, heap_rs); | 2079 _cm = new ConcurrentMark(this, heap_rs); |
2077 if (_cm == NULL || !_cm->completed_initialization()) { | 2080 if (_cm == NULL || !_cm->completed_initialization()) { |
2161 _cg1r->stop(); | 2164 _cg1r->stop(); |
2162 _cmThread->stop(); | 2165 _cmThread->stop(); |
2163 if (G1StringDedup::is_enabled()) { | 2166 if (G1StringDedup::is_enabled()) { |
2164 G1StringDedup::stop(); | 2167 G1StringDedup::stop(); |
2165 } | 2168 } |
2169 } | |
2170 | |
2171 void G1CollectedHeap::clear_humongous_is_live_table() { | |
2172 guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true"); | |
2173 _humongous_is_live.clear(); | |
2166 } | 2174 } |
2167 | 2175 |
2168 size_t G1CollectedHeap::conservative_max_heap_alignment() { | 2176 size_t G1CollectedHeap::conservative_max_heap_alignment() { |
2169 return HeapRegion::max_region_size(); | 2177 return HeapRegion::max_region_size(); |
2170 } | 2178 } |
3781 | 3789 |
3782 size_t G1CollectedHeap::cards_scanned() { | 3790 size_t G1CollectedHeap::cards_scanned() { |
3783 return g1_rem_set()->cardsScanned(); | 3791 return g1_rem_set()->cardsScanned(); |
3784 } | 3792 } |
3785 | 3793 |
3794 bool G1CollectedHeap::humongous_region_is_always_live(uint index) { | |
3795 HeapRegion* region = region_at(index); | |
3796 assert(region->startsHumongous(), "Must start a humongous object"); | |
3797 return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty(); | |
3798 } | |
3799 | |
3800 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure { | |
3801 private: | |
3802 size_t _total_humongous; | |
3803 size_t _candidate_humongous; | |
3804 public: | |
3805 RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) { | |
3806 } | |
3807 | |
3808 virtual bool doHeapRegion(HeapRegion* r) { | |
3809 if (!r->startsHumongous()) { | |
3810 return false; | |
3811 } | |
3812 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
3813 | |
3814 uint region_idx = r->hrs_index(); | |
3815 bool is_candidate = !g1h->humongous_region_is_always_live(region_idx); | |
3816 // Is_candidate already filters out humongous regions with some remembered set. | |
3817 // This will not lead to humongous objects that we mistakenly keep alive because | |
3818 // during young collection the remembered sets will only be added to. | |
3819 if (is_candidate) { | |
3820 g1h->register_humongous_region_with_in_cset_fast_test(region_idx); | |
3821 _candidate_humongous++; | |
3822 } | |
3823 _total_humongous++; | |
3824 | |
3825 return false; | |
3826 } | |
3827 | |
3828 size_t total_humongous() const { return _total_humongous; } | |
3829 size_t candidate_humongous() const { return _candidate_humongous; } | |
3830 }; | |
3831 | |
3832 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() { | |
3833 if (!G1ReclaimDeadHumongousObjectsAtYoungGC) { | |
3834 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0); | |
3835 return; | |
3836 } | |
3837 | |
3838 RegisterHumongousWithInCSetFastTestClosure cl; | |
3839 heap_region_iterate(&cl); | |
3840 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(), | |
3841 cl.candidate_humongous()); | |
3842 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0; | |
3843 | |
3844 if (_has_humongous_reclaim_candidates) { | |
3845 clear_humongous_is_live_table(); | |
3846 } | |
3847 } | |
3848 | |
3786 void | 3849 void |
3787 G1CollectedHeap::setup_surviving_young_words() { | 3850 G1CollectedHeap::setup_surviving_young_words() { |
3788 assert(_surviving_young_words == NULL, "pre-condition"); | 3851 assert(_surviving_young_words == NULL, "pre-condition"); |
3789 uint array_length = g1_policy()->young_cset_region_length(); | 3852 uint array_length = g1_policy()->young_cset_region_length(); |
3790 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC); | 3853 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC); |
4065 _young_list->print(); | 4128 _young_list->print(); |
4066 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); | 4129 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
4067 #endif // YOUNG_LIST_VERBOSE | 4130 #endif // YOUNG_LIST_VERBOSE |
4068 | 4131 |
4069 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); | 4132 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); |
4133 | |
4134 register_humongous_regions_with_in_cset_fast_test(); | |
4070 | 4135 |
4071 _cm->note_start_of_gc(); | 4136 _cm->note_start_of_gc(); |
4072 // We should not verify the per-thread SATB buffers given that | 4137 // We should not verify the per-thread SATB buffers given that |
4073 // we have not filtered them yet (we'll do so during the | 4138 // we have not filtered them yet (we'll do so during the |
4074 // GC). We also call this after finalize_cset() to | 4139 // GC). We also call this after finalize_cset() to |
4116 false /* verify_enqueued_buffers */, | 4181 false /* verify_enqueued_buffers */, |
4117 true /* verify_thread_buffers */, | 4182 true /* verify_thread_buffers */, |
4118 true /* verify_fingers */); | 4183 true /* verify_fingers */); |
4119 | 4184 |
4120 free_collection_set(g1_policy()->collection_set(), evacuation_info); | 4185 free_collection_set(g1_policy()->collection_set(), evacuation_info); |
4186 | |
4187 eagerly_reclaim_humongous_regions(); | |
4188 | |
4121 g1_policy()->clear_collection_set(); | 4189 g1_policy()->clear_collection_set(); |
4122 | 4190 |
4123 cleanup_surviving_young_words(); | 4191 cleanup_surviving_young_words(); |
4124 | 4192 |
4125 // Start a new incremental collection set for the next pause. | 4193 // Start a new incremental collection set for the next pause. |
4626 | 4694 |
4627 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | 4695 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
4628 | 4696 |
4629 assert(_worker_id == _par_scan_state->queue_num(), "sanity"); | 4697 assert(_worker_id == _par_scan_state->queue_num(), "sanity"); |
4630 | 4698 |
4631 if (_g1->in_cset_fast_test(obj)) { | 4699 G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj); |
4700 | |
4701 if (state == G1CollectedHeap::InCSet) { | |
4632 oop forwardee; | 4702 oop forwardee; |
4633 if (obj->is_forwarded()) { | 4703 if (obj->is_forwarded()) { |
4634 forwardee = obj->forwardee(); | 4704 forwardee = obj->forwardee(); |
4635 } else { | 4705 } else { |
4636 forwardee = _par_scan_state->copy_to_survivor_space(obj); | 4706 forwardee = _par_scan_state->copy_to_survivor_space(obj); |
4645 | 4715 |
4646 if (barrier == G1BarrierKlass) { | 4716 if (barrier == G1BarrierKlass) { |
4647 do_klass_barrier(p, forwardee); | 4717 do_klass_barrier(p, forwardee); |
4648 } | 4718 } |
4649 } else { | 4719 } else { |
4720 if (state == G1CollectedHeap::IsHumongous) { | |
4721 _g1->set_humongous_is_live(obj); | |
4722 } | |
4650 // The object is not in collection set. If we're a root scanning | 4723 // The object is not in collection set. If we're a root scanning |
4651 // closure during an initial mark pause then attempt to mark the object. | 4724 // closure during an initial mark pause then attempt to mark the object. |
4652 if (do_mark_object == G1MarkFromRoot) { | 4725 if (do_mark_object == G1MarkFromRoot) { |
4653 mark_object(obj); | 4726 mark_object(obj); |
4654 } | 4727 } |
5474 class G1KeepAliveClosure: public OopClosure { | 5547 class G1KeepAliveClosure: public OopClosure { |
5475 G1CollectedHeap* _g1; | 5548 G1CollectedHeap* _g1; |
5476 public: | 5549 public: |
5477 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | 5550 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
5478 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } | 5551 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
5479 void do_oop( oop* p) { | 5552 void do_oop(oop* p) { |
5480 oop obj = *p; | 5553 oop obj = *p; |
5481 | 5554 |
5482 if (_g1->obj_in_cs(obj)) { | 5555 G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj); |
5556 if (obj == NULL || cset_state == G1CollectedHeap::InNeither) { | |
5557 return; | |
5558 } | |
5559 if (cset_state == G1CollectedHeap::InCSet) { | |
5483 assert( obj->is_forwarded(), "invariant" ); | 5560 assert( obj->is_forwarded(), "invariant" ); |
5484 *p = obj->forwardee(); | 5561 *p = obj->forwardee(); |
5562 } else { | |
5563 assert(!obj->is_forwarded(), "invariant" ); | |
5564 assert(cset_state == G1CollectedHeap::IsHumongous, | |
5565 err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state)); | |
5566 _g1->set_humongous_is_live(obj); | |
5485 } | 5567 } |
5486 } | 5568 } |
5487 }; | 5569 }; |
5488 | 5570 |
5489 // Copying Keep Alive closure - can be called from both | 5571 // Copying Keep Alive closure - can be called from both |
5509 virtual void do_oop( oop* p) { do_oop_work(p); } | 5591 virtual void do_oop( oop* p) { do_oop_work(p); } |
5510 | 5592 |
5511 template <class T> void do_oop_work(T* p) { | 5593 template <class T> void do_oop_work(T* p) { |
5512 oop obj = oopDesc::load_decode_heap_oop(p); | 5594 oop obj = oopDesc::load_decode_heap_oop(p); |
5513 | 5595 |
5514 if (_g1h->obj_in_cs(obj)) { | 5596 if (_g1h->is_in_cset_or_humongous(obj)) { |
5515 // If the referent object has been forwarded (either copied | 5597 // If the referent object has been forwarded (either copied |
5516 // to a new location or to itself in the event of an | 5598 // to a new location or to itself in the event of an |
5517 // evacuation failure) then we need to update the reference | 5599 // evacuation failure) then we need to update the reference |
5518 // field and, if both reference and referent are in the G1 | 5600 // field and, if both reference and referent are in the G1 |
5519 // heap, update the RSet for the referent. | 5601 // heap, update the RSet for the referent. |
5534 _par_scan_state->push_on_queue(p); | 5616 _par_scan_state->push_on_queue(p); |
5535 } else { | 5617 } else { |
5536 assert(!Metaspace::contains((const void*)p), | 5618 assert(!Metaspace::contains((const void*)p), |
5537 err_msg("Unexpectedly found a pointer from metadata: " | 5619 err_msg("Unexpectedly found a pointer from metadata: " |
5538 PTR_FORMAT, p)); | 5620 PTR_FORMAT, p)); |
5539 _copy_non_heap_obj_cl->do_oop(p); | 5621 _copy_non_heap_obj_cl->do_oop(p); |
5540 } | |
5541 } | 5622 } |
5542 } | 5623 } |
5624 } | |
5543 }; | 5625 }; |
5544 | 5626 |
5545 // Serial drain queue closure. Called as the 'complete_gc' | 5627 // Serial drain queue closure. Called as the 'complete_gc' |
5546 // closure for each discovered list in some of the | 5628 // closure for each discovered list in some of the |
5547 // reference processing phases. | 5629 // reference processing phases. |
6372 | 6454 |
6373 prepend_to_freelist(&local_free_list); | 6455 prepend_to_freelist(&local_free_list); |
6374 decrement_summary_bytes(pre_used); | 6456 decrement_summary_bytes(pre_used); |
6375 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms); | 6457 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms); |
6376 policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms); | 6458 policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms); |
6459 } | |
6460 | |
6461 class G1FreeHumongousRegionClosure : public HeapRegionClosure { | |
6462 private: | |
6463 FreeRegionList* _free_region_list; | |
6464 HeapRegionSet* _proxy_set; | |
6465 HeapRegionSetCount _humongous_regions_removed; | |
6466 size_t _freed_bytes; | |
6467 public: | |
6468 | |
6469 G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) : | |
6470 _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) { | |
6471 } | |
6472 | |
6473 virtual bool doHeapRegion(HeapRegion* r) { | |
6474 if (!r->startsHumongous()) { | |
6475 return false; | |
6476 } | |
6477 | |
6478 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
6479 | |
6480 // The following checks whether the humongous object is live are sufficient. | |
6481 // The main additional check (in addition to having a reference from the roots | |
6482 // or the young gen) is whether the humongous object has a remembered set entry. | |
6483 // | |
6484 // A humongous object cannot be live if there is no remembered set for it | |
6485 // because: | |
6486 // - there can be no references from within humongous starts regions referencing | |
6487 // the object because we never allocate other objects into them. | |
6488 // (I.e. there are no intra-region references that may be missed by the | |
6489 // remembered set) | |
6490 // - as soon as there is a remembered set entry to the humongous starts region | |
6491 // (i.e. it has "escaped" to an old object) this remembered set entry will stay | |
6492 // until the end of a concurrent mark. | |
6493 // | |
6494 // It is not required to check whether the object has been found dead by marking | |
6495 // or not, in fact it would prevent reclamation within a concurrent cycle, as | |
6496 // all objects allocated during that time are considered live. | |
6497 // SATB marking is even more conservative than the remembered set. | |
6498 // So if at this point in the collection there is no remembered set entry, | |
6499 // nobody has a reference to it. | |
6500 // At the start of collection we flush all refinement logs, and remembered sets | |
6501 // are completely up-to-date wrt to references to the humongous object. | |
6502 // | |
6503 // Other implementation considerations: | |
6504 // - never consider object arrays: while they are a valid target, they have not | |
6505 // been observed to be used as temporary objects. | |
6506 // - they would also pose considerable effort for cleaning up the remembered | |
6507 // sets. | |
6508 // While this cleanup is not strictly necessary to be done (or done instantly), | |
6509 // given that their occurrence is very low, this saves us this additional | |
6510 // complexity. | |
6511 uint region_idx = r->hrs_index(); | |
6512 if (g1h->humongous_is_live(region_idx) || | |
6513 g1h->humongous_region_is_always_live(region_idx)) { | |
6514 | |
6515 if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) { | |
6516 gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d", | |
6517 r->isHumongous(), | |
6518 region_idx, | |
6519 r->rem_set()->occupied(), | |
6520 r->rem_set()->strong_code_roots_list_length(), | |
6521 g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(), | |
6522 g1h->humongous_is_live(region_idx), | |
6523 oop(r->bottom())->is_objArray() | |
6524 ); | |
6525 } | |
6526 | |
6527 return false; | |
6528 } | |
6529 | |
6530 guarantee(!((oop)(r->bottom()))->is_objArray(), | |
6531 err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.", | |
6532 r->bottom())); | |
6533 | |
6534 if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) { | |
6535 gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d", | |
6536 r->isHumongous(), | |
6537 r->bottom(), | |
6538 region_idx, | |
6539 r->region_num(), | |
6540 r->rem_set()->occupied(), | |
6541 r->rem_set()->strong_code_roots_list_length(), | |
6542 g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(), | |
6543 g1h->humongous_is_live(region_idx), | |
6544 oop(r->bottom())->is_objArray() | |
6545 ); | |
6546 } | |
6547 _freed_bytes += r->used(); | |
6548 r->set_containing_set(NULL); | |
6549 _humongous_regions_removed.increment(1u, r->capacity()); | |
6550 g1h->free_humongous_region(r, _free_region_list, false); | |
6551 | |
6552 return false; | |
6553 } | |
6554 | |
6555 HeapRegionSetCount& humongous_free_count() { | |
6556 return _humongous_regions_removed; | |
6557 } | |
6558 | |
6559 size_t bytes_freed() const { | |
6560 return _freed_bytes; | |
6561 } | |
6562 | |
6563 size_t humongous_reclaimed() const { | |
6564 return _humongous_regions_removed.length(); | |
6565 } | |
6566 }; | |
6567 | |
6568 void G1CollectedHeap::eagerly_reclaim_humongous_regions() { | |
6569 assert_at_safepoint(true); | |
6570 | |
6571 if (!G1ReclaimDeadHumongousObjectsAtYoungGC || !_has_humongous_reclaim_candidates) { | |
6572 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0); | |
6573 return; | |
6574 } | |
6575 | |
6576 double start_time = os::elapsedTime(); | |
6577 | |
6578 FreeRegionList local_cleanup_list("Local Humongous Cleanup List"); | |
6579 | |
6580 G1FreeHumongousRegionClosure cl(&local_cleanup_list); | |
6581 heap_region_iterate(&cl); | |
6582 | |
6583 HeapRegionSetCount empty_set; | |
6584 remove_from_old_sets(empty_set, cl.humongous_free_count()); | |
6585 | |
6586 G1HRPrinter* hr_printer = _g1h->hr_printer(); | |
6587 if (hr_printer->is_active()) { | |
6588 FreeRegionListIterator iter(&local_cleanup_list); | |
6589 while (iter.more_available()) { | |
6590 HeapRegion* hr = iter.get_next(); | |
6591 hr_printer->cleanup(hr); | |
6592 } | |
6593 } | |
6594 | |
6595 prepend_to_freelist(&local_cleanup_list); | |
6596 decrement_summary_bytes(cl.bytes_freed()); | |
6597 | |
6598 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0, | |
6599 cl.humongous_reclaimed()); | |
6377 } | 6600 } |
6378 | 6601 |
6379 // This routine is similar to the above but does not record | 6602 // This routine is similar to the above but does not record |
6380 // any policy statistics or update free lists; we are abandoning | 6603 // any policy statistics or update free lists; we are abandoning |
6381 // the current incremental collection set in preparation of a | 6604 // the current incremental collection set in preparation of a |