Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 637:25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
Summary: Enabled full verification for humongous regions. Also made sure that the VerifyAfterGC works with deferred updates and G1HRRSFlushLogBuffersOnVerify.
Reviewed-by: tonyp
author | iveresov |
---|---|
date | Mon, 16 Mar 2009 08:01:32 -0700 |
parents | 6c4cea9bfa11 |
children | ba50942c8138 |
comparison
equal
deleted
inserted
replaced
636:6c4cea9bfa11 | 637:25e146966e7c |
---|---|
959 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | 959 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
960 | 960 |
961 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { | 961 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
962 HandleMark hm; // Discard invalid handles created during verification | 962 HandleMark hm; // Discard invalid handles created during verification |
963 gclog_or_tty->print(" VerifyAfterGC:"); | 963 gclog_or_tty->print(" VerifyAfterGC:"); |
964 prepare_for_verify(); | |
964 Universe::verify(false); | 965 Universe::verify(false); |
965 } | 966 } |
966 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | 967 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); |
967 | 968 |
968 reset_gc_time_stamp(); | 969 reset_gc_time_stamp(); |
2133 VerifyRegionClosure(bool allow_dirty, bool par = false) | 2134 VerifyRegionClosure(bool allow_dirty, bool par = false) |
2134 : _allow_dirty(allow_dirty), _par(par) {} | 2135 : _allow_dirty(allow_dirty), _par(par) {} |
2135 bool doHeapRegion(HeapRegion* r) { | 2136 bool doHeapRegion(HeapRegion* r) { |
2136 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, | 2137 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2137 "Should be unclaimed at verify points."); | 2138 "Should be unclaimed at verify points."); |
2138 if (r->isHumongous()) { | 2139 if (!r->continuesHumongous()) { |
2139 if (r->startsHumongous()) { | |
2140 // Verify the single H object. | |
2141 oop(r->bottom())->verify(); | |
2142 size_t word_sz = oop(r->bottom())->size(); | |
2143 guarantee(r->top() == r->bottom() + word_sz, | |
2144 "Only one object in a humongous region"); | |
2145 } | |
2146 } else { | |
2147 VerifyObjsInRegionClosure not_dead_yet_cl(r); | 2140 VerifyObjsInRegionClosure not_dead_yet_cl(r); |
2148 r->verify(_allow_dirty); | 2141 r->verify(_allow_dirty); |
2149 r->object_iterate(¬_dead_yet_cl); | 2142 r->object_iterate(¬_dead_yet_cl); |
2150 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), | 2143 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), |
2151 "More live objects than counted in last complete marking."); | 2144 "More live objects than counted in last complete marking."); |
2193 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) : | 2186 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) : |
2194 AbstractGangTask("Parallel verify task"), | 2187 AbstractGangTask("Parallel verify task"), |
2195 _g1h(g1h), _allow_dirty(allow_dirty) { } | 2188 _g1h(g1h), _allow_dirty(allow_dirty) { } |
2196 | 2189 |
2197 void work(int worker_i) { | 2190 void work(int worker_i) { |
2191 HandleMark hm; | |
2198 VerifyRegionClosure blk(_allow_dirty, true); | 2192 VerifyRegionClosure blk(_allow_dirty, true); |
2199 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, | 2193 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2200 HeapRegion::ParVerifyClaimValue); | 2194 HeapRegion::ParVerifyClaimValue); |
2201 } | 2195 } |
2202 }; | 2196 }; |
2711 assert(regions_accounted_for(), "Region leakage."); | 2705 assert(regions_accounted_for(), "Region leakage."); |
2712 | 2706 |
2713 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { | 2707 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
2714 HandleMark hm; // Discard invalid handles created during verification | 2708 HandleMark hm; // Discard invalid handles created during verification |
2715 gclog_or_tty->print(" VerifyAfterGC:"); | 2709 gclog_or_tty->print(" VerifyAfterGC:"); |
2710 prepare_for_verify(); | |
2716 Universe::verify(false); | 2711 Universe::verify(false); |
2717 } | 2712 } |
2718 | 2713 |
2719 if (was_enabled) ref_processor()->enable_discovery(); | 2714 if (was_enabled) ref_processor()->enable_discovery(); |
2720 | 2715 |
2842 void G1CollectedHeap::forget_alloc_region_list() { | 2837 void G1CollectedHeap::forget_alloc_region_list() { |
2843 assert(Thread::current()->is_VM_thread(), "Precondition"); | 2838 assert(Thread::current()->is_VM_thread(), "Precondition"); |
2844 while (_gc_alloc_region_list != NULL) { | 2839 while (_gc_alloc_region_list != NULL) { |
2845 HeapRegion* r = _gc_alloc_region_list; | 2840 HeapRegion* r = _gc_alloc_region_list; |
2846 assert(r->is_gc_alloc_region(), "Invariant."); | 2841 assert(r->is_gc_alloc_region(), "Invariant."); |
2842 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on | |
2843 // newly allocated data in order to be able to apply deferred updates | |
2844 // before the GC is done for verification purposes (i.e to allow | |
2845 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the | |
2846 // collection. | |
2847 r->ContiguousSpace::set_saved_mark(); | |
2847 _gc_alloc_region_list = r->next_gc_alloc_region(); | 2848 _gc_alloc_region_list = r->next_gc_alloc_region(); |
2848 r->set_next_gc_alloc_region(NULL); | 2849 r->set_next_gc_alloc_region(NULL); |
2849 r->set_is_gc_alloc_region(false); | 2850 r->set_is_gc_alloc_region(false); |
2850 if (r->is_survivor()) { | 2851 if (r->is_survivor()) { |
2851 if (r->is_empty()) { | 2852 if (r->is_empty()) { |
3736 | 3737 |
3737 DirtyCardQueue& dirty_card_queue() { return _dcq; } | 3738 DirtyCardQueue& dirty_card_queue() { return _dcq; } |
3738 CardTableModRefBS* ctbs() { return _ct_bs; } | 3739 CardTableModRefBS* ctbs() { return _ct_bs; } |
3739 | 3740 |
3740 void immediate_rs_update(HeapRegion* from, oop* p, int tid) { | 3741 void immediate_rs_update(HeapRegion* from, oop* p, int tid) { |
3741 _g1_rem->par_write_ref(from, p, tid); | 3742 if (!from->is_survivor()) { |
3743 _g1_rem->par_write_ref(from, p, tid); | |
3744 } | |
3742 } | 3745 } |
3743 | 3746 |
3744 void deferred_rs_update(HeapRegion* from, oop* p, int tid) { | 3747 void deferred_rs_update(HeapRegion* from, oop* p, int tid) { |
3745 // If the new value of the field points to the same region or | 3748 // If the new value of the field points to the same region or |
3746 // is the to-space, we don't need to include it in the Rset updates. | 3749 // is the to-space, we don't need to include it in the Rset updates. |