# HG changeset patch
# User jmasa
# Date 1251135391 25200
# Node ID 05f89f00a8645a7bd970a25c83bda4544fc81a5c
# Parent 308762b2bf14063c45ee5a0f44087404840f9016
6798898: CMS: bugs related to class unloading
Summary: Override should_remember_klasses() and remember_klass() as needed.
Reviewed-by: ysr, jcoomes

diff -r 308762b2bf14 -r 05f89f00a864 src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Fri Aug 14 13:44:15 2009 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Mon Aug 24 10:36:31 2009 -0700
@@ -92,17 +92,50 @@
   }
 };

+// KlassRememberingOopClosure is used when marking of the permanent generation
+// is being done.  It adds fields to support revisiting of klasses
+// for class unloading.  _should_remember_klasses should be set to
+// indicate if klasses should be remembered.  Currently that is whenever
+// CMS class unloading is turned on.  The _revisit_stack is used
+// to save the klasses for later processing.
+class KlassRememberingOopClosure : public OopClosure {
+ protected:
+  CMSCollector* _collector;
+  CMSMarkStack* _revisit_stack;
+  bool const    _should_remember_klasses;
+ public:
+  void check_remember_klasses() const PRODUCT_RETURN;
+  virtual const bool should_remember_klasses() const {
+    check_remember_klasses();
+    return _should_remember_klasses;
+  }
+  virtual void remember_klass(Klass* k);
+
+  KlassRememberingOopClosure(CMSCollector* collector,
+                             ReferenceProcessor* rp,
+                             CMSMarkStack* revisit_stack);
+};
+
+// Similar to KlassRememberingOopClosure for use when multiple
+// GC threads will execute the closure.
+
+class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
+ public:
+  Par_KlassRememberingOopClosure(CMSCollector* collector,
+                                 ReferenceProcessor* rp,
+                                 CMSMarkStack* revisit_stack):
+    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
+  virtual void remember_klass(Klass* k);
+};
+
 // The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public OopClosure {
+class PushAndMarkClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSBitMap*    _mod_union_table;
   CMSMarkStack* _mark_stack;
-  CMSMarkStack* _revisit_stack;
   bool          _concurrent_precleaning;
-  bool const    _should_remember_klasses;
 protected:
   DO_OOP_WORK_DEFN
 public:
@@ -122,10 +155,6 @@
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
 };
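The hoisting pattern above is worth calling out: before this change, PushAndMarkClosure, Par_PushAndMarkClosure, PushOrMarkClosure, Par_PushOrMarkClosure and the keep-alive closures each carried their own _collector, _revisit_stack and _should_remember_klasses fields and duplicated the remember_klass() boilerplate; the two new base classes own that state once. Below is a minimal compilable sketch of the shape of the refactoring, using simplified stand-in types (Klass, MarkStack and the closure names here are illustrative only, not the HotSpot declarations):

#include <cassert>
#include <cstdio>
#include <vector>

struct Klass {};                       // stand-in for HotSpot's Klass
struct MarkStack {                     // stand-in for CMSMarkStack
  std::vector<Klass*> _elems;
  bool push(Klass* k) { _elems.push_back(k); return true; }
};

// Before: each marking closure re-declared these fields itself.
// After: one base class owns them, and subclasses only add what differs.
class KlassRememberingClosure {
 protected:
  MarkStack* _revisit_stack;
  const bool _should_remember_klasses;
 public:
  KlassRememberingClosure(MarkStack* rs, bool remember)
    : _revisit_stack(rs), _should_remember_klasses(remember) {}
  virtual bool should_remember_klasses() const { return _should_remember_klasses; }
  virtual void remember_klass(Klass* k) {
    if (!_revisit_stack->push(k)) assert(false && "revisit stack overflow");
  }
  virtual ~KlassRememberingClosure() {}
};

// A concrete marking closure now inherits the bookkeeping instead of
// duplicating it (compare PushAndMarkClosure before/after in the hunk above).
class PushAndMark : public KlassRememberingClosure {
 public:
  using KlassRememberingClosure::KlassRememberingClosure;
  void do_oop(Klass* k) { if (should_remember_klasses()) remember_klass(k); }
};

int main() {
  MarkStack rs;
  PushAndMark cl(&rs, /*remember=*/true);
  Klass k;
  cl.do_oop(&k);
  std::printf("remembered %zu klass(es)\n", rs._elems.size());
}

The serial/parallel split in the real patch exists only so remember_klass() can pick the push flavor appropriate to the context; everything else is shared in the base class.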
 // In the parallel case, the revisit stack, the bit map and the
@@ -134,14 +163,11 @@
 // synchronization (for instance, via CAS).  The marking stack
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
-class Par_PushAndMarkClosure: public OopClosure {
+class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
 private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   OopTaskQueue* _work_queue;
-  CMSMarkStack* _revisit_stack;
-  bool const    _should_remember_klasses;
 protected:
   DO_OOP_WORK_DEFN
 public:
@@ -159,10 +185,6 @@
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
 };

 // The non-parallel version (the parallel version appears further below).
@@ -201,6 +223,12 @@
   void set_freelistLock(Mutex* m) {
     _freelistLock = m;
   }
+  virtual const bool should_remember_klasses() const {
+    return _pushAndMarkClosure.should_remember_klasses();
+  }
+  virtual void remember_klass(Klass* k) {
+    _pushAndMarkClosure.remember_klass(k);
+  }

 private:
   inline void do_yield_check();
@@ -234,6 +262,16 @@
   inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const { return true; }
+  // When ScanMarkedObjectsAgainClosure is used,
+  // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
+  // and this delegation is used.
+  virtual const bool should_remember_klasses() const {
+    return _par_pushAndMarkClosure.should_remember_klasses();
+  }
+  // See comment on should_remember_klasses() above.
+  virtual void remember_klass(Klass* k) {
+    _par_pushAndMarkClosure.remember_klass(k);
+  }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
@@ -243,17 +281,14 @@
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public OopClosure {
+class PushOrMarkClosure: public KlassRememberingOopClosure {
 private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bitMap;
   CMSMarkStack* _markStack;
-  CMSMarkStack* _revisitStack;
   HeapWord*     const _finger;
   MarkFromRootsClosure* const _parent;
-  bool const    _should_remember_klasses;
 protected:
   DO_OOP_WORK_DEFN
 public:
@@ -268,10 +303,6 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
 private:
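The @@ -201 and @@ -234 hunks cover a different case: [Par_]MarkRefsIntoAndScanClosure does not mark directly but wraps a [Par_]PushAndMarkClosure, and since oop_oop_iterate() may query the wrapper, the wrapper must forward should_remember_klasses() and remember_klass() to the closure that really owns the state. A small sketch of that delegation, with simplified hypothetical types:

#include <cstdio>

struct Klass {};

class OopClosure {
 public:
  virtual bool should_remember_klasses() const { return false; }
  virtual void remember_klass(Klass*) {}
  virtual ~OopClosure() {}
};

// Inner closure that actually owns the revisit bookkeeping.
class PushAndMark : public OopClosure {
  bool _remember;
 public:
  explicit PushAndMark(bool remember) : _remember(remember) {}
  bool should_remember_klasses() const override { return _remember; }
  void remember_klass(Klass*) override { std::puts("klass pushed on revisit stack"); }
};

// Wrapper closure: iteration code may be handed this object, so it must
// answer the klass-remembering queries consistently.  It does so by
// delegating to the inner closure instead of keeping a second copy of state.
class MarkRefsIntoAndScan : public OopClosure {
  PushAndMark _inner;
 public:
  explicit MarkRefsIntoAndScan(bool remember) : _inner(remember) {}
  bool should_remember_klasses() const override { return _inner.should_remember_klasses(); }
  void remember_klass(Klass* k) override { _inner.remember_klass(k); }
};

int main() {
  MarkRefsIntoAndScan cl(true);
  Klass k;
  if (cl.should_remember_klasses()) cl.remember_klass(&k);
}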
@@ -282,20 +313,17 @@
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure Par_MarkFromRootsClosure.
-class Par_PushOrMarkClosure: public OopClosure {
+class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
 private:
-  CMSCollector* _collector;
   MemRegion     _whole_span;
   MemRegion     _span;        // local chunk
   CMSBitMap*    _bit_map;
   OopTaskQueue* _work_queue;
   CMSMarkStack* _overflow_stack;
-  CMSMarkStack* _revisit_stack;
   HeapWord*     const _finger;
   HeapWord**    const _global_finger_addr;
   Par_MarkFromRootsClosure* const _parent;
-  bool const    _should_remember_klasses;
 protected:
   DO_OOP_WORK_DEFN
 public:
@@ -312,10 +340,6 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
 private:
@@ -328,9 +352,8 @@
 // processing phase of the CMS final checkpoint step, as
 // well as during the concurrent precleaning of the discovered
 // reference lists.
-class CMSKeepAliveClosure: public OopClosure {
+class CMSKeepAliveClosure: public KlassRememberingOopClosure {
 private:
-  CMSCollector* _collector;
   const MemRegion _span;
   CMSMarkStack* _mark_stack;
   CMSBitMap*    _bit_map;
@@ -340,14 +363,7 @@
 public:
   CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      bool cpc):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _mark_stack(mark_stack),
-    _concurrent_precleaning(cpc) {
-    assert(!_span.is_empty(), "Empty span could spell trouble");
-  }
+                      CMSMarkStack* revisit_stack, bool cpc);
   bool    concurrent_precleaning() const { return _concurrent_precleaning; }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
@@ -355,9 +371,8 @@
   inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
 };

-class CMSInnerParMarkAndPushClosure: public OopClosure {
+class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
 private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
@@ -366,11 +381,8 @@
 public:
   CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _work_queue(work_queue) { }
+                                CMSMarkStack* revisit_stack,
+                                OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
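Note how CMSKeepAliveClosure and CMSInnerParMarkAndPushClosure lose their inline header constructors here: both now take a revisit stack and must run the new base-class constructor, so their definitions move to concurrentMarkSweepGeneration.cpp (the @@ -8302 and @@ -8385 hunks later in this patch). A compilable sketch of that header/implementation split, with hypothetical simplified types:

// Hypothetical stand-ins, not the HotSpot types.
struct Collector {};
struct MarkStack {};

// --- keep_alive.hpp (sketch) ---
class KlassRememberingClosure {
 protected:
  Collector* _collector;
  MarkStack* _revisit_stack;
 public:
  KlassRememberingClosure(Collector* c, MarkStack* rs);     // declared only
};

class KeepAliveClosure : public KlassRememberingClosure {
  bool _concurrent_precleaning;
 public:
  // Declared only; the definition lives in the .cpp so the header
  // does not need the base constructor's dependencies.
  KeepAliveClosure(Collector* c, MarkStack* rs, bool cpc);
};

// --- keep_alive.cpp (sketch) ---
KlassRememberingClosure::KlassRememberingClosure(Collector* c, MarkStack* rs)
  : _collector(c), _revisit_stack(rs) {}

KeepAliveClosure::KeepAliveClosure(Collector* c, MarkStack* rs, bool cpc)
  : KlassRememberingClosure(c, rs), _concurrent_precleaning(cpc) {}

int main() {
  Collector c;
  MarkStack rs;
  KeepAliveClosure cl(&c, &rs, /*concurrent_precleaning=*/true);
  (void)cl;
}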
@@ -380,9 +392,8 @@
 // A parallel (MT) version of the above, used when
 // reference processing is parallel; the only difference
 // is in the do_oop method.
-class CMSParKeepAliveClosure: public OopClosure {
+class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
 private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
@@ -394,7 +405,8 @@
   DO_OOP_WORK_DEFN
 public:
   CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
+                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
+                         OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
diff -r 308762b2bf14 -r 05f89f00a864 src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Fri Aug 14 13:44:15 2009 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Mon Aug 24 10:36:31 2009 -0700
@@ -37,16 +37,25 @@
   }
 }

-inline void PushOrMarkClosure::remember_klass(Klass* k) {
-  if (!_revisitStack->push(oop(k))) {
+#ifndef PRODUCT
+void KlassRememberingOopClosure::check_remember_klasses() const {
+  assert(_should_remember_klasses == must_remember_klasses(),
+         "Should remember klasses in this context.");
+}
+#endif
+
+void KlassRememberingOopClosure::remember_klass(Klass* k) {
+  if (!_revisit_stack->push(oop(k))) {
     fatal("Revisit stack overflow in PushOrMarkClosure");
   }
+  check_remember_klasses();
 }

-inline void Par_PushOrMarkClosure::remember_klass(Klass* k) {
+void Par_KlassRememberingOopClosure::remember_klass(Klass* k) {
   if (!_revisit_stack->par_push(oop(k))) {
     fatal("Revisit stack overflow in PushOrMarkClosure");
   }
+  check_remember_klasses();
 }

 inline void PushOrMarkClosure::do_yield_check() {
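This inline.hpp hunk is where the serial/parallel split earns its keep: the base class pushes on the revisit stack with a plain push(), while the Par_ override uses par_push(), which must tolerate concurrent pushers (HotSpot implements it with an atomic update of the stack index). A rough stand-alone sketch of the two push flavors, using std::atomic rather than HotSpot's primitives:

#include <atomic>
#include <cassert>
#include <cstddef>

struct Klass {};

// Fixed-capacity mark stack supporting both a serial push and a
// multi-producer par_push (a sketch; HotSpot's CMSMarkStack differs).
class MarkStack {
  static const size_t CAP = 1024;
  Klass* _elems[CAP];
  std::atomic<size_t> _index{0};
 public:
  bool push(Klass* k) {                 // single-threaded fast path
    size_t i = _index.load(std::memory_order_relaxed);
    if (i >= CAP) return false;
    _elems[i] = k;
    _index.store(i + 1, std::memory_order_relaxed);
    return true;
  }
  // Safe for concurrent GC workers.  Note this publishes the slot index
  // before writing the element, which is fine here only because nothing
  // pops concurrently; a real implementation must also order the element
  // write before making it visible to consumers.
  bool par_push(Klass* k) {
    size_t i = _index.fetch_add(1, std::memory_order_relaxed);
    if (i >= CAP) { _index.fetch_sub(1, std::memory_order_relaxed); return false; }
    _elems[i] = k;
    return true;
  }
};

int main() {
  MarkStack rs;
  Klass k;
  assert(rs.push(&k));      // serial closure: KlassRememberingOopClosure
  assert(rs.par_push(&k));  // parallel closure: Par_KlassRememberingOopClosure
}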
diff -r 308762b2bf14 -r 05f89f00a864 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 14 13:44:15 2009 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Aug 24 10:36:31 2009 -0700
@@ -2276,7 +2276,7 @@
         VM_CMS_Final_Remark final_remark_op(this);
         VMThread::execute(&final_remark_op);
-        }
+      }
       assert(_foregroundGCShouldWait, "block post-condition");
       break;
     case Sweeping:
@@ -3499,6 +3499,7 @@
   ref_processor()->set_enqueuing_is_done(false);

   {
+    // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     gch->gen_process_strong_roots(_cmsGen->level(),
@@ -3623,6 +3624,8 @@
   verify_overflow_empty();
   assert(_revisitStack.isEmpty(), "tabula rasa");

+  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
+
   bool result = false;
   if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
     result = do_marking_mt(asynch);
@@ -3958,24 +3961,24 @@
   pst->all_tasks_completed();
 }

-class Par_ConcMarkingClosure: public OopClosure {
+class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
 private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSMarkStack* _overflow_stack;
-  CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
   OopTaskQueue* _work_queue;
 protected:
   DO_OOP_WORK_DEFN
 public:
   Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
-                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
-    _collector(collector),
+                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
+                         CMSMarkStack* revisit_stack):
+    Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
     _span(_collector->_span),
     _work_queue(work_queue),
     _bit_map(bit_map),
-    _overflow_stack(overflow_stack) { }  // need to initialize revisit stack etc.
+    _overflow_stack(overflow_stack)
+  { }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   void trim_queue(size_t max);
@@ -4063,8 +4066,9 @@
   oop obj_to_scan;
   CMSBitMap* bm = &(_collector->_markBitMap);
   CMSMarkStack* ovflw = &(_collector->_markStack);
+  CMSMarkStack* revisit = &(_collector->_revisitStack);
   int* seed = _collector->hash_seed(i);
-  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
+  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
   while (true) {
     cl.trim_queue(0);
     assert(work_q->size() == 0, "Should have been emptied above");
@@ -4089,6 +4093,7 @@
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   // First give up the locks, then yield, then re-lock
   // We should probably use a constructor/destructor idiom to
   // do this unlock/lock or modify the MutexUnlocker class to
@@ -4165,6 +4170,8 @@
   // multi-threaded marking phase.
   ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);

+  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
+
   conc_workers()->start_task(&tsk);
   while (tsk.yielded()) {
     tsk.coordinator_yield();
@@ -4404,7 +4411,8 @@
   CMSPrecleanRefsYieldClosure yield_cl(this);
   assert(rp->span().equals(_span), "Spans should be equal");
   CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
-                                 &_markStack, true /* preclean */);
+                                 &_markStack, &_revisitStack,
+                                 true /* preclean */);
   CMSDrainMarkingStackClosure complete_trace(this,
                                  _span, &_markBitMap, &_markStack,
                                  &keep_alive, true /* preclean */);
@@ -4424,6 +4432,7 @@
                            bitMapLock());
   startTimer();
   sample_eden();
+
   // The following will yield to allow foreground
   // collection to proceed promptly. XXX YSR:
   // The code in this method may need further
@@ -4453,6 +4462,7 @@
       SurvivorSpacePrecleanClosure
         sss_cl(this, _span, &_markBitMap, &_markStack,
                &pam_cl, before_count, CMSYield);
+      DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
       dng->from()->object_iterate_careful(&sss_cl);
       dng->to()->object_iterate_careful(&sss_cl);
     }
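In the work-stealing loop above, each GC worker constructs its own Par_ConcMarkingClosure, but every one of them is handed the same &_collector->_revisitStack, which is why the parallel closures must use par_push. A simplified illustration of that wiring (mutex-based stack and hypothetical names, not the HotSpot types):

#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

struct Klass {};

// Minimal shared revisit stack; a mutex keeps this example obviously
// correct rather than mirroring HotSpot's lock-free par_push.
class RevisitStack {
  std::vector<Klass*> _elems;
  std::mutex _lock;
 public:
  void par_push(Klass* k) { std::lock_guard<std::mutex> g(_lock); _elems.push_back(k); }
  size_t size() { std::lock_guard<std::mutex> g(_lock); return _elems.size(); }
};

// Per-worker marking closure: private work state, shared revisit stack.
class ParConcMarkingClosure {
  RevisitStack* _revisit_stack;   // plays the role of &collector->_revisitStack
 public:
  explicit ParConcMarkingClosure(RevisitStack* rs) : _revisit_stack(rs) {}
  void remember_klass(Klass* k) { _revisit_stack->par_push(k); }
};

int main() {
  RevisitStack shared;
  Klass k;
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) {
    workers.emplace_back([&shared, &k] {
      ParConcMarkingClosure cl(&shared);   // each worker gets its own closure
      cl.remember_klass(&k);               // ...but pushes to the shared stack
    });
  }
  for (auto& t : workers) t.join();
  std::printf("revisit stack holds %zu entries\n", shared.size());
}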
@@ -4554,6 +4564,13 @@
   verify_work_stacks_empty();
   verify_overflow_empty();

+  // Turn off checking for this method but turn it back on
+  // selectively.  There are yield points in this method
+  // but it is difficult to turn the checking off just around
+  // the yield points.  It is simpler to selectively turn
+  // it on.
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
+
   // strategy: starting with the first card, accumulate contiguous
   // ranges of dirty cards; clear these cards, then scan the region
   // covered by these cards.
@@ -4582,6 +4599,7 @@
     MemRegion dirtyRegion;
     {
       stopTimer();
+      // Potential yield point
       CMSTokenSync ts(true);
       startTimer();
       sample_eden();
@@ -4607,6 +4625,7 @@
       assert(numDirtyCards > 0, "consistency check");
       HeapWord* stop_point = NULL;
       stopTimer();
+      // Potential yield point
       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
                                bitMapLock());
       startTimer();
@@ -4614,6 +4633,7 @@
         verify_work_stacks_empty();
         verify_overflow_empty();
         sample_eden();
+        DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
         stop_point =
           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       }
@@ -4701,6 +4721,7 @@
     sample_eden();
     verify_work_stacks_empty();
     verify_overflow_empty();
+    DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
     HeapWord* stop_point =
       gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
     if (stop_point != NULL) {
@@ -4800,6 +4821,7 @@
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());

+  DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
   if (!init_mark_was_synchronous) {
     // We might assume that we need not fill TLAB's when
     // CMSScavengeBeforeRemark is set, because we may have just done
@@ -4903,6 +4925,9 @@
   _markStack._hit_limit = 0;
   _markStack._failed_double = 0;

+  // Check that all the klasses have been checked
+  assert(_revisitStack.isEmpty(), "Not all klasses revisited");
+
   if ((VerifyAfterGC || VerifyDuringGC) &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
     verify_after_remark();
@@ -5574,9 +5599,13 @@
 void CMSRefProcTaskProxy::work(int i) {
   assert(_collector->_span.equals(_span), "Inconsistency in _span");
   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
-                                        _mark_bit_map, work_queue(i));
+                                        _mark_bit_map,
+                                        &_collector->_revisitStack,
+                                        work_queue(i));
   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
-                                                 _mark_bit_map, work_queue(i));
+                                                 _mark_bit_map,
+                                                 &_collector->_revisitStack,
+                                                 work_queue(i));
   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
   _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
   if (_task.marks_oops_alive()) {
@@ -5604,12 +5633,13 @@
 };

 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
-  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
-  _collector(collector),
+  MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
+  OopTaskQueue* work_queue):
+  Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
-  _mark_and_push(collector, span, bit_map, work_queue),
+  _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
 { }
@@ -5696,7 +5726,8 @@
   verify_work_stacks_empty();

   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
-                                          &_markStack, false /* !preclean */);
+                                          &_markStack, &_revisitStack,
+                                          false /* !preclean */);
   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                 _span, &_markBitMap, &_markStack,
                                 &cmsKeepAliveClosure, false /* !preclean */);
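The preclean hunks above show the intended checking discipline: a method-wide RememberKlassesChecker mux(false) turns verification off because the method yields in places where the global flag would be misleading, and a narrower mx(CMSClassUnloadingEnabled) re-enables it exactly around object_iterate_careful_m(), where closures genuinely must remember klasses. A sketch of that nested scoped toggling, using a simplified model of the debug-only flag rather than the HotSpot code:

#include <cassert>
#include <cstdio>

// Global debug flag, analogous to OopClosure::_must_remember_klasses.
static bool g_must_remember_klasses = false;

// RAII toggle analogous to RememberKlassesChecker: saves the old state,
// installs the requested one, restores on scope exit.
class RememberKlassesChecker {
  bool _saved;
 public:
  explicit RememberKlassesChecker(bool checking_on) : _saved(g_must_remember_klasses) {
    g_must_remember_klasses = checking_on;
  }
  ~RememberKlassesChecker() { g_must_remember_klasses = _saved; }
};

void object_iterate_careful() {
  // The closure used here asserts the flag matches its configuration.
  assert(g_must_remember_klasses && "iteration requires klass remembering");
  std::puts("scanning objects with klass remembering checked");
}

void preclean_work(bool class_unloading_enabled) {
  RememberKlassesChecker mux(false);     // method yields; keep checking off...
  {                                      // ...except around the actual scan:
    RememberKlassesChecker mx(class_unloading_enabled);
    object_iterate_careful();
  }
  // a yield point here would see checking off, as intended
}

int main() { preclean_work(true); }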
@@ -6531,6 +6562,7 @@
   assert_lock_strong(_freelistLock);
   assert_lock_strong(_bit_map->lock());
   // relinquish the free_list_lock and bitMaplock()
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   _bit_map->lock()->unlock();
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
@@ -6703,6 +6735,7 @@
          "CMS thread should hold CMS token");
   assert_lock_strong(_freelistLock);
   assert_lock_strong(_bitMap->lock());
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   // relinquish the free_list_lock and bitMaplock()
   _bitMap->lock()->unlock();
   _freelistLock->unlock();
@@ -6779,6 +6812,7 @@
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
   assert_lock_strong(_bit_map->lock());
+  DEBUG_ONLY(RememberKlassesChecker smx(false);)
   // Relinquish the bit map lock
   _bit_map->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
@@ -6941,6 +6975,7 @@
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
   assert_lock_strong(_bitMap->lock());
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   _bitMap->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
@@ -7295,15 +7330,12 @@
                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
                      CMSMarkStack*  revisitStack,
                      HeapWord* finger, MarkFromRootsClosure* parent) :
-  OopClosure(collector->ref_processor()),
-  _collector(collector),
+  KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
   _span(span),
   _bitMap(bitMap),
   _markStack(markStack),
-  _revisitStack(revisitStack),
   _finger(finger),
-  _parent(parent),
-  _should_remember_klasses(collector->should_unload_classes())
+  _parent(parent)
 { }

 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
@@ -7315,18 +7347,17 @@
                      HeapWord* finger,
                      HeapWord** global_finger_addr,
                      Par_MarkFromRootsClosure* parent) :
-  OopClosure(collector->ref_processor()),
-  _collector(collector),
+  Par_KlassRememberingOopClosure(collector,
+                                 collector->ref_processor(),
+                                 revisit_stack),
   _whole_span(collector->_span),
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
   _overflow_stack(overflow_stack),
-  _revisit_stack(revisit_stack),
   _finger(finger),
   _global_finger_addr(global_finger_addr),
-  _parent(parent),
-  _should_remember_klasses(collector->should_unload_classes())
+  _parent(parent)
 { }

 // Assumes thread-safe access by callers, who are
@@ -7456,6 +7487,14 @@
 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

+KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
+                                                       ReferenceProcessor* rp,
+                                                       CMSMarkStack* revisit_stack) :
+  OopClosure(rp),
+  _collector(collector),
+  _revisit_stack(revisit_stack),
+  _should_remember_klasses(collector->should_unload_classes()) {}
+
 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                        MemRegion span,
                                        ReferenceProcessor* rp,
@@ -7464,15 +7503,12 @@
                                        CMSMarkStack*  mark_stack,
                                        CMSMarkStack*  revisit_stack,
                                        bool           concurrent_precleaning):
-  OopClosure(rp),
-  _collector(collector),
+  KlassRememberingOopClosure(collector, rp, revisit_stack),
   _span(span),
   _bit_map(bit_map),
   _mod_union_table(mod_union_table),
   _mark_stack(mark_stack),
-  _revisit_stack(revisit_stack),
-  _concurrent_precleaning(concurrent_precleaning),
-  _should_remember_klasses(collector->should_unload_classes())
+  _concurrent_precleaning(concurrent_precleaning)
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7540,13 +7576,10 @@
                                                CMSBitMap* bit_map,
                                                OopTaskQueue* work_queue,
                                                CMSMarkStack* revisit_stack):
-  OopClosure(rp),
-  _collector(collector),
+  Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
   _span(span),
   _bit_map(bit_map),
-  _work_queue(work_queue),
-  _revisit_stack(revisit_stack),
-  _should_remember_klasses(collector->should_unload_classes())
+  _work_queue(work_queue)
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7599,19 +7632,8 @@
 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }

-void PushAndMarkClosure::remember_klass(Klass* k) {
-  if (!_revisit_stack->push(oop(k))) {
-    fatal("Revisit stack overflowed in PushAndMarkClosure");
-  }
-}
-
-void Par_PushAndMarkClosure::remember_klass(Klass* k) {
-  if (!_revisit_stack->par_push(oop(k))) {
-    fatal("Revist stack overflowed in Par_PushAndMarkClosure");
-  }
-}
-
 void CMSPrecleanRefsYieldClosure::do_yield_work() {
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   Mutex* bml = _collector->bitMapLock();
   assert_lock_strong(bml);
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
@@ -8302,6 +8324,19 @@
          (!_span.contains(addr) || _bit_map->isMarked(addr));
 }

+CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
+                                          MemRegion span,
+                                          CMSBitMap* bit_map, CMSMarkStack* mark_stack,
+                                          CMSMarkStack* revisit_stack, bool cpc):
+  KlassRememberingOopClosure(collector, NULL, revisit_stack),
+  _span(span),
+  _bit_map(bit_map),
+  _mark_stack(mark_stack),
+  _concurrent_precleaning(cpc) {
+  assert(!_span.is_empty(), "Empty span could spell trouble");
+}
+
+
 // CMSKeepAliveClosure: the serial version
 void CMSKeepAliveClosure::do_oop(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
@@ -8385,6 +8420,16 @@
   }
 }

+CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
+                                  CMSCollector* collector,
+                                  MemRegion span, CMSBitMap* bit_map,
+                                  CMSMarkStack* revisit_stack,
+                                  OopTaskQueue* work_queue):
+  Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
+  _span(span),
+  _bit_map(bit_map),
+  _work_queue(work_queue) { }
+
 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) &&
diff -r 308762b2bf14 -r 05f89f00a864 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Aug 14 13:44:15 2009 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Aug 24 10:36:31 2009 -0700
@@ -1790,12 +1790,13 @@
 public:
   CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                  MemRegion span, CMSBitMap* bit_map,
+                                 CMSMarkStack* revisit_stack,
                                  OopTaskQueue* work_queue):
     _collector(collector),
     _span(span),
     _bit_map(bit_map),
     _work_queue(work_queue),
-    _mark_and_push(collector, span, bit_map, work_queue) { }
+    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }

 public:
   void trim_queue(uint max);
diff -r 308762b2bf14 -r 05f89f00a864 src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
--- a/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Fri Aug 14 13:44:15 2009 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Mon Aug 24 10:36:31 2009 -0700
@@ -145,6 +145,7 @@
 concurrentMarkSweepGeneration.cpp       globals_extension.hpp
 concurrentMarkSweepGeneration.cpp       handles.inline.hpp
 concurrentMarkSweepGeneration.cpp       isGCActiveMark.hpp
+concurrentMarkSweepGeneration.cpp       iterator.hpp
 concurrentMarkSweepGeneration.cpp       java.hpp
 concurrentMarkSweepGeneration.cpp       jvmtiExport.hpp
 concurrentMarkSweepGeneration.cpp       oop.inline.hpp
diff -r 308762b2bf14 -r 05f89f00a864 src/share/vm/memory/iterator.cpp
--- a/src/share/vm/memory/iterator.cpp	Fri Aug 14 13:44:15 2009 -0700
+++ b/src/share/vm/memory/iterator.cpp	Mon Aug 24 10:36:31 2009 -0700
@@ -25,6 +25,10 @@
 # include "incls/_precompiled.incl"
 # include "incls/_iterator.cpp.incl"

+#ifdef ASSERT
+bool OopClosure::_must_remember_klasses = false;
+#endif
+
 void ObjectToOopClosure::do_object(oop obj) {
   obj->oop_iterate(_cl);
 }
@@ -32,3 +36,13 @@
 void VoidClosure::do_void() {
   ShouldNotCallThis();
 }
+
+#ifdef ASSERT
+bool OopClosure::must_remember_klasses() {
+  return _must_remember_klasses;
+}
+void OopClosure::set_must_remember_klasses(bool v) {
+  _must_remember_klasses = v;
+}
+#endif
+
diff -r 308762b2bf14 -r 05f89f00a864 src/share/vm/memory/iterator.hpp
--- a/src/share/vm/memory/iterator.hpp	Fri Aug 14 13:44:15 2009 -0700
+++ b/src/share/vm/memory/iterator.hpp	Mon Aug 24 10:36:31 2009 -0700
@@ -54,7 +54,12 @@
   // In support of post-processing of weak links of KlassKlass objects;
   // see KlassKlass::oop_oop_iterate().
-  virtual const bool should_remember_klasses() const { return false; }
+
+  virtual const bool should_remember_klasses() const {
+    assert(!must_remember_klasses(), "Should have overridden this method.");
+    return false;
+  }
+
   virtual void remember_klass(Klass* k) { /* do nothing */ }

   // If "true", invoke on nmethods (when scanning compiled frames).
@@ -74,6 +79,12 @@
   // location without an intervening "major reset" (like the end of a GC).
   virtual bool idempotent() { return false; }
   virtual bool apply_to_weak_ref_discovered_field() { return false; }
+
+#ifdef ASSERT
+  static bool _must_remember_klasses;
+  static bool must_remember_klasses();
+  static void set_must_remember_klasses(bool v);
+#endif
 };

 // ObjectClosure is used for iterating through an object space
@@ -219,3 +230,38 @@
   // correct length.
   virtual void do_tag(int tag) = 0;
 };
+
+#ifdef ASSERT
+// This class is used to flag phases of a collection that
+// can unload classes and which should override the
+// should_remember_klasses() and remember_klass() of OopClosure.
+// The _must_remember_klasses flag is set in the constructor and restored
+// in the destructor.  _must_remember_klasses is checked in assertions
+// in the OopClosure implementations of should_remember_klasses() and
+// remember_klass() and the expectation is that the OopClosure
+// implementation should not be in use if _must_remember_klasses is set.
+// Instances of RememberKlassesChecker can be placed in
+// marking phases of collections which can do class unloading.
+// RememberKlassesChecker can be passed "false" to turn off checking.
+// It is used by CMS when CMS yields to a different collector.
+class RememberKlassesChecker: StackObj {
+  bool _state;
+  bool _skip;
+ public:
+  RememberKlassesChecker(bool checking_on) : _state(false), _skip(false) {
+    _skip = !(ClassUnloading && !UseConcMarkSweepGC ||
+              CMSClassUnloadingEnabled && UseConcMarkSweepGC);
+    if (_skip) {
+      return;
+    }
+    _state = OopClosure::must_remember_klasses();
+    OopClosure::set_must_remember_klasses(checking_on);
+  }
+  ~RememberKlassesChecker() {
+    if (_skip) {
+      return;
+    }
+    OopClosure::set_must_remember_klasses(_state);
+  }
+};
+#endif  // ASSERT
diff -r 308762b2bf14 -r 05f89f00a864 src/share/vm/memory/referenceProcessor.cpp
--- a/src/share/vm/memory/referenceProcessor.cpp	Fri Aug 14 13:44:15 2009 -0700
+++ b/src/share/vm/memory/referenceProcessor.cpp	Mon Aug 24 10:36:31 2009 -0700
@@ -1231,6 +1231,11 @@

   NOT_PRODUCT(verify_ok_to_handle_reflists());

+#ifdef ASSERT
+  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
+                               CMSClassUnloadingEnabled && UseConcMarkSweepGC;
+  RememberKlassesChecker mx(must_remember_klasses);
+#endif
   // Soft references
   {
     TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
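Taken together, the iterator.hpp/iterator.cpp machinery gives every closure's should_remember_klasses() a phase-wide expectation to check against: a closure that kept the OopClosure defaults now trips an assert the first time it is used inside a class-unloading phase, which is essentially the class of bug this change targets. A compressed, compilable model of that interaction, with simplified stand-ins for the HotSpot types:

#include <cassert>
#include <cstdio>

static bool g_must_remember_klasses = false;   // models OopClosure::_must_remember_klasses

struct Klass {};

class OopClosure {
 public:
  // Default: closure does not remember klasses.  The assert catches use of
  // such a closure inside a phase that requires remembering.
  virtual bool should_remember_klasses() const {
    assert(!g_must_remember_klasses && "should have overridden this method");
    return false;
  }
  virtual void remember_klass(Klass*) { /* do nothing */ }
  virtual ~OopClosure() {}
};

class KlassRememberingClosure : public OopClosure {
  bool _should_remember;
 public:
  explicit KlassRememberingClosure(bool remember) : _should_remember(remember) {}
  bool should_remember_klasses() const override {
    // Mirrors check_remember_klasses(): configuration must match the phase.
    assert(_should_remember == g_must_remember_klasses && "config/phase mismatch");
    return _should_remember;
  }
};

int main() {
  g_must_remember_klasses = true;        // a RememberKlassesChecker would set this
  KlassRememberingClosure good(true);
  (void)good.should_remember_klasses();  // passes: config matches the phase
  std::puts("consistent closure accepted");
  // OopClosure bad; (void)bad.should_remember_klasses();  // would assert
}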