Mercurial > hg > graal-compiler
comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 935:05f89f00a864
6798898: CMS: bugs related to class unloading
Summary: Override should_remember_klasses() and remember_klass() as needed.
Reviewed-by: ysr, jcoomes
author | jmasa |
---|---|
date | Mon, 24 Aug 2009 10:36:31 -0700 |
parents | fe1574da39fc |
children | 8b46c4d82093 |
comparison
equal
deleted
inserted
replaced
912:308762b2bf14 | 935:05f89f00a864 |
---|---|
2274 { | 2274 { |
2275 ReleaseForegroundGC x(this); | 2275 ReleaseForegroundGC x(this); |
2276 | 2276 |
2277 VM_CMS_Final_Remark final_remark_op(this); | 2277 VM_CMS_Final_Remark final_remark_op(this); |
2278 VMThread::execute(&final_remark_op); | 2278 VMThread::execute(&final_remark_op); |
2279 } | 2279 } |
2280 assert(_foregroundGCShouldWait, "block post-condition"); | 2280 assert(_foregroundGCShouldWait, "block post-condition"); |
2281 break; | 2281 break; |
2282 case Sweeping: | 2282 case Sweeping: |
2283 if (UseAdaptiveSizePolicy) { | 2283 if (UseAdaptiveSizePolicy) { |
2284 size_policy()->concurrent_sweeping_begin(); | 2284 size_policy()->concurrent_sweeping_begin(); |
3497 | 3497 |
3498 // weak reference processing has not started yet. | 3498 // weak reference processing has not started yet. |
3499 ref_processor()->set_enqueuing_is_done(false); | 3499 ref_processor()->set_enqueuing_is_done(false); |
3500 | 3500 |
3501 { | 3501 { |
3502 // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);) | |
3502 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) | 3503 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) |
3503 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. | 3504 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. |
3504 gch->gen_process_strong_roots(_cmsGen->level(), | 3505 gch->gen_process_strong_roots(_cmsGen->level(), |
3505 true, // younger gens are roots | 3506 true, // younger gens are roots |
3506 true, // collecting perm gen | 3507 true, // collecting perm gen |
3620 // obsolete contents from a short-circuited previous CMS cycle. | 3621 // obsolete contents from a short-circuited previous CMS cycle. |
3621 _revisitStack.reset(); | 3622 _revisitStack.reset(); |
3622 verify_work_stacks_empty(); | 3623 verify_work_stacks_empty(); |
3623 verify_overflow_empty(); | 3624 verify_overflow_empty(); |
3624 assert(_revisitStack.isEmpty(), "tabula rasa"); | 3625 assert(_revisitStack.isEmpty(), "tabula rasa"); |
3626 | |
3627 DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);) | |
3625 | 3628 |
3626 bool result = false; | 3629 bool result = false; |
3627 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) { | 3630 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) { |
3628 result = do_marking_mt(asynch); | 3631 result = do_marking_mt(asynch); |
3629 } else { | 3632 } else { |
3956 // have been bumped up by the thread that claimed the last | 3959 // have been bumped up by the thread that claimed the last |
3957 // task. | 3960 // task. |
3958 pst->all_tasks_completed(); | 3961 pst->all_tasks_completed(); |
3959 } | 3962 } |
3960 | 3963 |
3961 class Par_ConcMarkingClosure: public OopClosure { | 3964 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure { |
3962 private: | 3965 private: |
3963 CMSCollector* _collector; | |
3964 MemRegion _span; | 3966 MemRegion _span; |
3965 CMSBitMap* _bit_map; | 3967 CMSBitMap* _bit_map; |
3966 CMSMarkStack* _overflow_stack; | 3968 CMSMarkStack* _overflow_stack; |
3967 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use | |
3968 OopTaskQueue* _work_queue; | 3969 OopTaskQueue* _work_queue; |
3969 protected: | 3970 protected: |
3970 DO_OOP_WORK_DEFN | 3971 DO_OOP_WORK_DEFN |
3971 public: | 3972 public: |
3972 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue, | 3973 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue, |
3973 CMSBitMap* bit_map, CMSMarkStack* overflow_stack): | 3974 CMSBitMap* bit_map, CMSMarkStack* overflow_stack, |
3974 _collector(collector), | 3975 CMSMarkStack* revisit_stack): |
3976 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), | |
3975 _span(_collector->_span), | 3977 _span(_collector->_span), |
3976 _work_queue(work_queue), | 3978 _work_queue(work_queue), |
3977 _bit_map(bit_map), | 3979 _bit_map(bit_map), |
3978 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc. | 3980 _overflow_stack(overflow_stack) |
3981 { } | |
3979 virtual void do_oop(oop* p); | 3982 virtual void do_oop(oop* p); |
3980 virtual void do_oop(narrowOop* p); | 3983 virtual void do_oop(narrowOop* p); |
3981 void trim_queue(size_t max); | 3984 void trim_queue(size_t max); |
3982 void handle_stack_overflow(HeapWord* lost); | 3985 void handle_stack_overflow(HeapWord* lost); |
3983 }; | 3986 }; |
4061 void CMSConcMarkingTask::do_work_steal(int i) { | 4064 void CMSConcMarkingTask::do_work_steal(int i) { |
4062 OopTaskQueue* work_q = work_queue(i); | 4065 OopTaskQueue* work_q = work_queue(i); |
4063 oop obj_to_scan; | 4066 oop obj_to_scan; |
4064 CMSBitMap* bm = &(_collector->_markBitMap); | 4067 CMSBitMap* bm = &(_collector->_markBitMap); |
4065 CMSMarkStack* ovflw = &(_collector->_markStack); | 4068 CMSMarkStack* ovflw = &(_collector->_markStack); |
4069 CMSMarkStack* revisit = &(_collector->_revisitStack); | |
4066 int* seed = _collector->hash_seed(i); | 4070 int* seed = _collector->hash_seed(i); |
4067 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw); | 4071 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit); |
4068 while (true) { | 4072 while (true) { |
4069 cl.trim_queue(0); | 4073 cl.trim_queue(0); |
4070 assert(work_q->size() == 0, "Should have been emptied above"); | 4074 assert(work_q->size() == 0, "Should have been emptied above"); |
4071 if (get_work_from_overflow_stack(ovflw, work_q)) { | 4075 if (get_work_from_overflow_stack(ovflw, work_q)) { |
4072 // Can't assert below because the work obtained from the | 4076 // Can't assert below because the work obtained from the |
4087 // This is run by the CMS (coordinator) thread. | 4091 // This is run by the CMS (coordinator) thread. |
4088 void CMSConcMarkingTask::coordinator_yield() { | 4092 void CMSConcMarkingTask::coordinator_yield() { |
4089 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | 4093 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), |
4090 "CMS thread should hold CMS token"); | 4094 "CMS thread should hold CMS token"); |
4091 | 4095 |
4096 DEBUG_ONLY(RememberKlassesChecker mux(false);) | |
4092 // First give up the locks, then yield, then re-lock | 4097 // First give up the locks, then yield, then re-lock |
4093 // We should probably use a constructor/destructor idiom to | 4098 // We should probably use a constructor/destructor idiom to |
4094 // do this unlock/lock or modify the MutexUnlocker class to | 4099 // do this unlock/lock or modify the MutexUnlocker class to |
4095 // serve our purpose. XXX | 4100 // serve our purpose. XXX |
4096 assert_lock_strong(_bit_map_lock); | 4101 assert_lock_strong(_bit_map_lock); |
4162 // Refs discovery is already non-atomic. | 4167 // Refs discovery is already non-atomic. |
4163 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); | 4168 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); |
4164 // Mutate the Refs discovery so it is MT during the | 4169 // Mutate the Refs discovery so it is MT during the |
4165 // multi-threaded marking phase. | 4170 // multi-threaded marking phase. |
4166 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1); | 4171 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1); |
4172 | |
4173 DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);) | |
4167 | 4174 |
4168 conc_workers()->start_task(&tsk); | 4175 conc_workers()->start_task(&tsk); |
4169 while (tsk.yielded()) { | 4176 while (tsk.yielded()) { |
4170 tsk.coordinator_yield(); | 4177 tsk.coordinator_yield(); |
4171 conc_workers()->continue_task(&tsk); | 4178 conc_workers()->continue_task(&tsk); |
4402 if (clean_refs) { | 4409 if (clean_refs) { |
4403 ReferenceProcessor* rp = ref_processor(); | 4410 ReferenceProcessor* rp = ref_processor(); |
4404 CMSPrecleanRefsYieldClosure yield_cl(this); | 4411 CMSPrecleanRefsYieldClosure yield_cl(this); |
4405 assert(rp->span().equals(_span), "Spans should be equal"); | 4412 assert(rp->span().equals(_span), "Spans should be equal"); |
4406 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, | 4413 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, |
4407 &_markStack, true /* preclean */); | 4414 &_markStack, &_revisitStack, |
4415 true /* preclean */); | |
4408 CMSDrainMarkingStackClosure complete_trace(this, | 4416 CMSDrainMarkingStackClosure complete_trace(this, |
4409 _span, &_markBitMap, &_markStack, | 4417 _span, &_markBitMap, &_markStack, |
4410 &keep_alive, true /* preclean */); | 4418 &keep_alive, true /* preclean */); |
4411 | 4419 |
4412 // We don't want this step to interfere with a young | 4420 // We don't want this step to interfere with a young |
4422 stopTimer(); | 4430 stopTimer(); |
4423 CMSTokenSyncWithLocks x(true /* is cms thread */, | 4431 CMSTokenSyncWithLocks x(true /* is cms thread */, |
4424 bitMapLock()); | 4432 bitMapLock()); |
4425 startTimer(); | 4433 startTimer(); |
4426 sample_eden(); | 4434 sample_eden(); |
4435 | |
4427 // The following will yield to allow foreground | 4436 // The following will yield to allow foreground |
4428 // collection to proceed promptly. XXX YSR: | 4437 // collection to proceed promptly. XXX YSR: |
4429 // The code in this method may need further | 4438 // The code in this method may need further |
4430 // tweaking for better performance and some restructuring | 4439 // tweaking for better performance and some restructuring |
4431 // for cleaner interfaces. | 4440 // for cleaner interfaces. |
4451 unsigned int before_count = | 4460 unsigned int before_count = |
4452 GenCollectedHeap::heap()->total_collections(); | 4461 GenCollectedHeap::heap()->total_collections(); |
4453 SurvivorSpacePrecleanClosure | 4462 SurvivorSpacePrecleanClosure |
4454 sss_cl(this, _span, &_markBitMap, &_markStack, | 4463 sss_cl(this, _span, &_markBitMap, &_markStack, |
4455 &pam_cl, before_count, CMSYield); | 4464 &pam_cl, before_count, CMSYield); |
4465 DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);) | |
4456 dng->from()->object_iterate_careful(&sss_cl); | 4466 dng->from()->object_iterate_careful(&sss_cl); |
4457 dng->to()->object_iterate_careful(&sss_cl); | 4467 dng->to()->object_iterate_careful(&sss_cl); |
4458 } | 4468 } |
4459 MarkRefsIntoAndScanClosure | 4469 MarkRefsIntoAndScanClosure |
4460 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, | 4470 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, |
4552 ConcurrentMarkSweepGeneration* gen, | 4562 ConcurrentMarkSweepGeneration* gen, |
4553 ScanMarkedObjectsAgainCarefullyClosure* cl) { | 4563 ScanMarkedObjectsAgainCarefullyClosure* cl) { |
4554 verify_work_stacks_empty(); | 4564 verify_work_stacks_empty(); |
4555 verify_overflow_empty(); | 4565 verify_overflow_empty(); |
4556 | 4566 |
4567 // Turn off checking for this method but turn it back on | |
4568 // selectively. There are yield points in this method | |
4569 // but it is difficult to turn the checking off just around | |
4570 // the yield points. It is simpler to selectively turn | |
4571 // it on. | |
4572 DEBUG_ONLY(RememberKlassesChecker mux(false);) | |
4573 | |
4557 // strategy: starting with the first card, accumulate contiguous | 4574 // strategy: starting with the first card, accumulate contiguous |
4558 // ranges of dirty cards; clear these cards, then scan the region | 4575 // ranges of dirty cards; clear these cards, then scan the region |
4559 // covered by these cards. | 4576 // covered by these cards. |
4560 | 4577 |
4561 // Since all of the MUT is committed ahead, we can just use | 4578 // Since all of the MUT is committed ahead, we can just use |
4580 HandleMark hm; | 4597 HandleMark hm; |
4581 | 4598 |
4582 MemRegion dirtyRegion; | 4599 MemRegion dirtyRegion; |
4583 { | 4600 { |
4584 stopTimer(); | 4601 stopTimer(); |
4602 // Potential yield point | |
4585 CMSTokenSync ts(true); | 4603 CMSTokenSync ts(true); |
4586 startTimer(); | 4604 startTimer(); |
4587 sample_eden(); | 4605 sample_eden(); |
4588 // Get dirty region starting at nextOffset (inclusive), | 4606 // Get dirty region starting at nextOffset (inclusive), |
4589 // simultaneously clearing it. | 4607 // simultaneously clearing it. |
4605 // yields for foreground GC as needed). | 4623 // yields for foreground GC as needed). |
4606 if (!dirtyRegion.is_empty()) { | 4624 if (!dirtyRegion.is_empty()) { |
4607 assert(numDirtyCards > 0, "consistency check"); | 4625 assert(numDirtyCards > 0, "consistency check"); |
4608 HeapWord* stop_point = NULL; | 4626 HeapWord* stop_point = NULL; |
4609 stopTimer(); | 4627 stopTimer(); |
4628 // Potential yield point | |
4610 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), | 4629 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), |
4611 bitMapLock()); | 4630 bitMapLock()); |
4612 startTimer(); | 4631 startTimer(); |
4613 { | 4632 { |
4614 verify_work_stacks_empty(); | 4633 verify_work_stacks_empty(); |
4615 verify_overflow_empty(); | 4634 verify_overflow_empty(); |
4616 sample_eden(); | 4635 sample_eden(); |
4636 DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);) | |
4617 stop_point = | 4637 stop_point = |
4618 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); | 4638 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); |
4619 } | 4639 } |
4620 if (stop_point != NULL) { | 4640 if (stop_point != NULL) { |
4621 // The careful iteration stopped early either because it found an | 4641 // The careful iteration stopped early either because it found an |
4699 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock()); | 4719 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock()); |
4700 startTimer(); | 4720 startTimer(); |
4701 sample_eden(); | 4721 sample_eden(); |
4702 verify_work_stacks_empty(); | 4722 verify_work_stacks_empty(); |
4703 verify_overflow_empty(); | 4723 verify_overflow_empty(); |
4724 DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);) | |
4704 HeapWord* stop_point = | 4725 HeapWord* stop_point = |
4705 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); | 4726 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); |
4706 if (stop_point != NULL) { | 4727 if (stop_point != NULL) { |
4707 // The careful iteration stopped early because it found an | 4728 // The careful iteration stopped early because it found an |
4708 // uninitialized object. Redirty the bits corresponding to the | 4729 // uninitialized object. Redirty the bits corresponding to the |
4798 CodeCache::gc_prologue(); | 4819 CodeCache::gc_prologue(); |
4799 } | 4820 } |
4800 assert(haveFreelistLocks(), "must have free list locks"); | 4821 assert(haveFreelistLocks(), "must have free list locks"); |
4801 assert_lock_strong(bitMapLock()); | 4822 assert_lock_strong(bitMapLock()); |
4802 | 4823 |
4824 DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);) | |
4803 if (!init_mark_was_synchronous) { | 4825 if (!init_mark_was_synchronous) { |
4804 // We might assume that we need not fill TLAB's when | 4826 // We might assume that we need not fill TLAB's when |
4805 // CMSScavengeBeforeRemark is set, because we may have just done | 4827 // CMSScavengeBeforeRemark is set, because we may have just done |
4806 // a scavenge which would have filled all TLAB's -- and besides | 4828 // a scavenge which would have filled all TLAB's -- and besides |
4807 // Eden would be empty. This however may not always be the case -- | 4829 // Eden would be empty. This however may not always be the case -- |
4900 _markStack.capacity()); | 4922 _markStack.capacity()); |
4901 } | 4923 } |
4902 } | 4924 } |
4903 _markStack._hit_limit = 0; | 4925 _markStack._hit_limit = 0; |
4904 _markStack._failed_double = 0; | 4926 _markStack._failed_double = 0; |
4927 | |
4928 // Check that all the klasses have been checked | |
4929 assert(_revisitStack.isEmpty(), "Not all klasses revisited"); | |
4905 | 4930 |
4906 if ((VerifyAfterGC || VerifyDuringGC) && | 4931 if ((VerifyAfterGC || VerifyDuringGC) && |
4907 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | 4932 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { |
4908 verify_after_remark(); | 4933 verify_after_remark(); |
4909 } | 4934 } |
5572 }; | 5597 }; |
5573 | 5598 |
5574 void CMSRefProcTaskProxy::work(int i) { | 5599 void CMSRefProcTaskProxy::work(int i) { |
5575 assert(_collector->_span.equals(_span), "Inconsistency in _span"); | 5600 assert(_collector->_span.equals(_span), "Inconsistency in _span"); |
5576 CMSParKeepAliveClosure par_keep_alive(_collector, _span, | 5601 CMSParKeepAliveClosure par_keep_alive(_collector, _span, |
5577 _mark_bit_map, work_queue(i)); | 5602 _mark_bit_map, |
5603 &_collector->_revisitStack, | |
5604 work_queue(i)); | |
5578 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, | 5605 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, |
5579 _mark_bit_map, work_queue(i)); | 5606 _mark_bit_map, |
5607 &_collector->_revisitStack, | |
5608 work_queue(i)); | |
5580 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); | 5609 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); |
5581 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack); | 5610 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack); |
5582 if (_task.marks_oops_alive()) { | 5611 if (_task.marks_oops_alive()) { |
5583 do_work_steal(i, &par_drain_stack, &par_keep_alive, | 5612 do_work_steal(i, &par_drain_stack, &par_keep_alive, |
5584 _collector->hash_seed(i)); | 5613 _collector->hash_seed(i)); |
5602 _task.work(i); | 5631 _task.work(i); |
5603 } | 5632 } |
5604 }; | 5633 }; |
5605 | 5634 |
5606 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, | 5635 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, |
5607 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue): | 5636 MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack, |
5608 _collector(collector), | 5637 OopTaskQueue* work_queue): |
5638 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), | |
5609 _span(span), | 5639 _span(span), |
5610 _bit_map(bit_map), | 5640 _bit_map(bit_map), |
5611 _work_queue(work_queue), | 5641 _work_queue(work_queue), |
5612 _mark_and_push(collector, span, bit_map, work_queue), | 5642 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue), |
5613 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), | 5643 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), |
5614 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))) | 5644 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))) |
5615 { } | 5645 { } |
5616 | 5646 |
5617 // . see if we can share work_queues with ParNew? XXX | 5647 // . see if we can share work_queues with ParNew? XXX |
5694 // Process weak references. | 5724 // Process weak references. |
5695 rp->setup_policy(clear_all_soft_refs); | 5725 rp->setup_policy(clear_all_soft_refs); |
5696 verify_work_stacks_empty(); | 5726 verify_work_stacks_empty(); |
5697 | 5727 |
5698 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, | 5728 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, |
5699 &_markStack, false /* !preclean */); | 5729 &_markStack, &_revisitStack, |
5730 false /* !preclean */); | |
5700 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, | 5731 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, |
5701 _span, &_markBitMap, &_markStack, | 5732 _span, &_markBitMap, &_markStack, |
5702 &cmsKeepAliveClosure, false /* !preclean */); | 5733 &cmsKeepAliveClosure, false /* !preclean */); |
5703 { | 5734 { |
5704 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); | 5735 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); |
6529 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | 6560 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), |
6530 "CMS thread should hold CMS token"); | 6561 "CMS thread should hold CMS token"); |
6531 assert_lock_strong(_freelistLock); | 6562 assert_lock_strong(_freelistLock); |
6532 assert_lock_strong(_bit_map->lock()); | 6563 assert_lock_strong(_bit_map->lock()); |
6533 // relinquish the free_list_lock and bitMaplock() | 6564 // relinquish the free_list_lock and bitMaplock() |
6565 DEBUG_ONLY(RememberKlassesChecker mux(false);) | |
6534 _bit_map->lock()->unlock(); | 6566 _bit_map->lock()->unlock(); |
6535 _freelistLock->unlock(); | 6567 _freelistLock->unlock(); |
6536 ConcurrentMarkSweepThread::desynchronize(true); | 6568 ConcurrentMarkSweepThread::desynchronize(true); |
6537 ConcurrentMarkSweepThread::acknowledge_yield_request(); | 6569 ConcurrentMarkSweepThread::acknowledge_yield_request(); |
6538 _collector->stopTimer(); | 6570 _collector->stopTimer(); |
6701 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() { | 6733 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() { |
6702 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | 6734 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), |
6703 "CMS thread should hold CMS token"); | 6735 "CMS thread should hold CMS token"); |
6704 assert_lock_strong(_freelistLock); | 6736 assert_lock_strong(_freelistLock); |
6705 assert_lock_strong(_bitMap->lock()); | 6737 assert_lock_strong(_bitMap->lock()); |
6738 DEBUG_ONLY(RememberKlassesChecker mux(false);) | |
6706 // relinquish the free_list_lock and bitMaplock() | 6739 // relinquish the free_list_lock and bitMaplock() |
6707 _bitMap->lock()->unlock(); | 6740 _bitMap->lock()->unlock(); |
6708 _freelistLock->unlock(); | 6741 _freelistLock->unlock(); |
6709 ConcurrentMarkSweepThread::desynchronize(true); | 6742 ConcurrentMarkSweepThread::desynchronize(true); |
6710 ConcurrentMarkSweepThread::acknowledge_yield_request(); | 6743 ConcurrentMarkSweepThread::acknowledge_yield_request(); |
6777 | 6810 |
6778 void SurvivorSpacePrecleanClosure::do_yield_work() { | 6811 void SurvivorSpacePrecleanClosure::do_yield_work() { |
6779 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | 6812 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), |
6780 "CMS thread should hold CMS token"); | 6813 "CMS thread should hold CMS token"); |
6781 assert_lock_strong(_bit_map->lock()); | 6814 assert_lock_strong(_bit_map->lock()); |
6815 DEBUG_ONLY(RememberKlassesChecker smx(false);) | |
6782 // Relinquish the bit map lock | 6816 // Relinquish the bit map lock |
6783 _bit_map->lock()->unlock(); | 6817 _bit_map->lock()->unlock(); |
6784 ConcurrentMarkSweepThread::desynchronize(true); | 6818 ConcurrentMarkSweepThread::desynchronize(true); |
6785 ConcurrentMarkSweepThread::acknowledge_yield_request(); | 6819 ConcurrentMarkSweepThread::acknowledge_yield_request(); |
6786 _collector->stopTimer(); | 6820 _collector->stopTimer(); |
6939 // do this unlock/lock or modify the MutexUnlocker class to | 6973 // do this unlock/lock or modify the MutexUnlocker class to |
6940 // serve our purpose. XXX | 6974 // serve our purpose. XXX |
6941 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | 6975 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), |
6942 "CMS thread should hold CMS token"); | 6976 "CMS thread should hold CMS token"); |
6943 assert_lock_strong(_bitMap->lock()); | 6977 assert_lock_strong(_bitMap->lock()); |
6978 DEBUG_ONLY(RememberKlassesChecker mux(false);) | |
6944 _bitMap->lock()->unlock(); | 6979 _bitMap->lock()->unlock(); |
6945 ConcurrentMarkSweepThread::desynchronize(true); | 6980 ConcurrentMarkSweepThread::desynchronize(true); |
6946 ConcurrentMarkSweepThread::acknowledge_yield_request(); | 6981 ConcurrentMarkSweepThread::acknowledge_yield_request(); |
6947 _collector->stopTimer(); | 6982 _collector->stopTimer(); |
6948 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | 6983 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); |
7293 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector, | 7328 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector, |
7294 MemRegion span, | 7329 MemRegion span, |
7295 CMSBitMap* bitMap, CMSMarkStack* markStack, | 7330 CMSBitMap* bitMap, CMSMarkStack* markStack, |
7296 CMSMarkStack* revisitStack, | 7331 CMSMarkStack* revisitStack, |
7297 HeapWord* finger, MarkFromRootsClosure* parent) : | 7332 HeapWord* finger, MarkFromRootsClosure* parent) : |
7298 OopClosure(collector->ref_processor()), | 7333 KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack), |
7299 _collector(collector), | |
7300 _span(span), | 7334 _span(span), |
7301 _bitMap(bitMap), | 7335 _bitMap(bitMap), |
7302 _markStack(markStack), | 7336 _markStack(markStack), |
7303 _revisitStack(revisitStack), | |
7304 _finger(finger), | 7337 _finger(finger), |
7305 _parent(parent), | 7338 _parent(parent) |
7306 _should_remember_klasses(collector->should_unload_classes()) | |
7307 { } | 7339 { } |
7308 | 7340 |
7309 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, | 7341 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, |
7310 MemRegion span, | 7342 MemRegion span, |
7311 CMSBitMap* bit_map, | 7343 CMSBitMap* bit_map, |
7313 CMSMarkStack* overflow_stack, | 7345 CMSMarkStack* overflow_stack, |
7314 CMSMarkStack* revisit_stack, | 7346 CMSMarkStack* revisit_stack, |
7315 HeapWord* finger, | 7347 HeapWord* finger, |
7316 HeapWord** global_finger_addr, | 7348 HeapWord** global_finger_addr, |
7317 Par_MarkFromRootsClosure* parent) : | 7349 Par_MarkFromRootsClosure* parent) : |
7318 OopClosure(collector->ref_processor()), | 7350 Par_KlassRememberingOopClosure(collector, |
7319 _collector(collector), | 7351 collector->ref_processor(), |
7352 revisit_stack), | |
7320 _whole_span(collector->_span), | 7353 _whole_span(collector->_span), |
7321 _span(span), | 7354 _span(span), |
7322 _bit_map(bit_map), | 7355 _bit_map(bit_map), |
7323 _work_queue(work_queue), | 7356 _work_queue(work_queue), |
7324 _overflow_stack(overflow_stack), | 7357 _overflow_stack(overflow_stack), |
7325 _revisit_stack(revisit_stack), | |
7326 _finger(finger), | 7358 _finger(finger), |
7327 _global_finger_addr(global_finger_addr), | 7359 _global_finger_addr(global_finger_addr), |
7328 _parent(parent), | 7360 _parent(parent) |
7329 _should_remember_klasses(collector->should_unload_classes()) | |
7330 { } | 7361 { } |
7331 | 7362 |
7332 // Assumes thread-safe access by callers, who are | 7363 // Assumes thread-safe access by callers, who are |
7333 // responsible for mutual exclusion. | 7364 // responsible for mutual exclusion. |
7334 void CMSCollector::lower_restart_addr(HeapWord* low) { | 7365 void CMSCollector::lower_restart_addr(HeapWord* low) { |
7454 } | 7485 } |
7455 | 7486 |
7456 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } | 7487 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } |
7457 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } | 7488 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } |
7458 | 7489 |
7490 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector, | |
7491 ReferenceProcessor* rp, | |
7492 CMSMarkStack* revisit_stack) : | |
7493 OopClosure(rp), | |
7494 _collector(collector), | |
7495 _revisit_stack(revisit_stack), | |
7496 _should_remember_klasses(collector->should_unload_classes()) {} | |
7497 | |
7459 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, | 7498 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, |
7460 MemRegion span, | 7499 MemRegion span, |
7461 ReferenceProcessor* rp, | 7500 ReferenceProcessor* rp, |
7462 CMSBitMap* bit_map, | 7501 CMSBitMap* bit_map, |
7463 CMSBitMap* mod_union_table, | 7502 CMSBitMap* mod_union_table, |
7464 CMSMarkStack* mark_stack, | 7503 CMSMarkStack* mark_stack, |
7465 CMSMarkStack* revisit_stack, | 7504 CMSMarkStack* revisit_stack, |
7466 bool concurrent_precleaning): | 7505 bool concurrent_precleaning): |
7467 OopClosure(rp), | 7506 KlassRememberingOopClosure(collector, rp, revisit_stack), |
7468 _collector(collector), | |
7469 _span(span), | 7507 _span(span), |
7470 _bit_map(bit_map), | 7508 _bit_map(bit_map), |
7471 _mod_union_table(mod_union_table), | 7509 _mod_union_table(mod_union_table), |
7472 _mark_stack(mark_stack), | 7510 _mark_stack(mark_stack), |
7473 _revisit_stack(revisit_stack), | 7511 _concurrent_precleaning(concurrent_precleaning) |
7474 _concurrent_precleaning(concurrent_precleaning), | |
7475 _should_remember_klasses(collector->should_unload_classes()) | |
7476 { | 7512 { |
7477 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | 7513 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); |
7478 } | 7514 } |
7479 | 7515 |
7480 // Grey object rescan during pre-cleaning and second checkpoint phases -- | 7516 // Grey object rescan during pre-cleaning and second checkpoint phases -- |
7538 MemRegion span, | 7574 MemRegion span, |
7539 ReferenceProcessor* rp, | 7575 ReferenceProcessor* rp, |
7540 CMSBitMap* bit_map, | 7576 CMSBitMap* bit_map, |
7541 OopTaskQueue* work_queue, | 7577 OopTaskQueue* work_queue, |
7542 CMSMarkStack* revisit_stack): | 7578 CMSMarkStack* revisit_stack): |
7543 OopClosure(rp), | 7579 Par_KlassRememberingOopClosure(collector, rp, revisit_stack), |
7544 _collector(collector), | |
7545 _span(span), | 7580 _span(span), |
7546 _bit_map(bit_map), | 7581 _bit_map(bit_map), |
7547 _work_queue(work_queue), | 7582 _work_queue(work_queue) |
7548 _revisit_stack(revisit_stack), | |
7549 _should_remember_klasses(collector->should_unload_classes()) | |
7550 { | 7583 { |
7551 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | 7584 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); |
7552 } | 7585 } |
7553 | 7586 |
7554 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); } | 7587 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); } |
7597 } | 7630 } |
7598 | 7631 |
7599 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } | 7632 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
7600 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } | 7633 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
7601 | 7634 |
7602 void PushAndMarkClosure::remember_klass(Klass* k) { | |
7603 if (!_revisit_stack->push(oop(k))) { | |
7604 fatal("Revisit stack overflowed in PushAndMarkClosure"); | |
7605 } | |
7606 } | |
7607 | |
7608 void Par_PushAndMarkClosure::remember_klass(Klass* k) { | |
7609 if (!_revisit_stack->par_push(oop(k))) { | |
7610 fatal("Revisit stack overflowed in Par_PushAndMarkClosure"); |
7611 } | |
7612 } | |
7613 | |
7614 void CMSPrecleanRefsYieldClosure::do_yield_work() { | 7635 void CMSPrecleanRefsYieldClosure::do_yield_work() { |
7636 DEBUG_ONLY(RememberKlassesChecker mux(false);) | |
7615 Mutex* bml = _collector->bitMapLock(); | 7637 Mutex* bml = _collector->bitMapLock(); |
7616 assert_lock_strong(bml); | 7638 assert_lock_strong(bml); |
7617 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | 7639 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), |
7618 "CMS thread should hold CMS token"); | 7640 "CMS thread should hold CMS token"); |
7619 | 7641 |
8300 HeapWord* addr = (HeapWord*)obj; | 8322 HeapWord* addr = (HeapWord*)obj; |
8301 return addr != NULL && | 8323 return addr != NULL && |
8302 (!_span.contains(addr) || _bit_map->isMarked(addr)); | 8324 (!_span.contains(addr) || _bit_map->isMarked(addr)); |
8303 } | 8325 } |
8304 | 8326 |
8327 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector, | |
8328 MemRegion span, | |
8329 CMSBitMap* bit_map, CMSMarkStack* mark_stack, | |
8330 CMSMarkStack* revisit_stack, bool cpc): | |
8331 KlassRememberingOopClosure(collector, NULL, revisit_stack), | |
8332 _span(span), | |
8333 _bit_map(bit_map), | |
8334 _mark_stack(mark_stack), | |
8335 _concurrent_precleaning(cpc) { | |
8336 assert(!_span.is_empty(), "Empty span could spell trouble"); | |
8337 } | |
8338 | |
8339 | |
8305 // CMSKeepAliveClosure: the serial version | 8340 // CMSKeepAliveClosure: the serial version |
8306 void CMSKeepAliveClosure::do_oop(oop obj) { | 8341 void CMSKeepAliveClosure::do_oop(oop obj) { |
8307 HeapWord* addr = (HeapWord*)obj; | 8342 HeapWord* addr = (HeapWord*)obj; |
8308 if (_span.contains(addr) && | 8343 if (_span.contains(addr) && |
8309 !_bit_map->isMarked(addr)) { | 8344 !_bit_map->isMarked(addr)) { |
8383 new_oop->oop_iterate(&_mark_and_push); | 8418 new_oop->oop_iterate(&_mark_and_push); |
8384 } | 8419 } |
8385 } | 8420 } |
8386 } | 8421 } |
8387 | 8422 |
8423 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure( | |
8424 CMSCollector* collector, | |
8425 MemRegion span, CMSBitMap* bit_map, | |
8426 CMSMarkStack* revisit_stack, | |
8427 OopTaskQueue* work_queue): | |
8428 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), | |
8429 _span(span), | |
8430 _bit_map(bit_map), | |
8431 _work_queue(work_queue) { } | |
8432 | |
8388 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { | 8433 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { |
8389 HeapWord* addr = (HeapWord*)obj; | 8434 HeapWord* addr = (HeapWord*)obj; |
8390 if (_span.contains(addr) && | 8435 if (_span.contains(addr) && |
8391 !_bit_map->isMarked(addr)) { | 8436 !_bit_map->isMarked(addr)) { |
8392 if (_bit_map->par_mark(addr)) { | 8437 if (_bit_map->par_mark(addr)) { |