comparison: src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 993:54b3b351d6f9

Merge

author:   jrose
date:     Wed, 23 Sep 2009 23:56:15 -0700
parents:  148e5441d916 8b46c4d82093
children: 753cf9794df9
2274 { 2274 {
2275 ReleaseForegroundGC x(this); 2275 ReleaseForegroundGC x(this);
2276 2276
2277 VM_CMS_Final_Remark final_remark_op(this); 2277 VM_CMS_Final_Remark final_remark_op(this);
2278 VMThread::execute(&final_remark_op); 2278 VMThread::execute(&final_remark_op);
2279 } 2279 }
2280 assert(_foregroundGCShouldWait, "block post-condition"); 2280 assert(_foregroundGCShouldWait, "block post-condition");
2281 break; 2281 break;
2282 case Sweeping: 2282 case Sweeping:
2283 if (UseAdaptiveSizePolicy) { 2283 if (UseAdaptiveSizePolicy) {
2284 size_policy()->concurrent_sweeping_begin(); 2284 size_policy()->concurrent_sweeping_begin();
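For context, ReleaseForegroundGC is a stack-allocated helper defined earlier in this file: its constructor lets a blocked foreground collection proceed while the background collector schedules the remark VM operation, and its destructor restores the "block post-condition" asserted just after the scope. A condensed sketch (abridged; the asserts of the real helper are omitted):

    class ReleaseForegroundGC: public StackObj {
     private:
      CMSCollector* _c;
     public:
      ReleaseForegroundGC(CMSCollector* c) : _c(c) {
        MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
        // Allow a potentially blocked foreground collector to proceed.
        _c->_foregroundGCShouldWait = false;
        if (_c->_foregroundGCIsActive) {
          CGC_lock->notify();
        }
      }
      ~ReleaseForegroundGC() {
        MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
        _c->_foregroundGCShouldWait = true;  // re-establish the post-condition
      }
    };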
3503 3503
3504 // weak reference processing has not started yet. 3504 // weak reference processing has not started yet.
3505 ref_processor()->set_enqueuing_is_done(false); 3505 ref_processor()->set_enqueuing_is_done(false);
3506 3506
3507 { 3507 {
3508 // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3508 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) 3509 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3509 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 3510 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3510 gch->gen_process_strong_roots(_cmsGen->level(), 3511 gch->gen_process_strong_roots(_cmsGen->level(),
3511 true, // younger gens are roots 3512 true, // younger gens are roots
3512 true, // activate StrongRootsScope 3513 true, // activate StrongRootsScope
3629 // obsolete contents from a short-circuited previous CMS cycle. 3630 // obsolete contents from a short-circuited previous CMS cycle.
3630 _revisitStack.reset(); 3631 _revisitStack.reset();
3631 verify_work_stacks_empty(); 3632 verify_work_stacks_empty();
3632 verify_overflow_empty(); 3633 verify_overflow_empty();
3633 assert(_revisitStack.isEmpty(), "tabula rasa"); 3634 assert(_revisitStack.isEmpty(), "tabula rasa");
3635
3636 DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
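The recurring addition in this changeset is the debug-only RememberKlassesChecker guard. Its definition is outside this diff; the sketch below is an assumed minimal shape, with hypothetical accessor names, in which the guard saves the current klass-remembering check state, forces it for the scope, and restores it on exit:

    #ifdef ASSERT
    class RememberKlassesChecker : public StackObj {  // assumed shape, not quoted
     private:
      bool _saved;
     public:
      RememberKlassesChecker(bool checking_on) {
        _saved = OopClosure::must_remember_klasses();       // hypothetical accessors
        OopClosure::set_must_remember_klasses(checking_on);
      }
      ~RememberKlassesChecker() {
        OopClosure::set_must_remember_klasses(_saved);      // restore prior state
      }
    };
    #endif // ASSERT

The intent, as the later hunks suggest, is that scans performed while CMSClassUnloadingEnabled is set are checked, in debug builds, for remembering klasses on the revisit stack.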
3634 3637
3635 bool result = false; 3638 bool result = false;
3636 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) { 3639 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
3637 result = do_marking_mt(asynch); 3640 result = do_marking_mt(asynch);
3638 } else { 3641 } else {
3965 // have been bumped up by the thread that claimed the last 3968 // have been bumped up by the thread that claimed the last
3966 // task. 3969 // task.
3967 pst->all_tasks_completed(); 3970 pst->all_tasks_completed();
3968 } 3971 }
3969 3972
3970 class Par_ConcMarkingClosure: public OopClosure { 3973 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
3971 private: 3974 private:
3972 CMSCollector* _collector;
3973 MemRegion _span; 3975 MemRegion _span;
3974 CMSBitMap* _bit_map; 3976 CMSBitMap* _bit_map;
3975 CMSMarkStack* _overflow_stack; 3977 CMSMarkStack* _overflow_stack;
3976 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
3977 OopTaskQueue* _work_queue; 3978 OopTaskQueue* _work_queue;
3978 protected: 3979 protected:
3979 DO_OOP_WORK_DEFN 3980 DO_OOP_WORK_DEFN
3980 public: 3981 public:
3981 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue, 3982 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
3982 CMSBitMap* bit_map, CMSMarkStack* overflow_stack): 3983 CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
3983 _collector(collector), 3984 CMSMarkStack* revisit_stack):
3985 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
3984 _span(_collector->_span), 3986 _span(_collector->_span),
3985 _work_queue(work_queue), 3987 _work_queue(work_queue),
3986 _bit_map(bit_map), 3988 _bit_map(bit_map),
3987 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc. 3989 _overflow_stack(overflow_stack)
3990 { }
3988 virtual void do_oop(oop* p); 3991 virtual void do_oop(oop* p);
3989 virtual void do_oop(narrowOop* p); 3992 virtual void do_oop(narrowOop* p);
3990 void trim_queue(size_t max); 3993 void trim_queue(size_t max);
3991 void handle_stack_overflow(HeapWord* lost); 3994 void handle_stack_overflow(HeapWord* lost);
3992 }; 3995 };
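Par_ConcMarkingClosure now inherits its collector pointer and revisit-stack plumbing from Par_KlassRememberingOopClosure rather than declaring the fields itself (and the old "XXXXXX Check proper use" placeholder goes away). The base class is declared elsewhere; its shape, inferred from the constructor calls in this changeset, is plausibly:

    class Par_KlassRememberingOopClosure : public OopClosure {  // inferred, not quoted
     protected:
      CMSCollector* _collector;
      CMSMarkStack* _revisit_stack;  // shared stack, hence par_push() below
      const bool    _should_remember_klasses;
     public:
      Par_KlassRememberingOopClosure(CMSCollector* collector,
                                     ReferenceProcessor* rp,
                                     CMSMarkStack* revisit_stack);
      virtual const bool should_remember_klasses() const {
        return _should_remember_klasses;
      }
      virtual void remember_klass(Klass* k);  // par_push(oop(k)); fatal on overflow
    };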
4070 void CMSConcMarkingTask::do_work_steal(int i) { 4073 void CMSConcMarkingTask::do_work_steal(int i) {
4071 OopTaskQueue* work_q = work_queue(i); 4074 OopTaskQueue* work_q = work_queue(i);
4072 oop obj_to_scan; 4075 oop obj_to_scan;
4073 CMSBitMap* bm = &(_collector->_markBitMap); 4076 CMSBitMap* bm = &(_collector->_markBitMap);
4074 CMSMarkStack* ovflw = &(_collector->_markStack); 4077 CMSMarkStack* ovflw = &(_collector->_markStack);
4078 CMSMarkStack* revisit = &(_collector->_revisitStack);
4075 int* seed = _collector->hash_seed(i); 4079 int* seed = _collector->hash_seed(i);
4076 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw); 4080 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
4077 while (true) { 4081 while (true) {
4078 cl.trim_queue(0); 4082 cl.trim_queue(0);
4079 assert(work_q->size() == 0, "Should have been emptied above"); 4083 assert(work_q->size() == 0, "Should have been emptied above");
4080 if (get_work_from_overflow_stack(ovflw, work_q)) { 4084 if (get_work_from_overflow_stack(ovflw, work_q)) {
4081 // Can't assert below because the work obtained from the 4085 // Can't assert below because the work obtained from the
4096 // This is run by the CMS (coordinator) thread. 4100 // This is run by the CMS (coordinator) thread.
4097 void CMSConcMarkingTask::coordinator_yield() { 4101 void CMSConcMarkingTask::coordinator_yield() {
4098 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 4102 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4099 "CMS thread should hold CMS token"); 4103 "CMS thread should hold CMS token");
4100 4104
4105 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4101 // First give up the locks, then yield, then re-lock 4106 // First give up the locks, then yield, then re-lock
4102 // We should probably use a constructor/destructor idiom to 4107 // We should probably use a constructor/destructor idiom to
4103 // do this unlock/lock or modify the MutexUnlocker class to 4108 // do this unlock/lock or modify the MutexUnlocker class to
4104 // serve our purpose. XXX 4109 // serve our purpose. XXX
4105 assert_lock_strong(_bit_map_lock); 4110 assert_lock_strong(_bit_map_lock);
4171 // Refs discovery is already non-atomic. 4176 // Refs discovery is already non-atomic.
4172 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); 4177 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4173 // Mutate the Refs discovery so it is MT during the 4178 // Mutate the Refs discovery so it is MT during the
4174 // multi-threaded marking phase. 4179 // multi-threaded marking phase.
4175 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1); 4180 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4181
4182 DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
4176 4183
4177 conc_workers()->start_task(&tsk); 4184 conc_workers()->start_task(&tsk);
4178 while (tsk.yielded()) { 4185 while (tsk.yielded()) {
4179 tsk.coordinator_yield(); 4186 tsk.coordinator_yield();
4180 conc_workers()->continue_task(&tsk); 4187 conc_workers()->continue_task(&tsk);
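ReferenceProcessorMTMutator, used just above, is an RAII helper from referenceProcessor.hpp that turns MT reference discovery on only while the worker tasks run. A sketch under assumed accessor names:

    class ReferenceProcessorMTMutator : public StackObj {  // assumed shape
     private:
      ReferenceProcessor* _rp;
      bool                _saved_mt;
     public:
      ReferenceProcessorMTMutator(ReferenceProcessor* rp, bool mt)
          : _rp(rp), _saved_mt(rp->discovery_is_mt()) {
        _rp->set_mt_discovery(mt);         // MT discovery while workers run
      }
      ~ReferenceProcessorMTMutator() {
        _rp->set_mt_discovery(_saved_mt);  // restore on scope exit
      }
    };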
4411 if (clean_refs) { 4418 if (clean_refs) {
4412 ReferenceProcessor* rp = ref_processor(); 4419 ReferenceProcessor* rp = ref_processor();
4413 CMSPrecleanRefsYieldClosure yield_cl(this); 4420 CMSPrecleanRefsYieldClosure yield_cl(this);
4414 assert(rp->span().equals(_span), "Spans should be equal"); 4421 assert(rp->span().equals(_span), "Spans should be equal");
4415 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, 4422 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4416 &_markStack, true /* preclean */); 4423 &_markStack, &_revisitStack,
4424 true /* preclean */);
4417 CMSDrainMarkingStackClosure complete_trace(this, 4425 CMSDrainMarkingStackClosure complete_trace(this,
4418 _span, &_markBitMap, &_markStack, 4426 _span, &_markBitMap, &_markStack,
4419 &keep_alive, true /* preclean */); 4427 &keep_alive, true /* preclean */);
4420 4428
4421 // We don't want this step to interfere with a young 4429 // We don't want this step to interfere with a young
4431 stopTimer(); 4439 stopTimer();
4432 CMSTokenSyncWithLocks x(true /* is cms thread */, 4440 CMSTokenSyncWithLocks x(true /* is cms thread */,
4433 bitMapLock()); 4441 bitMapLock());
4434 startTimer(); 4442 startTimer();
4435 sample_eden(); 4443 sample_eden();
4444
4436 // The following will yield to allow foreground 4445 // The following will yield to allow foreground
4437 // collection to proceed promptly. XXX YSR: 4446 // collection to proceed promptly. XXX YSR:
4438 // The code in this method may need further 4447 // The code in this method may need further
4439 // tweaking for better performance and some restructuring 4448 // tweaking for better performance and some restructuring
4440 // for cleaner interfaces. 4449 // for cleaner interfaces.
4460 unsigned int before_count = 4469 unsigned int before_count =
4461 GenCollectedHeap::heap()->total_collections(); 4470 GenCollectedHeap::heap()->total_collections();
4462 SurvivorSpacePrecleanClosure 4471 SurvivorSpacePrecleanClosure
4463 sss_cl(this, _span, &_markBitMap, &_markStack, 4472 sss_cl(this, _span, &_markBitMap, &_markStack,
4464 &pam_cl, before_count, CMSYield); 4473 &pam_cl, before_count, CMSYield);
4474 DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
4465 dng->from()->object_iterate_careful(&sss_cl); 4475 dng->from()->object_iterate_careful(&sss_cl);
4466 dng->to()->object_iterate_careful(&sss_cl); 4476 dng->to()->object_iterate_careful(&sss_cl);
4467 } 4477 }
4468 MarkRefsIntoAndScanClosure 4478 MarkRefsIntoAndScanClosure
4469 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, 4479 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4561 ConcurrentMarkSweepGeneration* gen, 4571 ConcurrentMarkSweepGeneration* gen,
4562 ScanMarkedObjectsAgainCarefullyClosure* cl) { 4572 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4563 verify_work_stacks_empty(); 4573 verify_work_stacks_empty();
4564 verify_overflow_empty(); 4574 verify_overflow_empty();
4565 4575
4576 // Turn off checking for this method but turn it back on
4577 // selectively. There are yield points in this method
4578 // but it is difficult to turn the checking off just around
4579 // the yield points. It is simpler to selectively turn
4580 // it on.
4581 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4582
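The comment above describes the pattern used throughout this method: one guard turns checking off for the whole method (whose yield points would otherwise trip the checker), and a nested guard turns it back on exactly around the iteration that must remember klasses. In outline (names and control flow illustrative only):

    void preclean_outline(CompactibleFreeListSpace* sp,  // illustrative sketch
                          ScanMarkedObjectsAgainCarefullyClosure* cl,
                          MemRegion dirtyRegion) {
      DEBUG_ONLY(RememberKlassesChecker mux(false);)   // off across yield points
      // ... token sync, lock acquisition, timer start, possible yields ...
      {
        DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
        sp->object_iterate_careful_m(dirtyRegion, cl); // checking on just here
      }
    }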
4566 // strategy: starting with the first card, accumulate contiguous 4583 // strategy: starting with the first card, accumulate contiguous
4567 // ranges of dirty cards; clear these cards, then scan the region 4584 // ranges of dirty cards; clear these cards, then scan the region
4568 // covered by these cards. 4585 // covered by these cards.
4569 4586
4570 // Since all of the MUT is committed ahead, we can just use 4587 // Since all of the MUT is committed ahead, we can just use
4589 HandleMark hm; 4606 HandleMark hm;
4590 4607
4591 MemRegion dirtyRegion; 4608 MemRegion dirtyRegion;
4592 { 4609 {
4593 stopTimer(); 4610 stopTimer();
4611 // Potential yield point
4594 CMSTokenSync ts(true); 4612 CMSTokenSync ts(true);
4595 startTimer(); 4613 startTimer();
4596 sample_eden(); 4614 sample_eden();
4597 // Get dirty region starting at nextOffset (inclusive), 4615 // Get dirty region starting at nextOffset (inclusive),
4598 // simultaneously clearing it. 4616 // simultaneously clearing it.
4614 // yields for foreground GC as needed). 4632 // yields for foreground GC as needed).
4615 if (!dirtyRegion.is_empty()) { 4633 if (!dirtyRegion.is_empty()) {
4616 assert(numDirtyCards > 0, "consistency check"); 4634 assert(numDirtyCards > 0, "consistency check");
4617 HeapWord* stop_point = NULL; 4635 HeapWord* stop_point = NULL;
4618 stopTimer(); 4636 stopTimer();
4637 // Potential yield point
4619 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), 4638 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4620 bitMapLock()); 4639 bitMapLock());
4621 startTimer(); 4640 startTimer();
4622 { 4641 {
4623 verify_work_stacks_empty(); 4642 verify_work_stacks_empty();
4624 verify_overflow_empty(); 4643 verify_overflow_empty();
4625 sample_eden(); 4644 sample_eden();
4645 DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
4626 stop_point = 4646 stop_point =
4627 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); 4647 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4628 } 4648 }
4629 if (stop_point != NULL) { 4649 if (stop_point != NULL) {
4630 // The careful iteration stopped early either because it found an 4650 // The careful iteration stopped early either because it found an
4708 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock()); 4728 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4709 startTimer(); 4729 startTimer();
4710 sample_eden(); 4730 sample_eden();
4711 verify_work_stacks_empty(); 4731 verify_work_stacks_empty();
4712 verify_overflow_empty(); 4732 verify_overflow_empty();
4733 DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
4713 HeapWord* stop_point = 4734 HeapWord* stop_point =
4714 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); 4735 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4715 if (stop_point != NULL) { 4736 if (stop_point != NULL) {
4716 // The careful iteration stopped early because it found an 4737 // The careful iteration stopped early because it found an
4717 // uninitialized object. Redirty the bits corresponding to the 4738 // uninitialized object. Redirty the bits corresponding to the
4807 CodeCache::gc_prologue(); 4828 CodeCache::gc_prologue();
4808 } 4829 }
4809 assert(haveFreelistLocks(), "must have free list locks"); 4830 assert(haveFreelistLocks(), "must have free list locks");
4810 assert_lock_strong(bitMapLock()); 4831 assert_lock_strong(bitMapLock());
4811 4832
4833 DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
4812 if (!init_mark_was_synchronous) { 4834 if (!init_mark_was_synchronous) {
4813 // We might assume that we need not fill TLAB's when 4835 // We might assume that we need not fill TLAB's when
4814 // CMSScavengeBeforeRemark is set, because we may have just done 4836 // CMSScavengeBeforeRemark is set, because we may have just done
4815 // a scavenge which would have filled all TLAB's -- and besides 4837 // a scavenge which would have filled all TLAB's -- and besides
4816 // Eden would be empty. This however may not always be the case -- 4838 // Eden would be empty. This however may not always be the case --
4909 _markStack.capacity()); 4931 _markStack.capacity());
4910 } 4932 }
4911 } 4933 }
4912 _markStack._hit_limit = 0; 4934 _markStack._hit_limit = 0;
4913 _markStack._failed_double = 0; 4935 _markStack._failed_double = 0;
4936
4937 // Check that all the klasses have been checked
4938 assert(_revisitStack.isEmpty(), "Not all klasses revisited");
4914 4939
4915 if ((VerifyAfterGC || VerifyDuringGC) && 4940 if ((VerifyAfterGC || VerifyDuringGC) &&
4916 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { 4941 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4917 verify_after_remark(); 4942 verify_after_remark();
4918 } 4943 }
5595 }; 5620 };
5596 5621
5597 void CMSRefProcTaskProxy::work(int i) { 5622 void CMSRefProcTaskProxy::work(int i) {
5598 assert(_collector->_span.equals(_span), "Inconsistency in _span"); 5623 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5599 CMSParKeepAliveClosure par_keep_alive(_collector, _span, 5624 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5600 _mark_bit_map, work_queue(i)); 5625 _mark_bit_map,
5626 &_collector->_revisitStack,
5627 work_queue(i));
5601 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, 5628 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5602 _mark_bit_map, work_queue(i)); 5629 _mark_bit_map,
5630 &_collector->_revisitStack,
5631 work_queue(i));
5603 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); 5632 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5604 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack); 5633 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
5605 if (_task.marks_oops_alive()) { 5634 if (_task.marks_oops_alive()) {
5606 do_work_steal(i, &par_drain_stack, &par_keep_alive, 5635 do_work_steal(i, &par_drain_stack, &par_keep_alive,
5607 _collector->hash_seed(i)); 5636 _collector->hash_seed(i));
5625 _task.work(i); 5654 _task.work(i);
5626 } 5655 }
5627 }; 5656 };
5628 5657
5629 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, 5658 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5630 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue): 5659 MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
5631 _collector(collector), 5660 OopTaskQueue* work_queue):
5661 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
5632 _span(span), 5662 _span(span),
5633 _bit_map(bit_map), 5663 _bit_map(bit_map),
5634 _work_queue(work_queue), 5664 _work_queue(work_queue),
5635 _mark_and_push(collector, span, bit_map, work_queue), 5665 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
5636 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), 5666 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5637 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))) 5667 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5638 { } 5668 { }
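For a concrete feel of the _low_water_mark expression: with an assumed task-queue capacity of 16K entries, CMSWorkQueueDrainThreshold at its (assumed) default of 10, and ParallelGCThreads=4, the flag term governs:

    // MIN2((uint)(16384 / 4), (uint)(10 * 4)) == MIN2(4096u, 40u) == 40
    // The closure drains its work queue back toward this mark rather than
    // letting keep-alive work pile up (interpretation from the name).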
5639 5669
5640 // . see if we can share work_queues with ParNew? XXX 5670 // . see if we can share work_queues with ParNew? XXX
5717 // Process weak references. 5747 // Process weak references.
5718 rp->setup_policy(clear_all_soft_refs); 5748 rp->setup_policy(clear_all_soft_refs);
5719 verify_work_stacks_empty(); 5749 verify_work_stacks_empty();
5720 5750
5721 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, 5751 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5722 &_markStack, false /* !preclean */); 5752 &_markStack, &_revisitStack,
5753 false /* !preclean */);
5723 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, 5754 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5724 _span, &_markBitMap, &_markStack, 5755 _span, &_markBitMap, &_markStack,
5725 &cmsKeepAliveClosure, false /* !preclean */); 5756 &cmsKeepAliveClosure, false /* !preclean */);
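This keep-alive/drain pair is handed to the reference processor as the keep_alive and complete_gc closures. The call that typically follows, inside the traced block below, has roughly this shape (reconstructed from HotSpot code of this era, not quoted from this hunk):

    if (rp->processing_is_mt()) {
      CMSRefProcTaskExecutor task_executor(*this);           // parallel path
      rp->process_discovered_references(&_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
                                        &task_executor);
    } else {
      rp->process_discovered_references(&_is_alive_closure,  // serial path
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
                                        NULL);
    }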
5726 { 5757 {
5727 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); 5758 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
6552 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6583 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6553 "CMS thread should hold CMS token"); 6584 "CMS thread should hold CMS token");
6554 assert_lock_strong(_freelistLock); 6585 assert_lock_strong(_freelistLock);
6555 assert_lock_strong(_bit_map->lock()); 6586 assert_lock_strong(_bit_map->lock());
6556 // relinquish the free_list_lock and bitMaplock() 6587 // relinquish the free_list_lock and bitMaplock()
6588 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6557 _bit_map->lock()->unlock(); 6589 _bit_map->lock()->unlock();
6558 _freelistLock->unlock(); 6590 _freelistLock->unlock();
6559 ConcurrentMarkSweepThread::desynchronize(true); 6591 ConcurrentMarkSweepThread::desynchronize(true);
6560 ConcurrentMarkSweepThread::acknowledge_yield_request(); 6592 ConcurrentMarkSweepThread::acknowledge_yield_request();
6561 _collector->stopTimer(); 6593 _collector->stopTimer();
6724 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() { 6756 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6725 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6757 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6726 "CMS thread should hold CMS token"); 6758 "CMS thread should hold CMS token");
6727 assert_lock_strong(_freelistLock); 6759 assert_lock_strong(_freelistLock);
6728 assert_lock_strong(_bitMap->lock()); 6760 assert_lock_strong(_bitMap->lock());
6761 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6729 // relinquish the free_list_lock and bitMaplock() 6762 // relinquish the free_list_lock and bitMaplock()
6730 _bitMap->lock()->unlock(); 6763 _bitMap->lock()->unlock();
6731 _freelistLock->unlock(); 6764 _freelistLock->unlock();
6732 ConcurrentMarkSweepThread::desynchronize(true); 6765 ConcurrentMarkSweepThread::desynchronize(true);
6733 ConcurrentMarkSweepThread::acknowledge_yield_request(); 6766 ConcurrentMarkSweepThread::acknowledge_yield_request();
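This and the neighboring do_yield_work hunks all follow one protocol, with the new checker guard slotted in before the locks are dropped:

    // 1. DEBUG_ONLY(RememberKlassesChecker xxx(false);)  -- checking off across the yield
    // 2. unlock the bit-map lock (and the free-list lock where held)
    // 3. ConcurrentMarkSweepThread::desynchronize(true);
    // 4. ConcurrentMarkSweepThread::acknowledge_yield_request();
    // 5. _collector->stopTimer(); sleep briefly; then re-lock and restart the timer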
6800 6833
6801 void SurvivorSpacePrecleanClosure::do_yield_work() { 6834 void SurvivorSpacePrecleanClosure::do_yield_work() {
6802 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6835 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6803 "CMS thread should hold CMS token"); 6836 "CMS thread should hold CMS token");
6804 assert_lock_strong(_bit_map->lock()); 6837 assert_lock_strong(_bit_map->lock());
6838 DEBUG_ONLY(RememberKlassesChecker smx(false);)
6805 // Relinquish the bit map lock 6839 // Relinquish the bit map lock
6806 _bit_map->lock()->unlock(); 6840 _bit_map->lock()->unlock();
6807 ConcurrentMarkSweepThread::desynchronize(true); 6841 ConcurrentMarkSweepThread::desynchronize(true);
6808 ConcurrentMarkSweepThread::acknowledge_yield_request(); 6842 ConcurrentMarkSweepThread::acknowledge_yield_request();
6809 _collector->stopTimer(); 6843 _collector->stopTimer();
6962 // do this unlock/lock or modify the MutexUnlocker class to 6996 // do this unlock/lock or modify the MutexUnlocker class to
6963 // serve our purpose. XXX 6997 // serve our purpose. XXX
6964 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6998 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6965 "CMS thread should hold CMS token"); 6999 "CMS thread should hold CMS token");
6966 assert_lock_strong(_bitMap->lock()); 7000 assert_lock_strong(_bitMap->lock());
7001 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6967 _bitMap->lock()->unlock(); 7002 _bitMap->lock()->unlock();
6968 ConcurrentMarkSweepThread::desynchronize(true); 7003 ConcurrentMarkSweepThread::desynchronize(true);
6969 ConcurrentMarkSweepThread::acknowledge_yield_request(); 7004 ConcurrentMarkSweepThread::acknowledge_yield_request();
6970 _collector->stopTimer(); 7005 _collector->stopTimer();
6971 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); 7006 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7316 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector, 7351 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7317 MemRegion span, 7352 MemRegion span,
7318 CMSBitMap* bitMap, CMSMarkStack* markStack, 7353 CMSBitMap* bitMap, CMSMarkStack* markStack,
7319 CMSMarkStack* revisitStack, 7354 CMSMarkStack* revisitStack,
7320 HeapWord* finger, MarkFromRootsClosure* parent) : 7355 HeapWord* finger, MarkFromRootsClosure* parent) :
7321 OopClosure(collector->ref_processor()), 7356 KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
7322 _collector(collector),
7323 _span(span), 7357 _span(span),
7324 _bitMap(bitMap), 7358 _bitMap(bitMap),
7325 _markStack(markStack), 7359 _markStack(markStack),
7326 _revisitStack(revisitStack),
7327 _finger(finger), 7360 _finger(finger),
7328 _parent(parent), 7361 _parent(parent)
7329 _should_remember_klasses(collector->should_unload_classes())
7330 { } 7362 { }
7331 7363
7332 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, 7364 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7333 MemRegion span, 7365 MemRegion span,
7334 CMSBitMap* bit_map, 7366 CMSBitMap* bit_map,
7336 CMSMarkStack* overflow_stack, 7368 CMSMarkStack* overflow_stack,
7337 CMSMarkStack* revisit_stack, 7369 CMSMarkStack* revisit_stack,
7338 HeapWord* finger, 7370 HeapWord* finger,
7339 HeapWord** global_finger_addr, 7371 HeapWord** global_finger_addr,
7340 Par_MarkFromRootsClosure* parent) : 7372 Par_MarkFromRootsClosure* parent) :
7341 OopClosure(collector->ref_processor()), 7373 Par_KlassRememberingOopClosure(collector,
7342 _collector(collector), 7374 collector->ref_processor(),
7375 revisit_stack),
7343 _whole_span(collector->_span), 7376 _whole_span(collector->_span),
7344 _span(span), 7377 _span(span),
7345 _bit_map(bit_map), 7378 _bit_map(bit_map),
7346 _work_queue(work_queue), 7379 _work_queue(work_queue),
7347 _overflow_stack(overflow_stack), 7380 _overflow_stack(overflow_stack),
7348 _revisit_stack(revisit_stack),
7349 _finger(finger), 7381 _finger(finger),
7350 _global_finger_addr(global_finger_addr), 7382 _global_finger_addr(global_finger_addr),
7351 _parent(parent), 7383 _parent(parent)
7352 _should_remember_klasses(collector->should_unload_classes())
7353 { } 7384 { }
7354 7385
7355 // Assumes thread-safe access by callers, who are 7386 // Assumes thread-safe access by callers, who are
7356 // responsible for mutual exclusion. 7387 // responsible for mutual exclusion.
7357 void CMSCollector::lower_restart_addr(HeapWord* low) { 7388 void CMSCollector::lower_restart_addr(HeapWord* low) {
7477 } 7508 }
7478 7509
7479 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } 7510 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7480 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } 7511 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7481 7512
7513 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
7514 ReferenceProcessor* rp,
7515 CMSMarkStack* revisit_stack) :
7516 OopClosure(rp),
7517 _collector(collector),
7518 _revisit_stack(revisit_stack),
7519 _should_remember_klasses(collector->should_unload_classes()) {}
7520
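KlassRememberingOopClosure is the serial twin of the Par_ variant sketched earlier: the constructor above initializes the same three fields, and remember_klass() presumably uses a plain push() where the parallel version uses par_push(), matching the old per-closure bodies removed further down. An assumed body, hoisted from the old PushAndMarkClosure::remember_klass:

    void KlassRememberingOopClosure::remember_klass(Klass* k) {  // assumed body
      if (!_revisit_stack->push(oop(k))) {
        fatal("Revisit stack overflowed in KlassRememberingOopClosure");
      }
    }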
7482 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, 7521 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7483 MemRegion span, 7522 MemRegion span,
7484 ReferenceProcessor* rp, 7523 ReferenceProcessor* rp,
7485 CMSBitMap* bit_map, 7524 CMSBitMap* bit_map,
7486 CMSBitMap* mod_union_table, 7525 CMSBitMap* mod_union_table,
7487 CMSMarkStack* mark_stack, 7526 CMSMarkStack* mark_stack,
7488 CMSMarkStack* revisit_stack, 7527 CMSMarkStack* revisit_stack,
7489 bool concurrent_precleaning): 7528 bool concurrent_precleaning):
7490 OopClosure(rp), 7529 KlassRememberingOopClosure(collector, rp, revisit_stack),
7491 _collector(collector),
7492 _span(span), 7530 _span(span),
7493 _bit_map(bit_map), 7531 _bit_map(bit_map),
7494 _mod_union_table(mod_union_table), 7532 _mod_union_table(mod_union_table),
7495 _mark_stack(mark_stack), 7533 _mark_stack(mark_stack),
7496 _revisit_stack(revisit_stack), 7534 _concurrent_precleaning(concurrent_precleaning)
7497 _concurrent_precleaning(concurrent_precleaning),
7498 _should_remember_klasses(collector->should_unload_classes())
7499 { 7535 {
7500 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); 7536 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7501 } 7537 }
7502 7538
7503 // Grey object rescan during pre-cleaning and second checkpoint phases -- 7539 // Grey object rescan during pre-cleaning and second checkpoint phases --
7561 MemRegion span, 7597 MemRegion span,
7562 ReferenceProcessor* rp, 7598 ReferenceProcessor* rp,
7563 CMSBitMap* bit_map, 7599 CMSBitMap* bit_map,
7564 OopTaskQueue* work_queue, 7600 OopTaskQueue* work_queue,
7565 CMSMarkStack* revisit_stack): 7601 CMSMarkStack* revisit_stack):
7566 OopClosure(rp), 7602 Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
7567 _collector(collector),
7568 _span(span), 7603 _span(span),
7569 _bit_map(bit_map), 7604 _bit_map(bit_map),
7570 _work_queue(work_queue), 7605 _work_queue(work_queue)
7571 _revisit_stack(revisit_stack),
7572 _should_remember_klasses(collector->should_unload_classes())
7573 { 7606 {
7574 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); 7607 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7575 } 7608 }
7576 7609
7577 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); } 7610 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7620 } 7653 }
7621 7654
7622 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } 7655 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7623 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } 7656 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7624 7657
7625 void PushAndMarkClosure::remember_klass(Klass* k) { 7658 void PushAndMarkClosure::remember_mdo(DataLayout* v) {
7626 if (!_revisit_stack->push(oop(k))) { 7659 // TBD
7627 fatal("Revisit stack overflowed in PushAndMarkClosure"); 7660 }
7628 } 7661
7629 } 7662 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
7630 7663 // TBD
7631 void Par_PushAndMarkClosure::remember_klass(Klass* k) {
7632 if (!_revisit_stack->par_push(oop(k))) {
7633 fatal("Revist stack overflowed in Par_PushAndMarkClosure");
7634 }
7635 } 7664 }
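Note the direction of this hunk: the per-closure remember_klass bodies move into the KlassRememberingOopClosure base classes, while new remember_mdo(DataLayout*) hooks are stubbed out ("TBD"), presumably reserving a slot for revisiting method-data (MDO) cells the same way klasses are revisited.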
7636 7665
7637 void CMSPrecleanRefsYieldClosure::do_yield_work() { 7666 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7667 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7638 Mutex* bml = _collector->bitMapLock(); 7668 Mutex* bml = _collector->bitMapLock();
7639 assert_lock_strong(bml); 7669 assert_lock_strong(bml);
7640 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 7670 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7641 "CMS thread should hold CMS token"); 7671 "CMS thread should hold CMS token");
7642 7672
8323 HeapWord* addr = (HeapWord*)obj; 8353 HeapWord* addr = (HeapWord*)obj;
8324 return addr != NULL && 8354 return addr != NULL &&
8325 (!_span.contains(addr) || _bit_map->isMarked(addr)); 8355 (!_span.contains(addr) || _bit_map->isMarked(addr));
8326 } 8356 }
8327 8357
8358 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8359 MemRegion span,
8360 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8361 CMSMarkStack* revisit_stack, bool cpc):
8362 KlassRememberingOopClosure(collector, NULL, revisit_stack),
8363 _span(span),
8364 _bit_map(bit_map),
8365 _mark_stack(mark_stack),
8366 _concurrent_precleaning(cpc) {
8367 assert(!_span.is_empty(), "Empty span could spell trouble");
8368 }
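This out-of-line constructor matches the two call sites updated earlier in this changeset (the precleaning path and the remark-time reference-processing path), and it passes NULL for the ReferenceProcessor, as the other keep-alive constructors in this changeset do.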
8369
8370
8328 // CMSKeepAliveClosure: the serial version 8371 // CMSKeepAliveClosure: the serial version
8329 void CMSKeepAliveClosure::do_oop(oop obj) { 8372 void CMSKeepAliveClosure::do_oop(oop obj) {
8330 HeapWord* addr = (HeapWord*)obj; 8373 HeapWord* addr = (HeapWord*)obj;
8331 if (_span.contains(addr) && 8374 if (_span.contains(addr) &&
8332 !_bit_map->isMarked(addr)) { 8375 !_bit_map->isMarked(addr)) {
8406 new_oop->oop_iterate(&_mark_and_push); 8449 new_oop->oop_iterate(&_mark_and_push);
8407 } 8450 }
8408 } 8451 }
8409 } 8452 }
8410 8453
8454 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8455 CMSCollector* collector,
8456 MemRegion span, CMSBitMap* bit_map,
8457 CMSMarkStack* revisit_stack,
8458 OopTaskQueue* work_queue):
8459 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
8460 _span(span),
8461 _bit_map(bit_map),
8462 _work_queue(work_queue) { }
8463
8411 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { 8464 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8412 HeapWord* addr = (HeapWord*)obj; 8465 HeapWord* addr = (HeapWord*)obj;
8413 if (_span.contains(addr) && 8466 if (_span.contains(addr) &&
8414 !_bit_map->isMarked(addr)) { 8467 !_bit_map->isMarked(addr)) {
8415 if (_bit_map->par_mark(addr)) { 8468 if (_bit_map->par_mark(addr)) {