Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 453:c96030fff130
6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
author | ysr |
---|---|
date | Thu, 20 Nov 2008 16:56:09 -0800 |
parents | 00b023ae2d78 |
children | 27a80744a83b |
comparison view — legend: equal | deleted | inserted | replaced
452:00b023ae2d78 | 453:c96030fff130 |
---|---|
1959 // Temporarily make refs discovery atomic | 1959 // Temporarily make refs discovery atomic |
1960 ReferenceProcessorAtomicMutator w(ref_processor(), true); | 1960 ReferenceProcessorAtomicMutator w(ref_processor(), true); |
1961 | 1961 |
1962 ref_processor()->set_enqueuing_is_done(false); | 1962 ref_processor()->set_enqueuing_is_done(false); |
1963 ref_processor()->enable_discovery(); | 1963 ref_processor()->enable_discovery(); |
1964 ref_processor()->snap_policy(clear_all_soft_refs); | |
1964 // If an asynchronous collection finishes, the _modUnionTable is | 1965 // If an asynchronous collection finishes, the _modUnionTable is |
1965 // all clear. If we are assuming the collection from an asynchronous | 1966 // all clear. If we are assuming the collection from an asynchronous |
1966 // collection, clear the _modUnionTable. | 1967 // collection, clear the _modUnionTable. |
1967 assert(_collectorState != Idling || _modUnionTable.isAllClear(), | 1968 assert(_collectorState != Idling || _modUnionTable.isAllClear(), |
1968 "_modUnionTable should be clear if the baton was not passed"); | 1969 "_modUnionTable should be clear if the baton was not passed"); |
2381 | 2382 |
2382 if (VerifyBeforeGC && | 2383 if (VerifyBeforeGC && |
2383 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | 2384 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { |
2384 Universe::verify(true); | 2385 Universe::verify(true); |
2385 } | 2386 } |
2387 | |
2388 // Snapshot the soft reference policy to be used in this collection cycle. | |
2389 ref_processor()->snap_policy(clear_all_soft_refs); | |
2386 | 2390 |
2387 bool init_mark_was_synchronous = false; // until proven otherwise | 2391 bool init_mark_was_synchronous = false; // until proven otherwise |
2388 while (_collectorState != Idling) { | 2392 while (_collectorState != Idling) { |
2389 if (TraceCMSState) { | 2393 if (TraceCMSState) { |
2390 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", | 2394 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", |
4589 // We'll scan the cards in the dirty region (with periodic | 4593 // We'll scan the cards in the dirty region (with periodic |
4590 // yields for foreground GC as needed). | 4594 // yields for foreground GC as needed). |
4591 if (!dirtyRegion.is_empty()) { | 4595 if (!dirtyRegion.is_empty()) { |
4592 assert(numDirtyCards > 0, "consistency check"); | 4596 assert(numDirtyCards > 0, "consistency check"); |
4593 HeapWord* stop_point = NULL; | 4597 HeapWord* stop_point = NULL; |
4598 stopTimer(); | |
4599 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), | |
4600 bitMapLock()); | |
4601 startTimer(); | |
4594 { | 4602 { |
4595 stopTimer(); | |
4596 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), | |
4597 bitMapLock()); | |
4598 startTimer(); | |
4599 verify_work_stacks_empty(); | 4603 verify_work_stacks_empty(); |
4600 verify_overflow_empty(); | 4604 verify_overflow_empty(); |
4601 sample_eden(); | 4605 sample_eden(); |
4602 stop_point = | 4606 stop_point = |
4603 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); | 4607 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); |
4610 // cards. We'll either restart at the next block boundary or | 4614 // cards. We'll either restart at the next block boundary or |
4611 // abort the preclean. | 4615 // abort the preclean. |
4612 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) || | 4616 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) || |
4613 (_collectorState == AbortablePreclean && should_abort_preclean()), | 4617 (_collectorState == AbortablePreclean && should_abort_preclean()), |
4614 "Unparsable objects should only be in perm gen."); | 4618 "Unparsable objects should only be in perm gen."); |
4615 | |
4616 stopTimer(); | |
4617 CMSTokenSyncWithLocks ts(true, bitMapLock()); | |
4618 startTimer(); | |
4619 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); | 4619 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); |
4620 if (should_abort_preclean()) { | 4620 if (should_abort_preclean()) { |
4621 break; // out of preclean loop | 4621 break; // out of preclean loop |
4622 } else { | 4622 } else { |
4623 // Compute the next address at which preclean should pick up; | 4623 // Compute the next address at which preclean should pick up; |
5676 | 5676 |
5677 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) { | 5677 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) { |
5678 | 5678 |
5679 ResourceMark rm; | 5679 ResourceMark rm; |
5680 HandleMark hm; | 5680 HandleMark hm; |
5681 ReferencePolicy* soft_ref_policy; | |
5682 | |
5683 assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete"); | |
5684 // Process weak references. | |
5685 if (clear_all_soft_refs) { | |
5686 soft_ref_policy = new AlwaysClearPolicy(); | |
5687 } else { | |
5688 #ifdef COMPILER2 | |
5689 soft_ref_policy = new LRUMaxHeapPolicy(); | |
5690 #else | |
5691 soft_ref_policy = new LRUCurrentHeapPolicy(); | |
5692 #endif // COMPILER2 | |
5693 } | |
5694 verify_work_stacks_empty(); | |
5695 | 5681 |
5696 ReferenceProcessor* rp = ref_processor(); | 5682 ReferenceProcessor* rp = ref_processor(); |
5697 assert(rp->span().equals(_span), "Spans should be equal"); | 5683 assert(rp->span().equals(_span), "Spans should be equal"); |
5684 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete"); | |
5685 // Process weak references. | |
5686 rp->snap_policy(clear_all_soft_refs); | |
5687 verify_work_stacks_empty(); | |
5688 | |
5698 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, | 5689 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, |
5699 &_markStack, false /* !preclean */); | 5690 &_markStack, false /* !preclean */); |
5700 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, | 5691 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, |
5701 _span, &_markBitMap, &_markStack, | 5692 _span, &_markBitMap, &_markStack, |
5702 &cmsKeepAliveClosure, false /* !preclean */); | 5693 &cmsKeepAliveClosure, false /* !preclean */); |
5703 { | 5694 { |
5704 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); | 5695 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); |
5705 if (rp->processing_is_mt()) { | 5696 if (rp->processing_is_mt()) { |
5706 CMSRefProcTaskExecutor task_executor(*this); | 5697 CMSRefProcTaskExecutor task_executor(*this); |
5707 rp->process_discovered_references(soft_ref_policy, | 5698 rp->process_discovered_references(&_is_alive_closure, |
5708 &_is_alive_closure, | |
5709 &cmsKeepAliveClosure, | 5699 &cmsKeepAliveClosure, |
5710 &cmsDrainMarkingStackClosure, | 5700 &cmsDrainMarkingStackClosure, |
5711 &task_executor); | 5701 &task_executor); |
5712 } else { | 5702 } else { |
5713 rp->process_discovered_references(soft_ref_policy, | 5703 rp->process_discovered_references(&_is_alive_closure, |
5714 &_is_alive_closure, | |
5715 &cmsKeepAliveClosure, | 5704 &cmsKeepAliveClosure, |
5716 &cmsDrainMarkingStackClosure, | 5705 &cmsDrainMarkingStackClosure, |
5717 NULL); | 5706 NULL); |
5718 } | 5707 } |
5719 verify_work_stacks_empty(); | 5708 verify_work_stacks_empty(); |
6164 // is checked. | 6153 // is checked. |
6165 } | 6154 } |
6166 #endif | 6155 #endif |
6167 | 6156 |
6168 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const { | 6157 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const { |
6169 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), | 6158 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), |
6170 "missing Printezis mark?"); | 6159 "missing Printezis mark?"); |
6171 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); | 6160 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); |
6172 size_t size = pointer_delta(nextOneAddr + 1, addr); | 6161 size_t size = pointer_delta(nextOneAddr + 1, addr); |
6173 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | 6162 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), |
6174 "alignment problem"); | 6163 "alignment problem"); |
6175 assert(size >= 3, "Necessary for Printezis marks to work"); | 6164 assert(size >= 3, "Necessary for Printezis marks to work"); |