comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 52b4284cb496 c2844108a708
children d86b226e331a
diff from 20184:84105dcdb05b to 20804:7848fc12602b
@@ -47,20 +47,21 @@
 #include "memory/collectorPolicy.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/genMarkSweep.hpp"
 #include "memory/genOopClosures.inline.hpp"
-#include "memory/iterator.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/padded.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/tenuredGeneration.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/memoryService.hpp"
 #include "services/runtimeService.hpp"

 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
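A note on the two include changes: HotSpot splits many headers into a declaration-only foo.hpp and a foo.inline.hpp that carries the inline bodies; a file that actually calls the inlined code must include the .inline.hpp variant. A minimal sketch of that pattern, using hypothetical names that are not part of this changeset:

  // counter.hpp -- declarations only
  class Counter {
    int _v;
   public:
    int next();                // body lives in counter.inline.hpp
  };

  // counter.inline.hpp -- included only by callers of next()
  #include "counter.hpp"
  inline int Counter::next() { return ++_v; }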
@@ -735,11 +736,11 @@
   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

   // Support for parallelizing survivor space rescan
   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
     const size_t max_plab_samples =
-      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
+      ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();

     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
     _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
@@ -793,10 +794,16 @@
   _gc_counters = new CollectorCounters("CMS", 1);
   _completed_initialization = true;
   _inter_sweep_timer.start();  // start of time
 }

+size_t CMSCollector::plab_sample_minimum_size() {
+  // The default value of MinTLABSize is 2k, but there is
+  // no way to get the default value if the flag has been overridden.
+  return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
+}
+
 const char* ConcurrentMarkSweepGeneration::name() const {
   return "concurrent mark-sweep generation";
 }
 void ConcurrentMarkSweepGeneration::update_counters() {
   if (UsePerfData) {
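The point of the new helper: the old code sized the survivor chunk array by dividing by MinTLABSize directly, so an overridden, very small MinTLABSize could inflate max_plab_samples. The new floor clamps the divisor to at least 2K. A stand-alone sketch of the arithmetic, with assumed illustrative values (the real ones come from the VM):

  #include <algorithm>
  #include <cstddef>

  const size_t K = 1024;
  size_t tlab_min_size_bytes = 512;       // stand-in for ThreadLocalAllocBuffer::min_size() * HeapWordSize
  size_t max_survivor_size   = 4 * K * K; // assumed 4M survivor space

  size_t plab_sample_minimum_size() {
    // Clamp to 2K so a small overridden MinTLABSize cannot
    // inflate the number of samples budgeted for.
    return std::max(tlab_min_size_bytes, 2 * K);
  }

  size_t max_plab_samples() {
    return max_survivor_size / plab_sample_minimum_size();
  }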
@@ -1511,10 +1518,12 @@
                            _cmsGen->contiguous_available());
   gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
   gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
   gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
   gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
+  gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
+  gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
   gclog_or_tty->print_cr("metadata initialized %d",
     MetaspaceGC::should_concurrent_collect());
 }
 // ------------------------------------------------------------------

@@ -1567,15 +1576,37 @@
     }
     return true;
   }

   if (MetaspaceGC::should_concurrent_collect()) {
     if (Verbose && PrintGCDetails) {
       gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
     }
     return true;
   }

+  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
+  if (CMSTriggerInterval >= 0) {
+    if (CMSTriggerInterval == 0) {
+      // Trigger always
+      return true;
+    }
+
+    // Check the CMS time since begin (we do not check the stats validity
+    // as we want to be able to trigger the first CMS cycle as well)
+    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
+      if (Verbose && PrintGCDetails) {
+        if (stats().valid()) {
+          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
+                                 stats().cms_time_since_begin());
+        } else {
+          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
+        }
+      }
+      return true;
+    }
+  }
+
   return false;
 }

 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
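The trigger logic above reduces to a small predicate. A stand-alone restatement, with the interval passed in milliseconds as in the hunk (the parameter type is an assumption; CMSTriggerInterval is a VM flag):

  // MILLIUNITS is milliseconds per second, as used in the hunk above.
  const double MILLIUNITS = 1000.0;

  bool should_trigger_by_interval(long trigger_interval_ms,
                                  double secs_since_cycle_begin) {
    if (trigger_interval_ms < 0)  return false;  // feature disabled
    if (trigger_interval_ms == 0) return true;   // trigger always
    // Stats validity is deliberately not checked, so the very first
    // CMS cycle can be triggered by the interval as well.
    return secs_since_cycle_begin >= trigger_interval_ms / MILLIUNITS;
  }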
@@ -1997,11 +2028,11 @@
   gc_timer->register_gc_start();

   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

-  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
+  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
       "collections passed to foreground collector", _full_gcs_since_conc_gc);
   }

@@ -2507,12 +2538,14 @@
   assert(Thread::current()->is_VM_thread(), "A foreground collection"
     "may only be done by the VM Thread with the world stopped");
   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
     "VM thread should have CMS token");

+  // The gc id is created in register_foreground_gc_start if this collection is synchronous
+  const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
-    true, NULL);)
+    true, NULL, gc_id);)
   if (UseAdaptiveSizePolicy) {
     size_policy()->ms_collection_begin();
   }
   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);

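The id selection above covers the case where a synchronous foreground collection sets up its trace before its GC id has been registered, so the next id is peeked; an ongoing concurrent cycle reuses its tracer's id. A sketch with declaration-only stand-ins (not VM code):

  enum CollectorState { InitialMarking, Marking /* ... */ };

  struct GCId { unsigned id; };
  GCId peek_next_gc_id();       // stand-in for GCId::peek()
  GCId current_tracer_gc_id();  // stand-in for _gc_tracer_cm->gc_id()

  GCId id_for_foreground_trace(CollectorState state) {
    // Synchronous collections register their id later, so peek ahead;
    // otherwise the concurrent cycle already owns an id.
    return state == InitialMarking ? peek_next_gc_id()
                                   : current_tracer_gc_id();
  }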
@@ -3023,26 +3056,25 @@
 void CMSCollector::verify_after_remark_work_1() {
   ResourceMark rm;
   HandleMark  hm;
   GenCollectedHeap* gch = GenCollectedHeap::heap();

-  // Get a clear set of claim bits for the strong roots processing to work with.
+  // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();

   // Mark from roots one level into CMS
   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.

-  gch->gen_process_strong_roots(_cmsGen->level(),
-                                true,   // younger gens are roots
-                                true,   // activate StrongRootsScope
-                                false,  // not scavenging
-                                SharedHeap::ScanningOption(roots_scanning_options()),
-                                &notOlder,
-                                true,   // walk code active on stacks
-                                NULL,
-                                NULL); // SSS: Provide correct closure
+  gch->gen_process_roots(_cmsGen->level(),
+                         true,   // younger gens are roots
+                         true,   // activate StrongRootsScope
+                         SharedHeap::ScanningOption(roots_scanning_options()),
+                         should_unload_classes(),
+                         &notOlder,
+                         NULL,
+                         NULL);  // SSS: Provide correct closure

   // Now mark from the roots
   MarkFromRootsClosure markFromRootsClosure(this, _span,
     verification_mark_bm(), verification_mark_stack(),
     false /* don't yield */, true /* verifying */);
@@ -3089,28 +3121,28 @@
 void CMSCollector::verify_after_remark_work_2() {
   ResourceMark rm;
   HandleMark  hm;
   GenCollectedHeap* gch = GenCollectedHeap::heap();

-  // Get a clear set of claim bits for the strong roots processing to work with.
+  // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();

   // Mark from roots one level into CMS
   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                      markBitMap());
-  CMKlassClosure klass_closure(&notOlder);
+  CLDToOopClosure cld_closure(&notOlder, true);

   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-  gch->gen_process_strong_roots(_cmsGen->level(),
-                                true,   // younger gens are roots
-                                true,   // activate StrongRootsScope
-                                false,  // not scavenging
-                                SharedHeap::ScanningOption(roots_scanning_options()),
-                                &notOlder,
-                                true,   // walk code active on stacks
-                                NULL,
-                                &klass_closure);
+
+  gch->gen_process_roots(_cmsGen->level(),
+                         true,   // younger gens are roots
+                         true,   // activate StrongRootsScope
+                         SharedHeap::ScanningOption(roots_scanning_options()),
+                         should_unload_classes(),
+                         &notOlder,
+                         NULL,
+                         &cld_closure);

   // Now mark from the roots
   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
     verification_mark_bm(), markBitMap(), verification_mark_stack());
   assert(_restart_addr == NULL, "Expected pre-condition");
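The recurring change in these hunks is the switch from gen_process_strong_roots to gen_process_roots: the "not scavenging" and "walk code active on stacks" booleans disappear, a strong-roots-only flag (here derived from should_unload_classes()) becomes explicit, and the Klass closure slot becomes a CLD closure slot. A declaration-only sketch of the new call shape; parameter names are paraphrased here, the authoritative declaration lives in the genCollectedHeap/sharedHeap headers of this changeset:

  struct OopsInGenClosure;
  struct CLDClosure;
  enum ScanningOption { SO_None = 0, SO_AllCodeCache = 1 };

  void gen_process_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_strong_roots_scope,
                         ScanningOption so,
                         bool only_strong_roots,       // callers pass should_unload_classes()
                         OopsInGenClosure* not_older_gens,
                         OopsInGenClosure* older_gens,  // may be NULL
                         CLDClosure* cld_closure);      // replaces the KlassClosure slot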
@@ -3164,20 +3196,10 @@
 void
 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
   cl->set_generation(this);
   younger_refs_in_space_iterate(_cmsSpace, cl);
   cl->reset_generation();
 }

 void
-ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  if (freelistLock()->owned_by_self()) {
-    Generation::oop_iterate(mr, cl);
-  } else {
-    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
-    Generation::oop_iterate(mr, cl);
-  }
-}
-
-void
 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
   if (freelistLock()->owned_by_self()) {
@@ -3303,25 +3325,21 @@
 }

 void CMSCollector::setup_cms_unloading_and_verification_state() {
   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                              || VerifyBeforeExit;
-  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  const  int  rso           =   SharedHeap::SO_AllCodeCache;

   // We set the proper root for this CMS cycle here.
   if (should_unload_classes()) {   // Should unload classes this cycle
-    remove_root_scanning_option(SharedHeap::SO_AllClasses);
-    add_root_scanning_option(SharedHeap::SO_SystemClasses);
     remove_root_scanning_option(rso);  // Shrink the root set appropriately
     set_verifying(should_verify);    // Set verification state for this cycle
     return;                            // Nothing else needs to be done at this time
   }

   // Not unloading classes this cycle
   assert(!should_unload_classes(), "Inconsistency!");
-  remove_root_scanning_option(SharedHeap::SO_SystemClasses);
-  add_root_scanning_option(SharedHeap::SO_AllClasses);

   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
     // Include symbols, strings and code cache elements to prevent their resurrection.
     add_root_scanning_option(rso);
     set_verifying(true);
@@ -3525,18 +3543,20 @@
 // phases.
 class CMSPhaseAccounting: public StackObj {
  public:
   CMSPhaseAccounting(CMSCollector *collector,
                      const char *phase,
+                     const GCId gc_id,
                      bool print_cr = true);
   ~CMSPhaseAccounting();

  private:
   CMSCollector *_collector;
   const char *_phase;
   elapsedTimer _wallclock;
   bool _print_cr;
+  const GCId _gc_id;

  public:
   // Not MT-safe; so do not pass around these StackObj's
   // where they may be accessed by other threads.
   jlong wallclock_millis() {
@@ -3548,19 +3568,19 @@
   }
 };

 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                        const char *phase,
+                                       const GCId gc_id,
                                        bool print_cr) :
-  _collector(collector), _phase(phase), _print_cr(print_cr) {
+  _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {

   if (PrintCMSStatistics != 0) {
     _collector->resetYields();
   }
   if (PrintGCDetails) {
-    gclog_or_tty->date_stamp(PrintGCDateStamps);
-    gclog_or_tty->stamp(PrintGCTimeStamps);
+    gclog_or_tty->gclog_stamp(_gc_id);
     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
       _collector->cmsGen()->short_name(), _phase);
   }
   _collector->resetTimer();
   _wallclock.start();
@@ -3570,12 +3590,11 @@
 CMSPhaseAccounting::~CMSPhaseAccounting() {
   assert(_wallclock.is_active(), "Wall clock should not have stopped");
   _collector->stopTimer();
   _wallclock.stop();
   if (PrintGCDetails) {
-    gclog_or_tty->date_stamp(PrintGCDateStamps);
-    gclog_or_tty->stamp(PrintGCTimeStamps);
+    gclog_or_tty->gclog_stamp(_gc_id);
     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
       _collector->cmsGen()->short_name(),
       _phase, _collector->timerValue(), _wallclock.seconds());
     if (_print_cr) {
       gclog_or_tty->cr();
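CMSPhaseAccounting is an RAII helper: construction logs the phase start, destruction logs the elapsed time, and the new GCId member lets both lines be tagged with the owning cycle. The same pattern, reduced to a stand-alone sketch (GCId and the log stream are stand-ins, not VM types):

  #include <cstdio>
  #include <ctime>

  class PhaseAccounting {
    const char*  _phase;
    unsigned     _gc_id;   // carried so every line is tagged with the cycle
    std::clock_t _start;
   public:
    PhaseAccounting(const char* phase, unsigned gc_id)
      : _phase(phase), _gc_id(gc_id), _start(std::clock()) {
      std::printf("#%u [concurrent-%s-start]\n", _gc_id, _phase);
    }
    ~PhaseAccounting() {
      double secs = double(std::clock() - _start) / CLOCKS_PER_SEC;
      std::printf("#%u [concurrent-%s: %.3f secs]\n", _gc_id, _phase, secs);
    }
  };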
@@ -3669,11 +3688,11 @@
   // Setup the verification and class unloading state for this
   // CMS collection cycle.
   setup_cms_unloading_and_verification_state();

   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
-    PrintGCDetails && Verbose, true, _gc_timer_cm);)
+    PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
   if (UseAdaptiveSizePolicy) {
     size_policy()->checkpoint_roots_initial_begin();
   }

   // Reset all the PLAB chunk arrays if necessary.
@@ -3682,16 +3701,10 @@
   }

   ResourceMark rm;
   HandleMark  hm;

-  FalseClosure falseClosure;
-  // In the case of a synchronous collection, we will elide the
-  // remark step, so it's important to catch all the nmethod oops
-  // in this step.
-  // The final 'true' flag to gen_process_strong_roots will ensure this.
-  // If 'async' is true, we can relax the nmethod tracing.
   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
   GenCollectedHeap* gch = GenCollectedHeap::heap();

   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -3733,21 +3746,20 @@
       tsk.work(0);
     }
     gch->set_par_threads(0);
   } else {
     // The serial version.
-    CMKlassClosure klass_closure(&notOlder);
+    CLDToOopClosure cld_closure(&notOlder, true);
     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-    gch->gen_process_strong_roots(_cmsGen->level(),
-                                  true,   // younger gens are roots
-                                  true,   // activate StrongRootsScope
-                                  false,  // not scavenging
-                                  SharedHeap::ScanningOption(roots_scanning_options()),
-                                  &notOlder,
-                                  true,   // walk all of code cache if (so & SO_CodeCache)
-                                  NULL,
-                                  &klass_closure);
+    gch->gen_process_roots(_cmsGen->level(),
+                           true,   // younger gens are roots
+                           true,   // activate StrongRootsScope
+                           SharedHeap::ScanningOption(roots_scanning_options()),
+                           should_unload_classes(),
+                           &notOlder,
+                           NULL,
+                           &cld_closure);
   }
 }

 // Clear mod-union table; it will be dirtied in the prologue of
 // CMS generation per each younger generation collection.
@@ -3794,11 +3806,11 @@
   // refs in this generation concurrent (but interleaved) with
   // weak ref discovery by a younger generation collector.

   CMSTokenSyncWithLocks ts(true, bitMapLock());
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
+  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
   res = markFromRootsWork(asynch);
   if (res) {
     _collectorState = Precleaning;
   } else { // We failed and a foreground collection wants to take over
     assert(_foregroundGCIsActive, "internal state inconsistency");
@@ -4197,11 +4209,11 @@
   // have been bumped up by the thread that claimed the last
   // task.
   pst->all_tasks_completed();
 }

-class Par_ConcMarkingClosure: public CMSOopClosure {
+class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector* _collector;
   CMSConcMarkingTask* _task;
   MemRegion _span;
   CMSBitMap* _bit_map;
@@ -4210,11 +4222,11 @@
  protected:
   DO_OOP_WORK_DEFN
  public:
   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
-    CMSOopClosure(collector->ref_processor()),
+    MetadataAwareOopClosure(collector->ref_processor()),
     _collector(collector),
     _task(task),
     _span(collector->_span),
     _work_queue(work_queue),
     _bit_map(bit_map),
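Throughout this changeset the CMS-private CMSOopClosure base is replaced by the shared MetadataAwareOopClosure, so only the base-class name in each initializer list changes. The idea behind a metadata-aware closure, reduced to a sketch with stand-in types (the real base lives in memory/iterator.hpp):

  struct oop_t;   // stand-in for oop
  struct Klass;   // stand-in

  struct OopClosure {
    virtual void do_oop(oop_t* p) = 0;
  };

  // A metadata-aware closure additionally promises to visit the class
  // metadata reachable from the objects it marks, so that class
  // unloading decisions stay consistent with the object graph.
  struct MetadataAwareOopClosureSketch : OopClosure {
    virtual void do_klass(Klass* k) { /* visit k's embedded oops */ }
  };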
@@ -4517,11 +4529,11 @@
     _start_sampling = true;
   } else {
     _start_sampling = false;
   }
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
+  CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
   preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
 }
 CMSTokenSync x(true); // is cms thread
 if (CMSPrecleaningEnabled) {
   sample_eden();
@@ -4546,11 +4558,11 @@
   // schedule the pause as described above. By choosing
   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
   // we will never do an actual abortable preclean cycle.
   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
     // We need more smarts in the abortable preclean
     // loop below to deal with cases where allocation
     // in young gen is very very slow, and our precleaning
     // is running a losing race against a horde of
     // mutators intent on flooding us with CMS updates
@@ -4691,11 +4703,11 @@
   // tweaking for better performance and some restructuring
   // for cleaner interfaces.
   GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
   rp->preclean_discovered_references(
       rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
-      gc_timer);
+      gc_timer, _gc_tracer_cm->gc_id());
 }

 if (clean_survivor) {  // preclean the active survivor space(s)
   assert(_young_gen->kind() == Generation::DefNew ||
          _young_gen->kind() == Generation::ParNew ||
@@ -4981,11 +4993,11 @@
   verify_overflow_empty();
   return cumNumDirtyCards;
 }

 class PrecleanKlassClosure : public KlassClosure {
-  CMKlassClosure _cm_klass_closure;
+  KlassToOopClosure _cm_klass_closure;
  public:
   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
   void do_klass(Klass* k) {
     if (k->has_accumulated_modified_oops()) {
       k->clear_accumulated_modified_oops();
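Here the CMS-local CMKlassClosure member (whose definition is deleted near the end of this diff) is replaced by the shared KlassToOopClosure adapter, which applies an OopClosure to the oops embedded in a Klass. What the adapter does, as a sketch with stand-in types:

  struct oop_t;
  struct OopClosure { virtual void do_oop(oop_t* p) = 0; };
  struct Klass {
    void oops_do(OopClosure* cl);  // visit embedded oops (mirror, etc.)
  };

  // Adapter: presents an OopClosure as a Klass visitor.
  struct KlassToOopClosureSketch {
    OopClosure* _cl;
    explicit KlassToOopClosureSketch(OopClosure* cl) : _cl(cl) {}
    void do_klass(Klass* k) { k->oops_do(_cl); }
  };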
@@ -5034,11 +5046,11 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   // Temporarily set flag to false, GCH->do_collection will
   // expect it to be false and set to true
   FlagSetting fl(gch->_is_gc_active, false);
   NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
-    PrintGCDetails && Verbose, true, _gc_timer_cm);)
+    PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
   int level = _cmsGen->level() - 1;
   if (level >= 0) {
     gch->do_collection(true,  // full (i.e. force, see below)
                        false, // !clear_all_soft_refs
                        0,     // size
@@ -5063,11 +5075,11 @@
 }

 void CMSCollector::checkpointRootsFinalWork(bool asynch,
   bool clear_all_soft_refs, bool init_mark_was_synchronous) {

-  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
+  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)

   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());

   if (UseAdaptiveSizePolicy) {
@@ -5118,15 +5130,15 @@
     // are detected via the mod union table which is the set of all cards
     // dirtied since the first checkpoint in this GC cycle and prior to
     // the most recent young generation GC, minus those cleaned up by the
     // concurrent precleaning.
     if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
-      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
+      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
       do_remark_parallel();
     } else {
       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
-        _gc_timer_cm);
+        _gc_timer_cm, _gc_tracer_cm->gc_id());
       do_remark_non_parallel();
     }
   }
 } else {
   assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
@@ -5135,11 +5147,11 @@
 }
 verify_work_stacks_empty();
 verify_overflow_empty();

 {
-  NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
+  NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
   refProcessingWork(asynch, clear_all_soft_refs);
 }
 verify_work_stacks_empty();
 verify_overflow_empty();

@@ -5219,11 +5231,10 @@

   // ---------- scan from roots --------------
   _timer.start();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
-  CMKlassClosure klass_closure(&par_mri_cl);

   // ---------- young gen roots --------------
   {
     work_on_young_gen_roots(worker_id, &par_mri_cl);
     _timer.stop();
@@ -5235,21 +5246,23 @@
   }

   // ---------- remaining roots --------------
   _timer.reset();
   _timer.start();
-  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
-                                false,     // yg was scanned above
-                                false,     // this is parallel code
-                                false,     // not scavenging
-                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                                &par_mri_cl,
-                                true,   // walk all of code cache if (so & SO_CodeCache)
-                                NULL,
-                                &klass_closure);
+
+  CLDToOopClosure cld_closure(&par_mri_cl, true);
+
+  gch->gen_process_roots(_collector->_cmsGen->level(),
+                         false,     // yg was scanned above
+                         false,     // this is parallel code
+                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                         _collector->should_unload_classes(),
+                         &par_mri_cl,
+                         NULL,
+                         &cld_closure);
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr(
       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
@@ -5295,11 +5308,11 @@
   // ... work stealing for the above
   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
 };

 class RemarkKlassClosure : public KlassClosure {
-  CMKlassClosure _cm_klass_closure;
+  KlassToOopClosure _cm_klass_closure;
  public:
   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
   void do_klass(Klass* k) {
     // Check if we have modified any oops in the Klass during the concurrent marking.
     if (k->has_accumulated_modified_oops()) {
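RemarkKlassClosure relies on the per-Klass "accumulated modified oops" flag to revisit only klasses whose embedded oops were written during concurrent marking. The filter pattern from this hunk, reduced to a sketch with stand-in types (flag accessors mirror the ones used here):

  struct OopClosure;
  struct Klass {
    bool _accumulated_modified_oops;
    bool has_accumulated_modified_oops() { return _accumulated_modified_oops; }
    void clear_accumulated_modified_oops() { _accumulated_modified_oops = false; }
    void oops_do(OopClosure* cl);
  };

  struct RemarkKlassSketch {
    OopClosure* _cl;
    void do_klass(Klass* k) {
      if (k->has_accumulated_modified_oops()) {  // only dirty klasses
        k->clear_accumulated_modified_oops();
        k->oops_do(_cl);                         // rescan their oops
      }
    }
  };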
@@ -5372,21 +5385,21 @@
   }

   // ---------- remaining roots --------------
   _timer.reset();
   _timer.start();
-  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
-                                false,     // yg was scanned above
-                                false,     // this is parallel code
-                                false,     // not scavenging
-                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                                &par_mrias_cl,
-                                true,   // walk all of code cache if (so & SO_CodeCache)
-                                NULL,
-                                NULL);  // The dirty klasses will be handled below
+  gch->gen_process_roots(_collector->_cmsGen->level(),
+                         false,     // yg was scanned above
+                         false,     // this is parallel code
+                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                         _collector->should_unload_classes(),
+                         &par_mrias_cl,
+                         NULL,
+                         NULL);     // The dirty klasses will be handled below
+
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr(
       "Finished remaining root rescan work in %dth thread: %3.3f sec",
@@ -5435,11 +5448,11 @@
   }

   // We might have added oops to ClassLoaderData::_handles during the
   // concurrent marking phase. These oops point to newly allocated objects
   // that are guaranteed to be kept alive. Either by the direct allocation
-  // code, or when the young collector processes the strong roots. Hence,
+  // code, or when the young collector processes the roots. Hence,
   // we don't have to revisit the _handles block during the remark phase.

   // ---------- rescan dirty cards ------------
   _timer.reset();
   _timer.start();
@@ -5857,20 +5870,20 @@

   CMSParRemarkTask tsk(this,
     cms_space,
     n_workers, workers, task_queues());

-  // Set up for parallel process_strong_roots work.
+  // Set up for parallel process_roots work.
   gch->set_par_threads(n_workers);
   // We won't be iterating over the cards in the card table updating
   // the younger_gen cards, so we shouldn't call the following else
   // the verification code as well as subsequent younger_refs_iterate
   // code would get confused. XXX
   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel

   // The young gen rescan work will not be done as part of
-  // process_strong_roots (which currently doesn't knw how to
+  // process_roots (which currently doesn't know how to
   // parallelize such a scan), but rather will be broken up into
   // a set of parallel tasks (via the sampling that the [abortable]
   // preclean phase did of EdenSpace, plus the [two] tasks of
   // scanning the [two] survivor spaces. Further fine-grain
   // parallelization of the scanning of the survivor spaces
@@ -5920,11 +5933,11 @@
   MarkFromDirtyCardsClosure
     markFromDirtyCardsClosure(this, _span,
                               NULL,  // space is set further below
                               &_markBitMap, &_markStack, &mrias_cl);
   {
-    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
     // Iterate over the dirty cards, setting the corresponding bits in the
     // mod union table.
     {
       ModUnionClosure modUnionClosure(&_modUnionTable);
       _ct->ct_bs()->dirty_card_iterate(
@@ -5957,33 +5970,33 @@
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
     Universe::verify();
   }
   {
-    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

     verify_work_stacks_empty();

     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     GenCollectedHeap::StrongRootsScope srs(gch);
-    gch->gen_process_strong_roots(_cmsGen->level(),
-                                  true,  // younger gens as roots
-                                  false, // use the local StrongRootsScope
-                                  false, // not scavenging
-                                  SharedHeap::ScanningOption(roots_scanning_options()),
-                                  &mrias_cl,
-                                  true,   // walk code active on stacks
-                                  NULL,
-                                  NULL);  // The dirty klasses will be handled below
+
+    gch->gen_process_roots(_cmsGen->level(),
+                           true,  // younger gens as roots
+                           false, // use the local StrongRootsScope
+                           SharedHeap::ScanningOption(roots_scanning_options()),
+                           should_unload_classes(),
+                           &mrias_cl,
+                           NULL,
+                           NULL);  // The dirty klasses will be handled below

     assert(should_unload_classes()
-           || (roots_scanning_options() & SharedHeap::SO_CodeCache),
+           || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   }

   {
-    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

     verify_work_stacks_empty();

     // Scan all class loader data objects that might have been introduced
     // during concurrent marking.
@@ -5998,11 +6011,11 @@

     verify_work_stacks_empty();
   }

   {
-    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

     verify_work_stacks_empty();

     RemarkKlassClosure remark_klass_closure(&mrias_cl);
     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
@@ -6011,11 +6024,11 @@
   }

   // We might have added oops to ClassLoaderData::_handles during the
   // concurrent marking phase. These oops point to newly allocated objects
   // that are guaranteed to be kept alive. Either by the direct allocation
-  // code, or when the young collector processes the strong roots. Hence,
+  // code, or when the young collector processes the roots. Hence,
   // we don't have to revisit the _handles block during the remark phase.

   verify_work_stacks_empty();
   // Restore evacuated mark words, if any, used for overflow list links
   if (!CMSOverflowEarlyRestoration) {
@@ -6066,10 +6079,12 @@

   virtual void work(uint worker_id);
 };

 void CMSRefProcTaskProxy::work(uint worker_id) {
+  ResourceMark rm;
+  HandleMark hm;
   assert(_collector->_span.equals(_span), "Inconsistency in _span");
   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
                                         _mark_bit_map,
                                         work_queue(worker_id));
   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
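The added ResourceMark/HandleMark bound resource-area and handle allocations to the lifetime of the worker task, so anything allocated while processing references is released when work() returns. The scoping idea as a stand-alone RAII sketch (the arena is a stand-in, not the VM's resource area):

  #include <string>
  #include <vector>

  struct Arena { std::vector<std::string> chunks; };

  class ArenaMark {            // cf. ResourceMark / HandleMark
    Arena&  _a;
    size_t  _watermark;
   public:
    explicit ArenaMark(Arena& a) : _a(a), _watermark(a.chunks.size()) {}
    ~ArenaMark() { _a.chunks.resize(_watermark); }  // drop scope's allocations
  };

  void worker_body(Arena& a) {
    ArenaMark am(a);           // per-task bound, like the marks added here
    a.chunks.push_back("scratch");  // task-local allocation
  }                            // released when the task finishes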
@@ -6200,11 +6215,11 @@
                                 &_markStack, false /* !preclean */);
   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                 _span, &_markBitMap, &_markStack,
                                 &cmsKeepAliveClosure, false /* !preclean */);
   {
-    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

     ReferenceProcessorStats stats;
     if (rp->processing_is_mt()) {
       // Set the degree of MT here.  If the discovery is done MT, there
       // may have been a different number of threads doing the discovery
@@ -6225,28 +6240,30 @@
       CMSRefProcTaskExecutor task_executor(*this);
       stats = rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         &task_executor,
-                                        _gc_timer_cm);
+                                        _gc_timer_cm,
+                                        _gc_tracer_cm->gc_id());
     } else {
       stats = rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         NULL,
-                                        _gc_timer_cm);
+                                        _gc_timer_cm,
+                                        _gc_tracer_cm->gc_id());
     }
     _gc_tracer_cm->report_gc_reference_stats(stats);

   }

   // This is the point where the entire marking should have completed.
   verify_work_stacks_empty();

   if (should_unload_classes()) {
     {
-      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
+      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

       // Unload classes and purge the SystemDictionary.
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

       // Unload nmethods.
@@ -6255,23 +6272,22 @@
     // Prune dead klasses from subklass/sibling/implementor lists.
     Klass::clean_weak_klass_links(&_is_alive_closure);
   }

   {
-    GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
     // Clean up unreferenced symbols in symbol table.
     SymbolTable::unlink();
   }
-}

-// CMS doesn't use the StringTable as hard roots when class unloading is turned off.
-// Need to check if we really scanned the StringTable.
-if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
-  GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
-  // Delete entries for dead interned strings.
-  StringTable::unlink(&_is_alive_closure);
-}
+  {
+    GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    // Delete entries for dead interned strings.
+    StringTable::unlink(&_is_alive_closure);
+  }
+}
+

 // Restore any preserved marks as a result of mark stack or
 // work queue overflow
 restore_preserved_marks_if_any();  // done single-threaded for now

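Note the structural change in the last hunk: with SO_Strings gone from the root-scanning options, string-table scrubbing no longer needs the "did we really scan the StringTable" check and instead runs as its own timed phase inside the should_unload_classes() block. The scrubbing step itself is a weak-table unlink; in miniature, with stand-in types:

  #include <functional>
  #include <iterator>
  #include <string>
  #include <unordered_set>

  using IsAlive = std::function<bool(const std::string&)>;

  // Keep an entry only if the liveness closure says its referent survived.
  void unlink_dead(std::unordered_set<std::string>& table, IsAlive is_alive) {
    for (auto it = table.begin(); it != table.end(); ) {
      it = is_alive(*it) ? std::next(it) : table.erase(it);
    }
  }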
@@ -6331,11 +6347,11 @@
   assert(!_intra_sweep_timer.is_active(), "Should not be active");
   _intra_sweep_timer.reset();
   _intra_sweep_timer.start();
   if (asynch) {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
     // First sweep the old gen
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                                bitMapLock());
       sweepWork(_cmsGen, asynch);
@@ -6552,11 +6568,11 @@
 }

 // Clear the mark bitmap (no grey objects to start with)
 // for the next cycle.
 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
+CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);

 HeapWord* curAddr = _markBitMap.startWord();
 while (curAddr < _markBitMap.endWord()) {
   size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
   MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
@@ -6618,11 +6634,11 @@
 }

 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
   TraceCollectorStats tcs(counters());

   switch (op) {
     case CMS_op_checkpointRootsInitial: {
       SvcGCMarker sgcm(SvcGCMarker::OTHER);
@@ -7736,11 +7752,11 @@

 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
   CMSCollector* collector, MemRegion span,
   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
   CMSMarkStack*  mark_stack):
-  CMSOopClosure(collector->ref_processor()),
+  MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
   _span(span),
   _verification_bm(verification_bm),
   _cms_bm(cms_bm),
   _mark_stack(mark_stack)
@@ -7789,11 +7805,11 @@

 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
                      MemRegion span,
                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
                      HeapWord* finger, MarkFromRootsClosure* parent) :
-  CMSOopClosure(collector->ref_processor()),
+  MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
   _span(span),
   _bitMap(bitMap),
   _markStack(markStack),
   _finger(finger),
@@ -7806,11 +7822,11 @@
                      OopTaskQueue* work_queue,
                      CMSMarkStack*  overflow_stack,
                      HeapWord* finger,
                      HeapWord** global_finger_addr,
                      Par_MarkFromRootsClosure* parent) :
-  CMSOopClosure(collector->ref_processor()),
+  MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
   _whole_span(collector->_span),
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
@@ -7853,15 +7869,10 @@
   // Remember the least grey address discarded
   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
   _collector->lower_restart_addr(ra);
   _overflow_stack->reset();  // discard stack contents
   _overflow_stack->expand(); // expand the stack if possible
 }

-void CMKlassClosure::do_klass(Klass* k) {
-  assert(_oop_closure != NULL, "Not initialized?");
-  k->oops_do(_oop_closure);
-}
-
 void PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
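With every call site converted to CLDToOopClosure or KlassToOopClosure in the hunks above, the CMS-private CMKlassClosure::do_klass definition can be deleted: class metadata is now reached through ClassLoaderData rather than per-Klass walking. CLD-level walking in miniature, with stand-in types:

  #include <vector>

  struct OopClosure { virtual void do_oop(void** p) = 0; virtual ~OopClosure() {} };

  struct ClassLoaderData {
    std::vector<void*> _handles;   // oops rooted by this class loader
    void oops_do(OopClosure* cl) {
      for (void*& h : _handles) cl->do_oop(&h);
    }
  };

  struct CLDToOopClosureSketch {
    OopClosure* _cl;
    bool _claim;                   // cf. the 'true' claim flag passed above
    void do_cld(ClassLoaderData* cld) { cld->oops_do(_cl); }
  };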
@@ -7957,11 +7968,11 @@
                                        ReferenceProcessor* rp,
                                        CMSBitMap* bit_map,
                                        CMSBitMap* mod_union_table,
                                        CMSMarkStack*  mark_stack,
                                        bool           concurrent_precleaning):
-  CMSOopClosure(rp),
+  MetadataAwareOopClosure(rp),
   _collector(collector),
   _span(span),
   _bit_map(bit_map),
   _mod_union_table(mod_union_table),
   _mark_stack(mark_stack),
@@ -8030,11 +8041,11 @@
 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
                                                MemRegion span,
                                                ReferenceProcessor* rp,
                                                CMSBitMap* bit_map,
                                                OopTaskQueue* work_queue):
-  CMSOopClosure(rp),
+  MetadataAwareOopClosure(rp),
   _collector(collector),
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue)
 {