comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 14909:4ca6dc0799b6

Backout jdk9 merge
author Gilles Duboscq <duboscq@ssw.jku.at>
date Tue, 01 Apr 2014 13:57:07 +0200
parents d166675568f6
children 52b4284cb496
comparison of 14908:8db6e76cb658 with 14909:4ca6dc0799b6
115 // 115 //
116 // Unfortunately, I couldn't come up with a good abstraction to factor and 116 // Unfortunately, I couldn't come up with a good abstraction to factor and
117 // hide the naked CGC_lock manipulation in the baton-passing code 117 // hide the naked CGC_lock manipulation in the baton-passing code
118 // further below. That's something we should try to do. Also, the proof 118 // further below. That's something we should try to do. Also, the proof
119 // of correctness of this 2-level locking scheme is far from obvious, 119 // of correctness of this 2-level locking scheme is far from obvious,
120 // and potentially quite slippery. We have an uneasy suspicion, for instance, 120 // and potentially quite slippery. We have an uneasy suspicion, for instance,
121 // that there may be a theoretical possibility of delay/starvation in the 121 // that there may be a theoretical possibility of delay/starvation in the
122 // low-level lock/wait/notify scheme used for the baton-passing because of 122 // low-level lock/wait/notify scheme used for the baton-passing because of
123 // potential interference with the priority scheme embodied in the 123 // potential interference with the priority scheme embodied in the
124 // CMS-token-passing protocol. See related comments at a CGC_lock->wait() 124 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
125 // invocation further below and marked with "XXX 20011219YSR". 125 // invocation further below and marked with "XXX 20011219YSR".
126 // Indeed, as we note elsewhere, this may become yet more slippery 126 // Indeed, as we note elsewhere, this may become yet more slippery
127 // in the presence of multiple CMS and/or multiple VM threads. XXX 127 // in the presence of multiple CMS and/or multiple VM threads. XXX
128 128
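The baton-passing mentioned above is, at bottom, an ordinary lock/wait/notify
hand-off between the VM thread and the CMS thread. A minimal standalone sketch
using standard C++ primitives (the real code manipulates the CGC_lock Monitor
and the CMS-token flags directly; all names below are illustrative, not
HotSpot's):

    #include <condition_variable>
    #include <mutex>

    std::mutex              baton_lock;              // stands in for CGC_lock
    std::condition_variable baton_cv;
    bool                    cms_holds_token = false; // stands in for the CMS-token flag

    // VM thread side: hand the baton to the CMS thread.
    void pass_token_to_cms() {
      { std::lock_guard<std::mutex> lg(baton_lock); cms_holds_token = true; }
      baton_cv.notify_one();
    }

    // CMS thread side: block until the baton arrives, then proceed.
    void wait_for_token() {
      std::unique_lock<std::mutex> ul(baton_lock);
      baton_cv.wait(ul, [] { return cms_holds_token; });
      // ... do the work that requires holding the token ...
    }

The starvation concern flagged above stems from the fact that notify/wait gives
no fairness guarantee: a notified waiter can repeatedly lose the race to
re-acquire the lock to other threads contending on the same monitor.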
257 // generation may be larger than that in, say, a contiguous young 257 // generation may be larger than that in, say, a contiguous young
258 // generation. 258 // generation.
259 // Ideally, in the calculation below, we'd compute the dilatation 259 // Ideally, in the calculation below, we'd compute the dilatation
260 // factor as: MinChunkSize/(promoting_gen's min object size) 260 // factor as: MinChunkSize/(promoting_gen's min object size)
261 // Since we do not have such a general query interface for the 261 // Since we do not have such a general query interface for the
262 // promoting generation, we'll instead just use the minimum 262 // promoting generation, we'll instead just use the minimum
263 // object size (which today is a header's worth of space); 263 // object size (which today is a header's worth of space);
264 // note that all arithmetic is in units of HeapWords. 264 // note that all arithmetic is in units of HeapWords.
265 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking"); 265 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
266 assert(_dilatation_factor >= 1.0, "from previous assert"); 266 assert(_dilatation_factor >= 1.0, "from previous assert");
267 } 267 }
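As a worked example of the dilatation computation described above (numbers
purely illustrative): if MinChunkSize were 7 HeapWords and the minimum object
size a 2-HeapWord header, the factor would be 7/2 = 3.5. Because the first
assert guarantees MinChunkSize >= CollectedHeap::min_fill_size(), the quotient
can never drop below 1.0, which is exactly what the second assert records.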
272 // via CMSInitiatingOccupancyFraction (argument "io" below), it 272 // via CMSInitiatingOccupancyFraction (argument "io" below), it
273 // is calculated by: 273 // is calculated by:
274 // 274 //
275 // Let "f" be MinHeapFreeRatio in 275 // Let "f" be MinHeapFreeRatio in
276 // 276 //
277 // _initiating_occupancy = 100-f + 277 // _initiating_occupancy = 100-f +
278 // f * (CMSTriggerRatio/100) 278 // f * (CMSTriggerRatio/100)
279 // where CMSTriggerRatio is the argument "tr" below. 279 // where CMSTriggerRatio is the argument "tr" below.
280 // 280 //
281 // That is, if we assume the heap is at its desired maximum occupancy at the 281 // That is, if we assume the heap is at its desired maximum occupancy at the
282 // end of a collection, we let CMSTriggerRatio of the (purported) free 282 // end of a collection, we let CMSTriggerRatio of the (purported) free
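As a concrete instance of the formula above (default values cited only for
illustration): with MinHeapFreeRatio f = 40 and CMSTriggerRatio tr = 80,

    _initiating_occupancy = 100 - 40 + 40 * (80/100) = 92

i.e. unless overridden via CMSInitiatingOccupancyFraction, a CMS cycle is
initiated once the old generation is roughly 92% occupied.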
956 gclog_or_tty->print_cr(" Free fraction %f", free_percentage); 956 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
957 gclog_or_tty->print_cr(" Desired free fraction %f", 957 gclog_or_tty->print_cr(" Desired free fraction %f",
958 desired_free_percentage); 958 desired_free_percentage);
959 gclog_or_tty->print_cr(" Maximum free fraction %f", 959 gclog_or_tty->print_cr(" Maximum free fraction %f",
960 maximum_free_percentage); 960 maximum_free_percentage);
961 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000); 961 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
962 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT, 962 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
963 desired_capacity/1000); 963 desired_capacity/1000);
964 int prev_level = level() - 1; 964 int prev_level = level() - 1;
965 if (prev_level >= 0) { 965 if (prev_level >= 0) {
966 size_t prev_size = 0; 966 size_t prev_size = 0;
2669 // prologue delegate to the collector, which delegates back 2669 // prologue delegate to the collector, which delegates back
2670 // some "local" work to a worker method in the individual generations 2670 // some "local" work to a worker method in the individual generations
2671 // that it's responsible for collecting, while itself doing any 2671 // that it's responsible for collecting, while itself doing any
2672 // work common to all generations it's responsible for. A similar 2672 // work common to all generations it's responsible for. A similar
2673 // comment applies to the gc_epilogue()'s. 2673 // comment applies to the gc_epilogue()'s.
2674 // The role of the variable _between_prologue_and_epilogue is to 2674 // The role of the variable _between_prologue_and_epilogue is to
2675 // enforce the invocation protocol. 2675 // enforce the invocation protocol.
2676 void CMSCollector::gc_prologue(bool full) { 2676 void CMSCollector::gc_prologue(bool full) {
2677 // Call gc_prologue_work() for the CMSGen 2677 // Call gc_prologue_work() for the CMSGen
2678 // we are responsible for. 2678 // we are responsible for.
2679 2679
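The invocation protocol enforced by _between_prologue_and_epilogue is simply
"prologue work runs once, epilogue work runs only if the matching prologue
ran". A minimal standalone illustration of that pattern (not the HotSpot code
itself; the class and members here are hypothetical):

    #include <cassert>

    class PairedPhase {
      bool _between_prologue_and_epilogue = false;
    public:
      void prologue() {
        assert(!_between_prologue_and_epilogue && "prologue invoked twice without epilogue");
        _between_prologue_and_epilogue = true;
        // ... work done exactly once per collection, before any generation's local work ...
      }
      void epilogue() {
        if (!_between_prologue_and_epilogue) {
          return;  // the matching prologue never ran; nothing to undo
        }
        // ... work done exactly once per collection, after all generations' local work ...
        _between_prologue_and_epilogue = false;
      }
    };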
2876 #endif 2876 #endif
2877 2877
2878 // Check reachability of the given heap address in CMS generation, 2878 // Check reachability of the given heap address in CMS generation,
2879 // treating all other generations as roots. 2879 // treating all other generations as roots.
2880 bool CMSCollector::is_cms_reachable(HeapWord* addr) { 2880 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2881 // We could "guarantee" below, rather than assert, but I'll 2881 // We could "guarantee" below, rather than assert, but i'll
2882 // leave these as "asserts" so that an adventurous debugger 2882 // leave these as "asserts" so that an adventurous debugger
2883 // could try this in the product build provided some subset of 2883 // could try this in the product build provided some subset of
2884 // the conditions were met, provided they were interested in the 2884 // the conditions were met, provided they were interested in the
2885 // results and knew that the computation below wouldn't interfere 2885 // results and knew that the computation below wouldn't interfere
2886 // with other concurrent computations mutating the structures 2886 // with other concurrent computations mutating the structures
2887 // being read or written. 2887 // being read or written.
2888 assert(SafepointSynchronize::is_at_safepoint(), 2888 assert(SafepointSynchronize::is_at_safepoint(),
2889 "Else mutations in object graph will make answer suspect"); 2889 "Else mutations in object graph will make answer suspect");
2980 2980
2981 // Turn off refs discovery -- so we will be tracing through refs. 2981 // Turn off refs discovery -- so we will be tracing through refs.
2982 // This is as intended, because by this time 2982 // This is as intended, because by this time
2983 // GC must already have cleared any refs that need to be cleared, 2983 // GC must already have cleared any refs that need to be cleared,
2984 // and traced those that need to be marked; moreover, 2984 // and traced those that need to be marked; moreover,
2985 // the marking done here is not going to interfere in any 2985 // the marking done here is not going to interfere in any
2986 // way with the marking information used by GC. 2986 // way with the marking information used by GC.
2987 NoRefDiscovery no_discovery(ref_processor()); 2987 NoRefDiscovery no_discovery(ref_processor());
2988 2988
2989 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) 2989 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2990 2990
2998 // Update the saved marks which may affect the root scans. 2998 // Update the saved marks which may affect the root scans.
2999 gch->save_marks(); 2999 gch->save_marks();
3000 3000
3001 if (CMSRemarkVerifyVariant == 1) { 3001 if (CMSRemarkVerifyVariant == 1) {
3002 // In this first variant of verification, we complete 3002 // In this first variant of verification, we complete
3003 // all marking, then check if the new marks-vector is 3003 // all marking, then check if the new marks-vector is
3004 // a subset of the CMS marks-vector. 3004 // a subset of the CMS marks-vector.
3005 verify_after_remark_work_1(); 3005 verify_after_remark_work_1();
3006 } else if (CMSRemarkVerifyVariant == 2) { 3006 } else if (CMSRemarkVerifyVariant == 2) {
3007 // In this second variant of verification, we flag an error 3007 // In this second variant of verification, we flag an error
3008 // (i.e. an object reachable in the new marks-vector not reachable 3008 // (i.e. an object reachable in the new marks-vector not reachable
3031 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 3031 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3032 3032
3033 gch->gen_process_strong_roots(_cmsGen->level(), 3033 gch->gen_process_strong_roots(_cmsGen->level(),
3034 true, // younger gens are roots 3034 true, // younger gens are roots
3035 true, // activate StrongRootsScope 3035 true, // activate StrongRootsScope
3036 false, // not scavenging
3036 SharedHeap::ScanningOption(roots_scanning_options()), 3037 SharedHeap::ScanningOption(roots_scanning_options()),
3037 &notOlder, 3038 &notOlder,
3039 true, // walk code active on stacks
3038 NULL, 3040 NULL,
3039 NULL); // SSS: Provide correct closure 3041 NULL); // SSS: Provide correct closure
3040 3042
3041 // Now mark from the roots 3043 // Now mark from the roots
3042 MarkFromRootsClosure markFromRootsClosure(this, _span, 3044 MarkFromRootsClosure markFromRootsClosure(this, _span,
3097 3099
3098 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 3100 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3099 gch->gen_process_strong_roots(_cmsGen->level(), 3101 gch->gen_process_strong_roots(_cmsGen->level(),
3100 true, // younger gens are roots 3102 true, // younger gens are roots
3101 true, // activate StrongRootsScope 3103 true, // activate StrongRootsScope
3104 false, // not scavenging
3102 SharedHeap::ScanningOption(roots_scanning_options()), 3105 SharedHeap::ScanningOption(roots_scanning_options()),
3103 &notOlder, 3106 &notOlder,
3107 true, // walk code active on stacks
3104 NULL, 3108 NULL,
3105 &klass_closure); 3109 &klass_closure);
3106 3110
3107 // Now mark from the roots 3111 // Now mark from the roots
3108 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span, 3112 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3297 } 3301 }
3298 3302
3299 void CMSCollector::setup_cms_unloading_and_verification_state() { 3303 void CMSCollector::setup_cms_unloading_and_verification_state() {
3300 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC 3304 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3301 || VerifyBeforeExit; 3305 || VerifyBeforeExit;
3302 const int rso = SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache; 3306 const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
3303 3307
3304 // We set the proper root for this CMS cycle here. 3308 // We set the proper root for this CMS cycle here.
3305 if (should_unload_classes()) { // Should unload classes this cycle 3309 if (should_unload_classes()) { // Should unload classes this cycle
3306 remove_root_scanning_option(SharedHeap::SO_AllClasses); 3310 remove_root_scanning_option(SharedHeap::SO_AllClasses);
3307 add_root_scanning_option(SharedHeap::SO_SystemClasses); 3311 add_root_scanning_option(SharedHeap::SO_SystemClasses);
3309 set_verifying(should_verify); // Set verification state for this cycle 3313 set_verifying(should_verify); // Set verification state for this cycle
3310 return; // Nothing else needs to be done at this time 3314 return; // Nothing else needs to be done at this time
3311 } 3315 }
3312 3316
3313 // Not unloading classes this cycle 3317 // Not unloading classes this cycle
3314 assert(!should_unload_classes(), "Inconsistency!"); 3318 assert(!should_unload_classes(), "Inconsistency!");
3315 remove_root_scanning_option(SharedHeap::SO_SystemClasses); 3319 remove_root_scanning_option(SharedHeap::SO_SystemClasses);
3316 add_root_scanning_option(SharedHeap::SO_AllClasses); 3320 add_root_scanning_option(SharedHeap::SO_AllClasses);
3317 3321
3318 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { 3322 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3319 // Include symbols, strings and code cache elements to prevent their resurrection. 3323 // Include symbols, strings and code cache elements to prevent their resurrection.
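The add_root_scanning_option / remove_root_scanning_option calls in this
routine presumably just set and clear bits in an integer mask of
SharedHeap::ScanningOption flags that is later consumed via
roots_scanning_options(). A hypothetical, minimal sketch of such helpers
(class and field names invented for illustration):

    class RootScanOptions {
      int _roots_scanning_options = 0;
    public:
      void add_root_scanning_option(int o)    { _roots_scanning_options |= o;  }
      void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }
      int  roots_scanning_options() const     { return _roots_scanning_options; }
    };

Swapping SO_SystemClasses in for SO_AllClasses (or vice versa) is then just a
matter of clearing one bit and setting the other, as the code above does
depending on whether classes are being unloaded this cycle.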
3395 // Otherwise, we try expansion. 3399 // Otherwise, we try expansion.
3396 expand(word_sz*HeapWordSize, MinHeapDeltaBytes, 3400 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3397 CMSExpansionCause::_allocate_par_lab); 3401 CMSExpansionCause::_allocate_par_lab);
3398 // Now go around the loop and try alloc again; 3402 // Now go around the loop and try alloc again;
3399 // A competing par_promote might beat us to the expansion space, 3403 // A competing par_promote might beat us to the expansion space,
3400 // so we may go around the loop again if promotion fails again. 3404 // so we may go around the loop again if promotion fails again.
3401 if (GCExpandToAllocateDelayMillis > 0) { 3405 if (GCExpandToAllocateDelayMillis > 0) {
3402 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); 3406 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3403 } 3407 }
3404 } 3408 }
3405 } 3409 }
3676 } 3680 }
3677 3681
3678 ResourceMark rm; 3682 ResourceMark rm;
3679 HandleMark hm; 3683 HandleMark hm;
3680 3684
3685 FalseClosure falseClosure;
3686 // In the case of a synchronous collection, we will elide the
3687 // remark step, so it's important to catch all the nmethod oops
3688 // in this step.
3689 // The final 'true' flag to gen_process_strong_roots will ensure this.
3690 // If 'async' is true, we can relax the nmethod tracing.
3681 MarkRefsIntoClosure notOlder(_span, &_markBitMap); 3691 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3682 GenCollectedHeap* gch = GenCollectedHeap::heap(); 3692 GenCollectedHeap* gch = GenCollectedHeap::heap();
3683 3693
3684 verify_work_stacks_empty(); 3694 verify_work_stacks_empty();
3685 verify_overflow_empty(); 3695 verify_overflow_empty();
3726 CMKlassClosure klass_closure(&notOlder); 3736 CMKlassClosure klass_closure(&notOlder);
3727 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 3737 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3728 gch->gen_process_strong_roots(_cmsGen->level(), 3738 gch->gen_process_strong_roots(_cmsGen->level(),
3729 true, // younger gens are roots 3739 true, // younger gens are roots
3730 true, // activate StrongRootsScope 3740 true, // activate StrongRootsScope
3741 false, // not scavenging
3731 SharedHeap::ScanningOption(roots_scanning_options()), 3742 SharedHeap::ScanningOption(roots_scanning_options()),
3732 &notOlder, 3743 &notOlder,
3744 true, // walk all of code cache if (so & SO_CodeCache)
3733 NULL, 3745 NULL,
3734 &klass_closure); 3746 &klass_closure);
3735 } 3747 }
3736 } 3748 }
3737 3749
4359 // We really need to reconsider the synchronization between the GC 4371 // We really need to reconsider the synchronization between the GC
4360 // thread and the yield-requesting threads in the future and we 4372 // thread and the yield-requesting threads in the future and we
4361 // should really use wait/notify, which is the recommended 4373 // should really use wait/notify, which is the recommended
4362 // way of doing this type of interaction. Additionally, we should 4374 // way of doing this type of interaction. Additionally, we should
4363 // consolidate the eight methods that do the yield operation and they 4375 // consolidate the eight methods that do the yield operation and they
4364 // are almost identical into one for better maintainability and 4376 // are almost identical into one for better maintainability and
4365 // readability. See 6445193. 4377 // readability. See 6445193.
4366 // 4378 //
4367 // Tony 2006.06.29 4379 // Tony 2006.06.29
4368 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount && 4380 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4369 ConcurrentMarkSweepThread::should_yield() && 4381 ConcurrentMarkSweepThread::should_yield() &&
4527 assert(_collectorState == AbortablePreclean, "Inconsistent control state"); 4539 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4528 4540
4529 // If Eden's current occupancy is below this threshold, 4541 // If Eden's current occupancy is below this threshold,
4530 // immediately schedule the remark; else preclean 4542 // immediately schedule the remark; else preclean
4531 // past the next scavenge in an effort to 4543 // past the next scavenge in an effort to
4532 // schedule the pause as described above. By choosing 4544 // schedule the pause as described above. By choosing
4533 // CMSScheduleRemarkEdenSizeThreshold >= max eden size 4545 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4534 // we will never do an actual abortable preclean cycle. 4546 // we will never do an actual abortable preclean cycle.
4535 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { 4547 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4536 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 4548 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4537 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails); 4549 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
5224 _timer.reset(); 5236 _timer.reset();
5225 _timer.start(); 5237 _timer.start();
5226 gch->gen_process_strong_roots(_collector->_cmsGen->level(), 5238 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5227 false, // yg was scanned above 5239 false, // yg was scanned above
5228 false, // this is parallel code 5240 false, // this is parallel code
5241 false, // not scavenging
5229 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), 5242 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5230 &par_mri_cl, 5243 &par_mri_cl,
5244 true, // walk all of code cache if (so & SO_CodeCache)
5231 NULL, 5245 NULL,
5232 &klass_closure); 5246 &klass_closure);
5233 assert(_collector->should_unload_classes() 5247 assert(_collector->should_unload_classes()
5234 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache), 5248 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5235 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 5249 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5236 _timer.stop(); 5250 _timer.stop();
5237 if (PrintCMSStatistics != 0) { 5251 if (PrintCMSStatistics != 0) {
5238 gclog_or_tty->print_cr( 5252 gclog_or_tty->print_cr(
5239 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec", 5253 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5359 _timer.reset(); 5373 _timer.reset();
5360 _timer.start(); 5374 _timer.start();
5361 gch->gen_process_strong_roots(_collector->_cmsGen->level(), 5375 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5362 false, // yg was scanned above 5376 false, // yg was scanned above
5363 false, // this is parallel code 5377 false, // this is parallel code
5378 false, // not scavenging
5364 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), 5379 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5365 &par_mrias_cl, 5380 &par_mrias_cl,
5381 true, // walk all of code cache if (so & SO_CodeCache)
5366 NULL, 5382 NULL,
5367 NULL); // The dirty klasses will be handled below 5383 NULL); // The dirty klasses will be handled below
5368 assert(_collector->should_unload_classes() 5384 assert(_collector->should_unload_classes()
5369 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache), 5385 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5370 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 5386 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5371 _timer.stop(); 5387 _timer.stop();
5372 if (PrintCMSStatistics != 0) { 5388 if (PrintCMSStatistics != 0) {
5373 gclog_or_tty->print_cr( 5389 gclog_or_tty->print_cr(
5374 "Finished remaining root rescan work in %dth thread: %3.3f sec", 5390 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5519 OopTaskQueue* work_q = work_queue(i); 5535 OopTaskQueue* work_q = work_queue(i);
5520 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable)); 5536 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5521 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! 5537 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5522 // CAUTION: This closure has state that persists across calls to 5538 // CAUTION: This closure has state that persists across calls to
5523 // the work method dirty_range_iterate_clear() in that it has 5539 // the work method dirty_range_iterate_clear() in that it has
5524 // embedded in it a (subtype of) UpwardsObjectClosure. The 5540 // embedded in it a (subtype of) UpwardsObjectClosure. The
5525 // use of that state in the embedded UpwardsObjectClosure instance 5541 // use of that state in the embedded UpwardsObjectClosure instance
5526 // assumes that the cards are always iterated (even if in parallel 5542 // assumes that the cards are always iterated (even if in parallel
5527 // by several threads) in monotonically increasing order per each 5543 // by several threads) in monotonically increasing order per each
5528 // thread. This is true of the implementation below which picks 5544 // thread. This is true of the implementation below which picks
5529 // card ranges (chunks) in monotonically increasing order globally 5545 // card ranges (chunks) in monotonically increasing order globally
5530 // and, a-fortiori, in monotonically increasing order per thread 5546 // and, a-fortiori, in monotonically increasing order per thread
5535 // revisited and modified appropriately. See also related 5551 // revisited and modified appropriately. See also related
5536 // bug 4756801 work on which should examine this code to make 5552 // bug 4756801 work on which should examine this code to make
5537 // sure that the changes there do not run counter to the 5553 // sure that the changes there do not run counter to the
5538 // assumptions made here and necessary for correctness and 5554 // assumptions made here and necessary for correctness and
5539 // efficiency. Note also that this code might yield inefficient 5555 // efficiency. Note also that this code might yield inefficient
5540 // behavior in the case of very large objects that span one or 5556 // behaviour in the case of very large objects that span one or
5541 // more work chunks. Such objects would potentially be scanned 5557 // more work chunks. Such objects would potentially be scanned
5542 // several times redundantly. Work on 4756801 should try and 5558 // several times redundantly. Work on 4756801 should try and
5543 // address that performance anomaly if at all possible. XXX 5559 // address that performance anomaly if at all possible. XXX
5544 MemRegion full_span = _collector->_span; 5560 MemRegion full_span = _collector->_span;
5545 CMSBitMap* bm = &(_collector->_markBitMap); // shared 5561 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5561 assert((size_t)round_to((intptr_t)chunk_size, alignment) == 5577 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5562 chunk_size, "Check alignment"); 5578 chunk_size, "Check alignment");
5563 5579
5564 while (!pst->is_task_claimed(/* reference */ nth_task)) { 5580 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5565 // Having claimed the nth_task, compute corresponding mem-region, 5581 // Having claimed the nth_task, compute corresponding mem-region,
5566 // which is a-fortiori aligned correctly (i.e. at a MUT boundary). 5582 // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5567 // The alignment restriction ensures that we do not need any 5583 // The alignment restriction ensures that we do not need any
5568 // synchronization with other gang-workers while setting or 5584 // synchronization with other gang-workers while setting or
5569 // clearing bits in this chunk of the MUT. 5585 // clearing bits in this chunk of the MUT.
5570 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size, 5586 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5571 start_addr + (nth_task+1)*chunk_size); 5587 start_addr + (nth_task+1)*chunk_size);
5948 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 5964 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5949 GenCollectedHeap::StrongRootsScope srs(gch); 5965 GenCollectedHeap::StrongRootsScope srs(gch);
5950 gch->gen_process_strong_roots(_cmsGen->level(), 5966 gch->gen_process_strong_roots(_cmsGen->level(),
5951 true, // younger gens as roots 5967 true, // younger gens as roots
5952 false, // use the local StrongRootsScope 5968 false, // use the local StrongRootsScope
5969 false, // not scavenging
5953 SharedHeap::ScanningOption(roots_scanning_options()), 5970 SharedHeap::ScanningOption(roots_scanning_options()),
5954 &mrias_cl, 5971 &mrias_cl,
5972 true, // walk code active on stacks
5955 NULL, 5973 NULL,
5956 NULL); // The dirty klasses will be handled below 5974 NULL); // The dirty klasses will be handled below
5957 5975
5958 assert(should_unload_classes() 5976 assert(should_unload_classes()
5959 || (roots_scanning_options() & SharedHeap::SO_AllCodeCache), 5977 || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5960 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 5978 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5961 } 5979 }
5962 5980
5963 { 5981 {
5964 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm); 5982 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
6351 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds()); 6369 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6352 6370
6353 _inter_sweep_timer.reset(); 6371 _inter_sweep_timer.reset();
6354 _inter_sweep_timer.start(); 6372 _inter_sweep_timer.start();
6355 6373
6356 // We need to use a monotonically non-decreasing time in ms 6374 // We need to use a monotonically non-decreasing time in ms
6357 // or we will see time-warp warnings and os::javaTimeMillis() 6375 // or we will see time-warp warnings and os::javaTimeMillis()
6358 // does not guarantee monotonicity. 6376 // does not guarantee monotonicity.
6359 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 6377 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6360 update_time_of_last_gc(now); 6378 update_time_of_last_gc(now);
6361 6379
6712 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); 6730 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6713 if (!brs.is_reserved()) { 6731 if (!brs.is_reserved()) {
6714 warning("CMS bit map allocation failure"); 6732 warning("CMS bit map allocation failure");
6715 return false; 6733 return false;
6716 } 6734 }
6717 // For now we'll just commit all of the bit map up front. 6735 // For now we'll just commit all of the bit map up front.
6718 // Later on we'll try to be more parsimonious with swap. 6736 // Later on we'll try to be more parsimonious with swap.
6719 if (!_virtual_space.initialize(brs, brs.size())) { 6737 if (!_virtual_space.initialize(brs, brs.size())) {
6720 warning("CMS bit map backing store failure"); 6738 warning("CMS bit map backing store failure");
6721 return false; 6739 return false;
6722 } 6740 }
6819 return true; 6837 return true;
6820 } 6838 }
6821 6839
6822 // XXX FIX ME !!! In the MT case we come in here holding a 6840 // XXX FIX ME !!! In the MT case we come in here holding a
6823 // leaf lock. For printing we need to take a further lock 6841 // leaf lock. For printing we need to take a further lock
6824 // which has lower rank. We need to recalibrate the two 6842 // which has lower rank. We need to recalibrate the two
6825 // lock-ranks involved in order to be able to print the 6843 // lock-ranks involved in order to be able to print the
6826 // messages below. (Or defer the printing to the caller. 6844 // messages below. (Or defer the printing to the caller.
6827 // For now we take the expedient path of just disabling the 6845 // For now we take the expedient path of just disabling the
6828 // messages for the problematic case.) 6846 // messages for the problematic case.)
6829 void CMSMarkStack::expand() { 6847 void CMSMarkStack::expand() {
6830 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted"); 6848 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
7160 assert(_bitMap->isMarked(addr+size-1), 7178 assert(_bitMap->isMarked(addr+size-1),
7161 "inconsistent Printezis mark"); 7179 "inconsistent Printezis mark");
7162 } 7180 }
7163 #endif // ASSERT 7181 #endif // ASSERT
7164 } else { 7182 } else {
7165 // An uninitialized object. 7183 // an uninitialized object
7166 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?"); 7184 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
7167 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2); 7185 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7168 size = pointer_delta(nextOneAddr + 1, addr); 7186 size = pointer_delta(nextOneAddr + 1, addr);
7169 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), 7187 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7170 "alignment problem"); 7188 "alignment problem");
7171 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass() 7189 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
7172 // will dirty the card when the klass pointer is installed in the 7190 // will dirty the card when the klass pointer is installed in the
7173 // object (signaling the completion of initialization). 7191 // object (signalling the completion of initialization).
7174 } 7192 }
7175 } else { 7193 } else {
7176 // Either a not yet marked object or an uninitialized object 7194 // Either a not yet marked object or an uninitialized object
7177 if (p->klass_or_null() == NULL) { 7195 if (p->klass_or_null() == NULL) {
7178 // An uninitialized object, skip to the next card, since 7196 // An uninitialized object, skip to the next card, since
7229 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) { 7247 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7230 7248
7231 HeapWord* addr = (HeapWord*)p; 7249 HeapWord* addr = (HeapWord*)p;
7232 DEBUG_ONLY(_collector->verify_work_stacks_empty();) 7250 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7233 assert(!_span.contains(addr), "we are scanning the survivor spaces"); 7251 assert(!_span.contains(addr), "we are scanning the survivor spaces");
7234 assert(p->klass_or_null() != NULL, "object should be initialized"); 7252 assert(p->klass_or_null() != NULL, "object should be initialized");
7235 // an initialized object; ignore mark word in verification below 7253 // an initialized object; ignore mark word in verification below
7236 // since we are running concurrent with mutators 7254 // since we are running concurrent with mutators
7237 assert(p->is_oop(true), "should be an oop"); 7255 assert(p->is_oop(true), "should be an oop");
7238 // Note that we do not yield while we iterate over 7256 // Note that we do not yield while we iterate over
7239 // the interior oops of p, pushing the relevant ones 7257 // the interior oops of p, pushing the relevant ones
7979 // in the mod union table, thus ensuring that the object remains 7997 // in the mod union table, thus ensuring that the object remains
7980 // in the grey set and continue. In the case of object arrays 7998 // in the grey set and continue. In the case of object arrays
7981 // we need to dirty all of the cards that the object spans, 7999 // we need to dirty all of the cards that the object spans,
7982 // since the rescan of object arrays will be limited to the 8000 // since the rescan of object arrays will be limited to the
7983 // dirty cards. 8001 // dirty cards.
7984 // Note that no one can be interfering with us in this action 8002 // Note that no one can be interfering with us in this action
7985 // of dirtying the mod union table, so no locking or atomics 8003 // of dirtying the mod union table, so no locking or atomics
7986 // are required. 8004 // are required.
7987 if (obj->is_objArray()) { 8005 if (obj->is_objArray()) {
7988 size_t sz = obj->size(); 8006 size_t sz = obj->size();
7989 HeapWord* end_card_addr = (HeapWord*)round_to( 8007 HeapWord* end_card_addr = (HeapWord*)round_to(
9005 #ifndef PRODUCT 9023 #ifndef PRODUCT
9006 // Debugging support for CMSStackOverflowALot 9024 // Debugging support for CMSStackOverflowALot
9007 9025
9008 // It's OK to call this multi-threaded; the worst thing 9026 // It's OK to call this multi-threaded; the worst thing
9009 // that can happen is that we'll get a bunch of closely 9027 // that can happen is that we'll get a bunch of closely
9010 // spaced simulated overflows, but that's OK, in fact 9028 // spaced simulated overflows, but that's OK, in fact
9011 // probably good as it would exercise the overflow code 9029 // probably good as it would exercise the overflow code
9012 // under contention. 9030 // under contention.
9013 bool CMSCollector::simulate_overflow() { 9031 bool CMSCollector::simulate_overflow() {
9014 if (_overflow_counter-- <= 0) { // just being defensive 9032 if (_overflow_counter-- <= 0) { // just being defensive
9015 _overflow_counter = CMSMarkStackOverflowInterval; 9033 _overflow_counter = CMSMarkStackOverflowInterval;
9125 // above, if it is still the same value. 9143 // above, if it is still the same value.
9126 if (_overflow_list == BUSY) { 9144 if (_overflow_list == BUSY) {
9127 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); 9145 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9128 } 9146 }
9129 } else { 9147 } else {
9130 // Chop off the suffix and return it to the global list. 9148 // Chop off the suffix and return it to the global list.
9131 assert(cur->mark() != BUSY, "Error"); 9149 assert(cur->mark() != BUSY, "Error");
9132 oop suffix_head = cur->mark(); // suffix will be put back on global list 9150 oop suffix_head = cur->mark(); // suffix will be put back on global list
9133 cur->set_mark(NULL); // break off suffix 9151 cur->set_mark(NULL); // break off suffix
9134 // It's possible that the list is still in the empty(busy) state 9152 // It's possible that the list is still in the empty(busy) state
9135 // we left it in a short while ago; in that case we may be 9153 // we left it in a short while ago; in that case we may be