diff src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 14909:4ca6dc0799b6
Backout jdk9 merge
author   | Gilles Duboscq <duboscq@ssw.jku.at>
date     | Tue, 01 Apr 2014 13:57:07 +0200
parents  | d166675568f6
children | 52b4284cb496
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Apr 01 14:09:03 2014 +0200
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Apr 01 13:57:07 2014 +0200
@@ -117,10 +117,10 @@
 // hide the naked CGC_lock manipulation in the baton-passing code
 // further below. That's something we should try to do. Also, the proof
 // of correctness of this 2-level locking scheme is far from obvious,
-// and potentially quite slippery. We have an uneasy suspicion, for instance,
+// and potentially quite slippery. We have an uneasy supsicion, for instance,
 // that there may be a theoretical possibility of delay/starvation in the
 // low-level lock/wait/notify scheme used for the baton-passing because of
-// potential interference with the priority scheme embodied in the
+// potential intereference with the priority scheme embodied in the
 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 // invocation further below and marked with "XXX 20011219YSR".
 // Indeed, as we note elsewhere, this may become yet more slippery
@@ -259,7 +259,7 @@
   // Ideally, in the calculation below, we'd compute the dilatation
   // factor as: MinChunkSize/(promoting_gen's min object size)
   // Since we do not have such a general query interface for the
-  // promoting generation, we'll instead just use the minimum
+  // promoting generation, we'll instead just use the mimimum
   // object size (which today is a header's worth of space);
   // note that all arithmetic is in units of HeapWords.
   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
@@ -274,7 +274,7 @@
 //
 // Let "f" be MinHeapFreeRatio in
 //
-//    _initiating_occupancy = 100-f +
+//    _intiating_occupancy = 100-f +
 //                           f * (CMSTriggerRatio/100)
 //   where CMSTriggerRatio is the argument "tr" below.
 //
@@ -958,7 +958,7 @@
                            desired_free_percentage);
     gclog_or_tty->print_cr("  Maximum free fraction %f",
                            maximum_free_percentage);
-    gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
+    gclog_or_tty->print_cr("  Capactiy "SIZE_FORMAT, capacity()/1000);
     gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
                            desired_capacity/1000);
     int prev_level = level() - 1;
@@ -2671,7 +2671,7 @@
 // that it's responsible for collecting, while itself doing any
 // work common to all generations it's responsible for. A similar
 // comment applies to the gc_epilogue()'s.
-// The role of the variable _between_prologue_and_epilogue is to
+// The role of the varaible _between_prologue_and_epilogue is to
 // enforce the invocation protocol.
 void CMSCollector::gc_prologue(bool full) {
   // Call gc_prologue_work() for the CMSGen
@@ -2878,10 +2878,10 @@
 // Check reachability of the given heap address in CMS generation,
 // treating all other generations as roots.
 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
-  // We could "guarantee" below, rather than assert, but I'll
+  // We could "guarantee" below, rather than assert, but i'll
   // leave these as "asserts" so that an adventurous debugger
   // could try this in the product build provided some subset of
-  // the conditions were met, provided they were interested in the
+  // the conditions were met, provided they were intersted in the
   // results and knew that the computation below wouldn't interfere
   // with other concurrent computations mutating the structures
   // being read or written.
@@ -2982,7 +2982,7 @@
   // This is as intended, because by this time
   // GC must already have cleared any refs that need to be cleared,
   // and traced those that need to be marked; moreover,
-  // the marking done here is not going to interfere in any
+  // the marking done here is not going to intefere in any
   // way with the marking information used by GC.
 
   NoRefDiscovery no_discovery(ref_processor());
@@ -3000,7 +3000,7 @@
 
   if (CMSRemarkVerifyVariant == 1) {
     // In this first variant of verification, we complete
-    // all marking, then check if the new marks-vector is
+    // all marking, then check if the new marks-verctor is
     // a subset of the CMS marks-vector.
     verify_after_remark_work_1();
   } else if (CMSRemarkVerifyVariant == 2) {
@@ -3033,8 +3033,10 @@
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,   // younger gens are roots
                                 true,   // activate StrongRootsScope
+                                false,  // not scavenging
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &notOlder,
+                                true,   // walk code active on stacks
                                 NULL,
                                 NULL); // SSS: Provide correct closure
 
@@ -3099,8 +3101,10 @@
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,   // younger gens are roots
                                 true,   // activate StrongRootsScope
+                                false,  // not scavenging
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &notOlder,
+                                true,   // walk code active on stacks
                                 NULL,
                                 &klass_closure);
 
@@ -3299,7 +3303,7 @@
 void CMSCollector::setup_cms_unloading_and_verification_state() {
   const bool should_verify = VerifyBeforeGC || VerifyAfterGC ||
                              VerifyDuringGC || VerifyBeforeExit;
-  const int rso = SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
+  const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
 
   // We set the proper root for this CMS cycle here.
   if (should_unload_classes()) {   // Should unload classes this cycle
@@ -3311,7 +3315,7 @@
   }
 
   // Not unloading classes this cycle
-  assert(!should_unload_classes(), "Inconsistency!");
+  assert(!should_unload_classes(), "Inconsitency!");
   remove_root_scanning_option(SharedHeap::SO_SystemClasses);
   add_root_scanning_option(SharedHeap::SO_AllClasses);
 
@@ -3397,7 +3401,7 @@
                CMSExpansionCause::_allocate_par_lab);
     // Now go around the loop and try alloc again;
     // A competing par_promote might beat us to the expansion space,
-    // so we may go around the loop again if promotion fails again.
+    // so we may go around the loop again if promotion fails agaion.
     if (GCExpandToAllocateDelayMillis > 0) {
       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
     }
@@ -3678,6 +3682,12 @@
   ResourceMark rm;
   HandleMark  hm;
 
+  FalseClosure falseClosure;
+  // In the case of a synchronous collection, we will elide the
+  // remark step, so it's important to catch all the nmethod oops
+  // in this step.
+  // The final 'true' flag to gen_process_strong_roots will ensure this.
+  // If 'async' is true, we can relax the nmethod tracing.
   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
@@ -3728,8 +3738,10 @@
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,   // younger gens are roots
                                 true,   // activate StrongRootsScope
+                                false,  // not scavenging
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &notOlder,
+                                true,   // walk all of code cache if (so & SO_CodeCache)
                                 NULL,
                                 &klass_closure);
 }
@@ -4361,7 +4373,7 @@
   // should really use wait/notify, which is the recommended
   // way of doing this type of interaction. Additionally, we should
   // consolidate the eight methods that do the yield operation and they
-  // are almost identical into one for better maintainability and
+  // are almost identical into one for better maintenability and
   // readability. See 6445193.
   //
   // Tony 2006.06.29
@@ -4529,7 +4541,7 @@
       // If Eden's current occupancy is below this threshold,
       // immediately schedule the remark; else preclean
      // past the next scavenge in an effort to
-      // schedule the pause as described above. By choosing
+      // schedule the pause as described avove. By choosing
      // CMSScheduleRemarkEdenSizeThreshold >= max eden size
      // we will never do an actual abortable preclean cycle.
       if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
@@ -5226,12 +5238,14 @@
   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                 false,     // yg was scanned above
                                 false,     // this is parallel code
+                                false,     // not scavenging
                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                 &par_mri_cl,
+                                true,   // walk all of code cache if (so & SO_CodeCache)
                                 NULL,
                                 &klass_closure);
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
@@ -5361,12 +5375,14 @@
   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                 false,     // yg was scanned above
                                 false,     // this is parallel code
+                                false,     // not scavenging
                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                 &par_mrias_cl,
+                                true,   // walk all of code cache if (so & SO_CodeCache)
                                 NULL,
                                 NULL);     // The dirty klasses will be handled below
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
@@ -5521,8 +5537,8 @@
 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
 // CAUTION: This closure has state that persists across calls to
 // the work method dirty_range_iterate_clear() in that it has
-// embedded in it a (subtype of) UpwardsObjectClosure. The
-// use of that state in the embedded UpwardsObjectClosure instance
+// imbedded in it a (subtype of) UpwardsObjectClosure. The
+// use of that state in the imbedded UpwardsObjectClosure instance
 // assumes that the cards are always iterated (even if in parallel
 // by several threads) in monotonically increasing order per each
 // thread. This is true of the implementation below which picks
@@ -5537,7 +5553,7 @@
 // sure that the changes there do not run counter to the
 // assumptions made here and necessary for correctness and
 // efficiency. Note also that this code might yield inefficient
-// behavior in the case of very large objects that span one or
+// behaviour in the case of very large objects that span one or
 // more work chunks. Such objects would potentially be scanned
 // several times redundantly. Work on 4756801 should try and
 // address that performance anomaly if at all possible. XXX
@@ -5563,7 +5579,7 @@
 
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
     // Having claimed the nth_task, compute corresponding mem-region,
-    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
+    // which is a-fortiori aligned correctly (i.e. at a MUT bopundary).
     // The alignment restriction ensures that we do not need any
     // synchronization with other gang-workers while setting or
     // clearing bits in thus chunk of the MUT.
@@ -5950,13 +5966,15 @@
 
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,  // younger gens as roots
                                 false, // use the local StrongRootsScope
+                                false, // not scavenging
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &mrias_cl,
+                                true,   // walk code active on stacks
                                 NULL,
                                 NULL);  // The dirty klasses will be handled below
 
   assert(should_unload_classes()
-         || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
+         || (roots_scanning_options() & SharedHeap::SO_CodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
 }
@@ -6353,7 +6371,7 @@
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();
 
-  // We need to use a monotonically non-decreasing time in ms
+  // We need to use a monotonically non-deccreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@@ -6714,7 +6732,7 @@
     warning("CMS bit map allocation failure");
     return false;
   }
-  // For now we'll just commit all of the bit map up front.
+  // For now we'll just commit all of the bit map up fromt.
   // Later on we'll try to be more parsimonious with swap.
   if (!_virtual_space.initialize(brs, brs.size())) {
     warning("CMS bit map backing store failure");
@@ -6821,8 +6839,8 @@
 
 // XXX FIX ME !!! In the MT case we come in here holding a
 // leaf lock. For printing we need to take a further lock
-// which has lower rank. We need to recalibrate the two
-// lock-ranks involved in order to be able to print the
+// which has lower rank. We need to recallibrate the two
+// lock-ranks involved in order to be able to rpint the
 // messages below. (Or defer the printing to the caller.
 // For now we take the expedient path of just disabling the
 // messages for the problematic case.)
@@ -7162,7 +7180,7 @@
     }
 #endif // ASSERT
   } else {
-    // An uninitialized object.
+    // an unitialized object
     assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
     size = pointer_delta(nextOneAddr + 1, addr);
@@ -7170,7 +7188,7 @@
            "alignment problem");
     // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
     // will dirty the card when the klass pointer is installed in the
-    // object (signaling the completion of initialization).
+    // object (signalling the completion of initialization).
   }
 } else {
   // Either a not yet marked object or an uninitialized object
@@ -7231,7 +7249,7 @@
   HeapWord* addr = (HeapWord*)p;
   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
   assert(!_span.contains(addr), "we are scanning the survivor spaces");
-  assert(p->klass_or_null() != NULL, "object should be initialized");
+  assert(p->klass_or_null() != NULL, "object should be initializd");
   // an initialized object; ignore mark word in verification below
   // since we are running concurrent with mutators
   assert(p->is_oop(true), "should be an oop");
@@ -7981,7 +7999,7 @@
     // we need to dirty all of the cards that the object spans,
     // since the rescan of object arrays will be limited to the
     // dirty cards.
-    // Note that no one can be interfering with us in this action
+    // Note that no one can be intefering with us in this action
     // of dirtying the mod union table, so no locking or atomics
     // are required.
     if (obj->is_objArray()) {
@@ -9007,7 +9025,7 @@
 
 // It's OK to call this multi-threaded; the worst thing
 // that can happen is that we'll get a bunch of closely
-// spaced simulated overflows, but that's OK, in fact
+// spaced simulated oveflows, but that's OK, in fact
 // probably good as it would exercise the overflow code
 // under contention.
 bool CMSCollector::simulate_overflow() {
@@ -9127,7 +9145,7 @@
       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
     }
   } else {
-    // Chop off the suffix and return it to the global list.
+    // Chop off the suffix and rerturn it to the global list.
     assert(cur->mark() != BUSY, "Error");
     oop suffix_head = cur->mark();  // suffix will be put back on global list
     cur->set_mark(NULL);            // break off suffix
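
Aside: the initiating-occupancy formula touched by the @@ -274 hunk above can be sanity-checked in isolation. The sketch below is not HotSpot code; it is a standalone restatement of the comment's formula, with f standing for MinHeapFreeRatio and tr for CMSTriggerRatio, and the sample values are purely illustrative.

#include <cstdio>

// Standalone restatement of the initiating-occupancy formula from the
// @@ -274 hunk above. Not HotSpot code; names and values are illustrative.
static double initiating_occupancy(double f /* MinHeapFreeRatio */,
                                   double tr /* CMSTriggerRatio */) {
  // _initiating_occupancy = 100 - f + f * (tr / 100)
  return 100.0 - f + f * (tr / 100.0);
}

int main() {
  // With f = 40 and tr = 80, a CMS cycle would be initiated once
  // occupancy reaches 100 - 40 + 40 * 0.8 = 92 percent.
  printf("%.1f%%\n", initiating_occupancy(40.0, 80.0));
  return 0;
}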