comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 94:0834225a7916

6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
Summary: The option CMSInitiatingPermOccupancyFraction now controls perm triggering threshold. Even though the actual value of the threshold has not yet been changed, so there is no change in policy, we now have the infrastructure in place for dynamically deciding when to collect the perm gen, an issue that will be addressed in the near future.
Reviewed-by: jmasa
author ysr
date Sun, 16 Mar 2008 21:57:25 -0700
parents 173195ff483a
children ba764ed4b6f2
comparing 15:d825a8a2bd39 with 94:0834225a7916
223 // note that all arithmetic is in units of HeapWords. 223 // note that all arithmetic is in units of HeapWords.
224 assert(MinChunkSize >= oopDesc::header_size(), "just checking"); 224 assert(MinChunkSize >= oopDesc::header_size(), "just checking");
225 assert(_dilatation_factor >= 1.0, "from previous assert"); 225 assert(_dilatation_factor >= 1.0, "from previous assert");
226 } 226 }
227 227
228
229 // The field "_initiating_occupancy" represents the occupancy percentage
230 // at which we trigger a new collection cycle. Unless explicitly specified
231 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
232 // is calculated by:
233 //
234 // Let "f" be MinHeapFreeRatio in
235 //
236 // _initiating_occupancy = 100-f +
237 // f * (CMSTrigger[Perm]Ratio/100)
238 // where CMSTrigger[Perm]Ratio is the argument "tr" below.
239 //
240 // That is, if we assume the heap is at its desired maximum occupancy at the
241 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
242 // space be allocated before initiating a new collection cycle.
243 //
244 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
245 assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
246 if (io >= 0) {
247 _initiating_occupancy = (double)io / 100.0;
248 } else {
249 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
250 (double)(tr * MinHeapFreeRatio) / 100.0)
251 / 100.0;
252 }
253 }
254
255
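For a concrete reading of the comment above, here is a minimal standalone sketch of the same computation (plain C++, not HotSpot code); the flag values passed in main() are assumptions for illustration only. With MinHeapFreeRatio = 40 and CMSTriggerRatio = 80, for example, the derived threshold works out to (100 - 40 + 0.8 * 40) / 100 = 0.92, i.e. a cycle would start at about 92% occupancy.

#include <cstdio>

// Sketch of the same expression evaluated by init_initiating_occupancy()
// above. The parameters mirror CMSInitiating[Perm]OccupancyFraction (io),
// CMSTrigger[Perm]Ratio (tr) and MinHeapFreeRatio; values are illustrative,
// not defaults read from this changeset.
static double initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
  if (io >= 0) {
    return (double)io / 100.0;  // explicitly specified fraction
  }
  return ((100 - min_heap_free_ratio) +
          (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
}

int main() {
  // io < 0 means "not explicitly set"; assume MinHeapFreeRatio=40, CMSTriggerRatio=80.
  std::printf("derived threshold  = %.2f\n", initiating_occupancy(-1, 80, 40));  // 0.92
  // io = 65 means the user asked for collection at 65% occupancy.
  std::printf("explicit threshold = %.2f\n", initiating_occupancy(65, 80, 40));  // 0.65
  return 0;
}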
228 void ConcurrentMarkSweepGeneration::ref_processor_init() { 256 void ConcurrentMarkSweepGeneration::ref_processor_init() {
229 assert(collector() != NULL, "no collector"); 257 assert(collector() != NULL, "no collector");
230 collector()->ref_processor_init(); 258 collector()->ref_processor_init();
231 } 259 }
232 260
518 _icms_start_limit(NULL), 546 _icms_start_limit(NULL),
519 _icms_stop_limit(NULL), 547 _icms_stop_limit(NULL),
520 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"), 548 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
521 _completed_initialization(false), 549 _completed_initialization(false),
522 _collector_policy(cp), 550 _collector_policy(cp),
523 _unload_classes(false),
524 _unloaded_classes_last_cycle(false),
551 _should_unload_classes(false),
552 _concurrent_cycles_since_last_unload(0),
525 _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding) 553 _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
526 { 554 {
527 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) { 555 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
528 ExplicitGCInvokesConcurrent = true; 556 ExplicitGCInvokesConcurrent = true;
529 } 557 }
640 _hash_seed[i] = 17; // copied from ParNew 668 _hash_seed[i] = 17; // copied from ParNew
641 } 669 }
642 } 670 }
643 } 671 }
644 672
645 // "initiatingOccupancy" is the occupancy ratio at which we trigger 673 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
646 // a new collection cycle. Unless explicitly specified via 674 _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
647 // CMSTriggerRatio, it is calculated by: 675
648 // Let "f" be MinHeapFreeRatio in
649 //
650 // intiatingOccupancy = 100-f +
651 // f * (CMSTriggerRatio/100)
652 // That is, if we assume the heap is at its desired maximum occupancy at the
653 // end of a collection, we let CMSTriggerRatio of the (purported) free
654 // space be allocated before initiating a new collection cycle.
655 if (CMSInitiatingOccupancyFraction > 0) {
656 _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
657 } else {
658 _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
659 (double)(CMSTriggerRatio *
660 MinHeapFreeRatio) / 100.0)
661 / 100.0;
662 }
663 // Clip CMSBootstrapOccupancy between 0 and 100. 676 // Clip CMSBootstrapOccupancy between 0 and 100.
664 _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
677 _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
665 /(double)100; 678 /(double)100;
666 679
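One detail in the clipped expression above: the casts change from intx to uintx, presumably because CMSBootstrapOccupancy is itself an unsigned flag and HotSpot's MIN2/MAX2 templates require both arguments to have the same type. A small self-contained sketch of the same clamp, with the templates re-declared locally as stand-ins:

#include <cstdio>

// Local stand-ins for HotSpot's MIN2/MAX2 templates, which require both
// arguments to have the same type.
template <class T> static T MIN2(T a, T b) { return a < b ? a : b; }
template <class T> static T MAX2(T a, T b) { return a > b ? a : b; }

int main() {
  unsigned long CMSBootstrapOccupancy = 150;   // illustrative out-of-range value
  // Clip to [0, 100] and convert to a fraction, as in the constructor above.
  double bootstrap_occupancy =
      (double)MIN2((unsigned long)100, MAX2((unsigned long)0, CMSBootstrapOccupancy)) / 100.0;
  std::printf("bootstrap occupancy = %.2f\n", bootstrap_occupancy);   // prints 1.00
  return 0;
}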
667 _full_gcs_since_conc_gc = 0; 680 _full_gcs_since_conc_gc = 0;
668 681
669 // Now tell CMS generations the identity of their collector 682 // Now tell CMS generations the identity of their collector
1411 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT, 1424 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1412 _cmsGen->contiguous_available()); 1425 _cmsGen->contiguous_available());
1413 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate()); 1426 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1414 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate()); 1427 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1415 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy()); 1428 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1416 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
1429 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1430 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1417 } 1431 }
1418 // ------------------------------------------------------------------ 1432 // ------------------------------------------------------------------
1419 1433
1420 // If the estimated time to complete a cms collection (cms_duration()) 1434 // If the estimated time to complete a cms collection (cms_duration())
1421 // is less than the estimated time remaining until the cms generation 1435 // is less than the estimated time remaining until the cms generation
1444 1458
1445 // Otherwise, we start a collection cycle if either the perm gen or 1459 // Otherwise, we start a collection cycle if either the perm gen or
1446 // old gen want a collection cycle started. Each may use 1460 // old gen want a collection cycle started. Each may use
1447 // an appropriate criterion for making this decision. 1461 // an appropriate criterion for making this decision.
1448 // XXX We need to make sure that the gen expansion 1462 // XXX We need to make sure that the gen expansion
1449 // criterion dovetails well with this.
1450 if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
1463 // criterion dovetails well with this. XXX NEED TO FIX THIS
1464 if (_cmsGen->should_concurrent_collect()) {
1451 if (Verbose && PrintGCDetails) { 1465 if (Verbose && PrintGCDetails) {
1452 gclog_or_tty->print_cr("CMS old gen initiated"); 1466 gclog_or_tty->print_cr("CMS old gen initiated");
1453 } 1467 }
1454 return true; 1468 return true;
1455 } 1469 }
1456 1470
1457 if (cms_should_unload_classes() &&
1458 _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
1459 if (Verbose && PrintGCDetails) {
1460 gclog_or_tty->print_cr("CMS perm gen initiated");
1461 }
1462 return true;
1463 }
1471 // We start a collection if we believe an incremental collection may fail;
1472 // this is not likely to be productive in practice because it's probably too
1473 // late anyway.
1464
1465 return false;
1466 }
1467
1468 // Clear _expansion_cause fields of constituent generations
1469 void CMSCollector::clear_expansion_cause() {
1470 _cmsGen->clear_expansion_cause();
1471 _permGen->clear_expansion_cause();
1472 }
1473
1474 bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
1475 double initiatingOccupancy) {
1476 // We should be conservative in starting a collection cycle. To
1477 // start too eagerly runs the risk of collecting too often in the
1478 // extreme. To collect too rarely falls back on full collections,
1479 // which works, even if not optimum in terms of concurrent work.
1480 // As a work around for too eagerly collecting, use the flag
1481 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1482 // giving the user an easily understandable way of controlling the
1483 // collections.
1484 // We want to start a new collection cycle if any of the following
1485 // conditions hold:
1486 // . our current occupancy exceeds the initiating occupancy, or
1487 // . we recently needed to expand and have not since that expansion,
1488 // collected, or
1489 // . we are not using adaptive free lists and linear allocation is
1490 // going to fail, or
1491 // . (for old gen) incremental collection has already failed or
1492 // may soon fail in the near future as we may not be able to absorb
1493 // promotions.
1494 assert_lock_strong(freelistLock());
1495
1496 if (occupancy() > initiatingOccupancy) {
1497 if (PrintGCDetails && Verbose) {
1498 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1499 short_name(), occupancy(), initiatingOccupancy);
1500 }
1501 return true;
1502 }
1503 if (UseCMSInitiatingOccupancyOnly) {
1504 return false;
1505 }
1506 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1507 if (PrintGCDetails && Verbose) {
1508 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1509 short_name());
1510 }
1511 return true;
1512 }
1513 GenCollectedHeap* gch = GenCollectedHeap::heap(); 1474 GenCollectedHeap* gch = GenCollectedHeap::heap();
1514 assert(gch->collector_policy()->is_two_generation_policy(), 1475 assert(gch->collector_policy()->is_two_generation_policy(),
1515 "You may want to check the correctness of the following"); 1476 "You may want to check the correctness of the following");
1516 if (gch->incremental_collection_will_fail()) { 1477 if (gch->incremental_collection_will_fail()) {
1517 if (PrintGCDetails && Verbose) { 1478 if (PrintGCDetails && Verbose) {
1518 gclog_or_tty->print(" %s: collect because incremental collection will fail ",
1479 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1480 }
1481 return true;
1482 }
1483
1484 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1485 bool res = update_should_unload_classes();
1486 if (res) {
1487 if (Verbose && PrintGCDetails) {
1488 gclog_or_tty->print_cr("CMS perm gen initiated");
1489 }
1490 return true;
1491 }
1492 }
1493 return false;
1494 }
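The function above checks its triggers in a fixed order; the sketch below restates that cascade in self-contained form. The predicate functions are hypothetical stand-ins for the real CMSCollector and generation queries, and the duration/statistics-based trigger from earlier in the function (cms_duration() versus time until the generation fills) is omitted.

#include <cstdio>

// Hypothetical stand-ins for the queries used by
// CMSCollector::shouldConcurrentCollect(); return values chosen for the demo.
static bool cms_gen_wants_collection()         { return false; } // _cmsGen->should_concurrent_collect()
static bool incremental_collection_will_fail() { return false; } // gch->incremental_collection_will_fail()
static bool class_unloading_enabled()          { return true;  } // CMSClassUnloadingEnabled
static bool perm_gen_wants_collection()        { return true;  } // _permGen->should_concurrent_collect()
static bool update_should_unload_classes()     { return true;  } // real method also records the decision

static bool should_start_background_cycle() {
  if (cms_gen_wants_collection())          return true;  // old gen crossed its own threshold
  if (incremental_collection_will_fail())  return true;  // probably too late, but try anyway
  if (class_unloading_enabled() && perm_gen_wants_collection()) {
    // Only start for the perm gen if that also means classes will be unloaded.
    return update_should_unload_classes();
  }
  return false;
}

int main() {
  std::printf("start cycle? %s\n", should_start_background_cycle() ? "yes" : "no");
  return 0;
}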
1495
1496 // Clear _expansion_cause fields of constituent generations
1497 void CMSCollector::clear_expansion_cause() {
1498 _cmsGen->clear_expansion_cause();
1499 _permGen->clear_expansion_cause();
1500 }
1501
1502 // We should be conservative in starting a collection cycle. To
1503 // start too eagerly runs the risk of collecting too often in the
1504 // extreme. To collect too rarely falls back on full collections,
1505 // which works, even if not optimum in terms of concurrent work.
1506 // As a work around for too eagerly collecting, use the flag
1507 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1508 // giving the user an easily understandable way of controlling the
1509 // collections.
1510 // We want to start a new collection cycle if any of the following
1511 // conditions hold:
1512 // . our current occupancy exceeds the configured initiating occupancy
1513 // for this generation, or
1514 // . we recently needed to expand this space and have not, since that
1515 // expansion, done a collection of this generation, or
1516 // . the underlying space believes that it may be a good idea to initiate
1517 // a concurrent collection (this may be based on criteria such as the
1518 // following: the space uses linear allocation and linear allocation is
1519 // going to fail, or there is believed to be excessive fragmentation in
1520 // the generation, etc... or ...
1521 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1522 // the case of the old generation, not the perm generation; see CR 6543076):
1523 // we may be approaching a point at which allocation requests may fail because
1524 // we will be out of sufficient free space given allocation rate estimates.]
1525 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1526
1527 assert_lock_strong(freelistLock());
1528 if (occupancy() > initiating_occupancy()) {
1529 if (PrintGCDetails && Verbose) {
1530 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1531 short_name(), occupancy(), initiating_occupancy());
1532 }
1533 return true;
1534 }
1535 if (UseCMSInitiatingOccupancyOnly) {
1536 return false;
1537 }
1538 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1539 if (PrintGCDetails && Verbose) {
1540 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1519 short_name()); 1541 short_name());
1520 } 1542 }
1521 return true; 1543 return true;
1522 } 1544 }
1523 if (!_cmsSpace->adaptive_freelists() &&
1524 _cmsSpace->linearAllocationWouldFail()) {
1545 if (_cmsSpace->should_concurrent_collect()) {
1525 if (PrintGCDetails && Verbose) { 1546 if (PrintGCDetails && Verbose) {
1526 gclog_or_tty->print(" %s: collect because of linAB ",
1547 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1527 short_name()); 1548 short_name());
1528 } 1549 }
1529 return true; 1550 return true;
1530 } 1551 }
1531 return false; 1552 return false;
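The comment block above lists the per-generation criteria; a compact sketch of the same test follows, with plain values standing in for the generation's state. The real method also asserts that the free list lock is held, which is skipped here.

#include <cstdio>

// Hypothetical stand-ins for ConcurrentMarkSweepGeneration state.
struct GenSketch {
  double occupancy;               // current fraction of the generation in use
  double initiating_occupancy;    // threshold from init_initiating_occupancy()
  bool   expanded_for_allocation; // expansion_cause() == _satisfy_allocation
  bool   space_wants_collection;  // _cmsSpace->should_concurrent_collect()
};

static bool should_concurrent_collect(const GenSketch& g,
                                      bool use_initiating_occupancy_only) {
  if (g.occupancy > g.initiating_occupancy) return true;   // crossed the threshold
  if (use_initiating_occupancy_only)        return false;  // user asked: threshold only
  if (g.expanded_for_allocation)            return true;   // grew since the last collection
  if (g.space_wants_collection)             return true;   // e.g. linear allocation about to fail
  return false;
}

int main() {
  GenSketch old_gen = { 0.87, 0.92, true, false };
  std::printf("collect old gen? %s\n",
              should_concurrent_collect(old_gen, /*UseCMSInitiatingOccupancyOnly*/ false) ? "yes" : "no");
  return 0;
}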
1968 _collectorState = Resetting; 1989 _collectorState = Resetting;
1969 assert(_restart_addr == NULL, 1990 assert(_restart_addr == NULL,
1970 "Should have been NULL'd before baton was passed"); 1991 "Should have been NULL'd before baton was passed");
1971 reset(false /* == !asynch */); 1992 reset(false /* == !asynch */);
1972 _cmsGen->reset_after_compaction(); 1993 _cmsGen->reset_after_compaction();
1973
1974 if (verifying() && !cms_should_unload_classes()) {
1994 _concurrent_cycles_since_last_unload = 0;
1995
1996 if (verifying() && !should_unload_classes()) {
1975 perm_gen_verify_bit_map()->clear_all(); 1997 perm_gen_verify_bit_map()->clear_all();
1976 } 1998 }
1977 1999
1978 // Clear any data recorded in the PLAB chunk arrays. 2000 // Clear any data recorded in the PLAB chunk arrays.
1979 if (_survivor_plab_array != NULL) { 2001 if (_survivor_plab_array != NULL) {
2096 2118
2097 GenCollectedHeap* gch = GenCollectedHeap::heap(); 2119 GenCollectedHeap* gch = GenCollectedHeap::heap();
2098 { 2120 {
2099 bool safepoint_check = Mutex::_no_safepoint_check_flag; 2121 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2100 MutexLockerEx hl(Heap_lock, safepoint_check); 2122 MutexLockerEx hl(Heap_lock, safepoint_check);
2123 FreelistLocker fll(this);
2101 MutexLockerEx x(CGC_lock, safepoint_check); 2124 MutexLockerEx x(CGC_lock, safepoint_check);
2102 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) { 2125 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2103 // The foreground collector is active or we're 2126 // The foreground collector is active or we're
2104 // not using asynchronous collections. Skip this 2127 // not using asynchronous collections. Skip this
2105 // background collection. 2128 // background collection.
2110 _collectorState = InitialMarking; 2133 _collectorState = InitialMarking;
2111 // Reset the expansion cause, now that we are about to begin 2134 // Reset the expansion cause, now that we are about to begin
2112 // a new cycle. 2135 // a new cycle.
2113 clear_expansion_cause(); 2136 clear_expansion_cause();
2114 } 2137 }
2115 _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
2116 // This controls class unloading in response to an explicit gc request.
2117 // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
2118 // we will unload classes even if CMSClassUnloadingEnabled is not set.
2119 // See CR 6541037 and related CRs.
2120 _unload_classes = _full_gc_requested // ... for this cycle
2121 && ExplicitGCInvokesConcurrentAndUnloadsClasses;
2138 // Decide if we want to enable class unloading as part of the
2139 // ensuing concurrent GC cycle.
2140 update_should_unload_classes();
2122 _full_gc_requested = false; // acks all outstanding full gc requests 2141 _full_gc_requested = false; // acks all outstanding full gc requests
2123 // Signal that we are about to start a collection 2142 // Signal that we are about to start a collection
2124 gch->increment_total_full_collections(); // ... starting a collection cycle 2143 gch->increment_total_full_collections(); // ... starting a collection cycle
2125 _collection_count_start = gch->total_full_collections(); 2144 _collection_count_start = gch->total_full_collections();
2126 } 2145 }
3045 assert(overflow_list_is_empty(), "Overflow list should be empty"); 3064 assert(overflow_list_is_empty(), "Overflow list should be empty");
3046 assert(no_preserved_marks(), "No preserved marks"); 3065 assert(no_preserved_marks(), "No preserved marks");
3047 } 3066 }
3048 #endif // PRODUCT 3067 #endif // PRODUCT
3049 3068
3069 // Decide if we want to enable class unloading as part of the
3070 // ensuing concurrent GC cycle. We will collect the perm gen and
3071 // unload classes if it's the case that:
3072 // (1) an explicit gc request has been made and the flag
3073 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3074 // (2) (a) class unloading is enabled at the command line, and
3075 // (b) (i) perm gen threshold has been crossed, or
3076 // (ii) old gen is getting really full, or
3077 // (iii) the previous N CMS collections did not collect the
3078 // perm gen
3079 // NOTE: Provided there is no change in the state of the heap between
3080 // calls to this method, it should have idempotent results. Moreover,
3081 // its results should be monotonically increasing (i.e. going from 0 to 1,
3082 // but not 1 to 0) between successive calls between which the heap was
3083 // not collected. For the implementation below, it must thus rely on
3084 // the property that concurrent_cycles_since_last_unload()
3085 // will not decrease unless a collection cycle happened and that
3086 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3087 // themselves also monotonic in that sense. See check_monotonicity()
3088 // below.
3089 bool CMSCollector::update_should_unload_classes() {
3090 _should_unload_classes = false;
3091 // Condition 1 above
3092 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3093 _should_unload_classes = true;
3094 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3095 // Disjuncts 2.b.(i,ii,iii) above
3096 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3097 CMSClassUnloadingMaxInterval)
3098 || _permGen->should_concurrent_collect()
3099 || _cmsGen->is_too_full();
3100 }
3101 return _should_unload_classes;
3102 }
3103
3104 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3105 bool res = should_concurrent_collect();
3106 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3107 return res;
3108 }
3109
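The decision described in the comment above reduces to a small boolean function. The sketch below restates it with explicit parameters in place of collector state; the real method additionally caches the result in _should_unload_classes, and the argument values in main() are arbitrary.

#include <cstdio>

// Parameters are hypothetical stand-ins for the flags and collector state
// consulted by CMSCollector::update_should_unload_classes().
static bool should_unload_classes(bool full_gc_requested,
                                  bool explicit_gc_unloads_classes,  // ExplicitGCInvokesConcurrentAndUnloadsClasses
                                  bool class_unloading_enabled,      // CMSClassUnloadingEnabled
                                  unsigned cycles_since_last_unload,
                                  unsigned max_interval,             // CMSClassUnloadingMaxInterval
                                  bool perm_gen_wants_collection,
                                  bool old_gen_is_too_full) {
  // Condition (1): explicit GC request with the unloading variant of the flag.
  if (full_gc_requested && explicit_gc_unloads_classes) return true;
  // Condition (2): command-line class unloading plus any of disjuncts (i)-(iii).
  if (class_unloading_enabled) {
    return cycles_since_last_unload >= max_interval
        || perm_gen_wants_collection
        || old_gen_is_too_full;
  }
  return false;
}

int main() {
  std::printf("unload classes this cycle? %s\n",
              should_unload_classes(false, true, true, 3, 5, false, false) ? "yes" : "no");
  return 0;
}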
3050 void CMSCollector::setup_cms_unloading_and_verification_state() { 3110 void CMSCollector::setup_cms_unloading_and_verification_state() {
3051 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC 3111 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3052 || VerifyBeforeExit; 3112 || VerifyBeforeExit;
3053 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings 3113 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3054 | SharedHeap::SO_CodeCache; 3114 | SharedHeap::SO_CodeCache;
3055 3115
3056 if (cms_should_unload_classes()) { // Should unload classes this cycle
3116 if (should_unload_classes()) { // Should unload classes this cycle
3057 remove_root_scanning_option(rso); // Shrink the root set appropriately 3117 remove_root_scanning_option(rso); // Shrink the root set appropriately
3058 set_verifying(should_verify); // Set verification state for this cycle 3118 set_verifying(should_verify); // Set verification state for this cycle
3059 return; // Nothing else needs to be done at this time 3119 return; // Nothing else needs to be done at this time
3060 } 3120 }
3061 3121
3062 // Not unloading classes this cycle 3122 // Not unloading classes this cycle
3063 assert(!cms_should_unload_classes(), "Inconsitency!");
3064 if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
3123 assert(!should_unload_classes(), "Inconsitency!");
3124 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3065 // We were not verifying, or we _were_ unloading classes in the last cycle, 3125 // We were not verifying, or we _were_ unloading classes in the last cycle,
3066 // AND some verification options are enabled this cycle; in this case, 3126 // AND some verification options are enabled this cycle; in this case,
3067 // we must make sure that the deadness map is allocated if not already so, 3127 // we must make sure that the deadness map is allocated if not already so,
3068 // and cleared (if already allocated previously -- 3128 // and cleared (if already allocated previously --
3069 // CMSBitMap::sizeInBits() is used to determine if it's allocated). 3129 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
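For the root-scanning bookkeeping above: remove_root_scanning_option() and add_root_scanning_option() just clear or set bits in an option mask. A generic sketch with hypothetical option bits (the real SO_* constants live in SharedHeap, and the add case is assumed here for symmetry with the not-unloading path of the function):

#include <cstdio>

// Hypothetical option bits; SharedHeap defines the real SO_* values.
enum RootScanOption {
  SO_Symbols   = 1 << 0,
  SO_Strings   = 1 << 1,
  SO_CodeCache = 1 << 2
};

static int g_root_scanning_options = SO_Symbols | SO_Strings | SO_CodeCache;

static void remove_root_scanning_option(int opt) { g_root_scanning_options &= ~opt; }
static void add_root_scanning_option(int opt)    { g_root_scanning_options |=  opt; }

int main() {
  const int rso = SO_Symbols | SO_Strings | SO_CodeCache;
  bool unloading_classes_this_cycle = true;   // illustrative value
  if (unloading_classes_this_cycle) {
    remove_root_scanning_option(rso);  // shrink the root set, as in the source above
  } else {
    add_root_scanning_option(rso);     // keep scanning these as strong roots (assumed branch)
  }
  std::printf("root scanning options = 0x%x\n", g_root_scanning_options);
  return 0;
}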
4691 ResourceMark rm; 4751 ResourceMark rm;
4692 HandleMark hm; 4752 HandleMark hm;
4693 4753
4694 GenCollectedHeap* gch = GenCollectedHeap::heap(); 4754 GenCollectedHeap* gch = GenCollectedHeap::heap();
4695 4755
4696 if (cms_should_unload_classes()) {
4756 if (should_unload_classes()) {
4697 CodeCache::gc_prologue(); 4757 CodeCache::gc_prologue();
4698 } 4758 }
4699 assert(haveFreelistLocks(), "must have free list locks"); 4759 assert(haveFreelistLocks(), "must have free list locks");
4700 assert_lock_strong(bitMapLock()); 4760 assert_lock_strong(bitMapLock());
4701 4761
4751 refProcessingWork(asynch, clear_all_soft_refs); 4811 refProcessingWork(asynch, clear_all_soft_refs);
4752 } 4812 }
4753 verify_work_stacks_empty(); 4813 verify_work_stacks_empty();
4754 verify_overflow_empty(); 4814 verify_overflow_empty();
4755 4815
4756 if (cms_should_unload_classes()) {
4816 if (should_unload_classes()) {
4757 CodeCache::gc_epilogue(); 4817 CodeCache::gc_epilogue();
4758 } 4818 }
4759 4819
4760 // If we encountered any (marking stack / work queue) overflow 4820 // If we encountered any (marking stack / work queue) overflow
4761 // events during the current CMS cycle, take appropriate 4821 // events during the current CMS cycle, take appropriate
5621 NULL); 5681 NULL);
5622 } 5682 }
5623 verify_work_stacks_empty(); 5683 verify_work_stacks_empty();
5624 } 5684 }
5625 5685
5626 if (cms_should_unload_classes()) {
5686 if (should_unload_classes()) {
5627 { 5687 {
5628 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); 5688 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5629 5689
5630 // Follow SystemDictionary roots and unload classes 5690 // Follow SystemDictionary roots and unload classes
5631 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); 5691 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5724 5784
5725 // PermGen verification support: If perm gen sweeping is disabled in 5785 // PermGen verification support: If perm gen sweeping is disabled in
5726 // this cycle, we preserve the perm gen object "deadness" information 5786 // this cycle, we preserve the perm gen object "deadness" information
5727 // in the perm_gen_verify_bit_map. In order to do that we traverse 5787 // in the perm_gen_verify_bit_map. In order to do that we traverse
5728 // all blocks in perm gen and mark all dead objects. 5788 // all blocks in perm gen and mark all dead objects.
5729 if (verifying() && !cms_should_unload_classes()) {
5789 if (verifying() && !should_unload_classes()) {
5730 assert(perm_gen_verify_bit_map()->sizeInBits() != 0, 5790 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5731 "Should have already been allocated"); 5791 "Should have already been allocated");
5732 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(), 5792 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5733 markBitMap(), perm_gen_verify_bit_map()); 5793 markBitMap(), perm_gen_verify_bit_map());
5734 if (asynch) { 5794 if (asynch) {
5751 bitMapLock()); 5811 bitMapLock());
5752 sweepWork(_cmsGen, asynch); 5812 sweepWork(_cmsGen, asynch);
5753 } 5813 }
5754 5814
5755 // Now repeat for perm gen 5815 // Now repeat for perm gen
5756 if (cms_should_unload_classes()) {
5816 if (should_unload_classes()) {
5757 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), 5817 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5758 bitMapLock()); 5818 bitMapLock());
5759 sweepWork(_permGen, asynch); 5819 sweepWork(_permGen, asynch);
5760 } 5820 }
5761 5821
5773 } 5833 }
5774 } else { 5834 } else {
5775 // already have needed locks 5835 // already have needed locks
5776 sweepWork(_cmsGen, asynch); 5836 sweepWork(_cmsGen, asynch);
5777 5837
5778 if (cms_should_unload_classes()) {
5838 if (should_unload_classes()) {
5779 sweepWork(_permGen, asynch); 5839 sweepWork(_permGen, asynch);
5780 } 5840 }
5781 // Update heap occupancy information which is used as 5841 // Update heap occupancy information which is used as
5782 // input to soft ref clearing policy at the next gc. 5842 // input to soft ref clearing policy at the next gc.
5783 Universe::update_heap_info_at_gc(); 5843 Universe::update_heap_info_at_gc();
5935 // destructor; so, do not remove this scope, else the 5995 // destructor; so, do not remove this scope, else the
5936 // end-of-sweep-census below will be off by a little bit. 5996 // end-of-sweep-census below will be off by a little bit.
5937 } 5997 }
5938 gen->cmsSpace()->sweep_completed(); 5998 gen->cmsSpace()->sweep_completed();
5939 gen->cmsSpace()->endSweepFLCensus(sweepCount()); 5999 gen->cmsSpace()->endSweepFLCensus(sweepCount());
6000 if (should_unload_classes()) { // unloaded classes this cycle,
6001 _concurrent_cycles_since_last_unload = 0; // ... reset count
6002 } else { // did not unload classes,
6003 _concurrent_cycles_since_last_unload++; // ... increment count
6004 }
5940 } 6005 }
5941 6006
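The counter bookkeeping just added to sweepWork above is what feeds the CMSClassUnloadingMaxInterval disjunct in update_should_unload_classes(). A small sketch of the counter's lifecycle over successive cycles, with a hypothetical interval value and a trivial driver loop standing in for the collector:

#include <cstdio>

static unsigned concurrent_cycles_since_last_unload = 0;

// Mirrors the end-of-sweep bookkeeping shown above.
static void end_of_sweep(bool unloaded_classes_this_cycle) {
  if (unloaded_classes_this_cycle) {
    concurrent_cycles_since_last_unload = 0;   // unloaded classes: reset count
  } else {
    concurrent_cycles_since_last_unload++;     // did not unload: increment count
  }
}

int main() {
  const unsigned max_interval = 3;             // hypothetical CMSClassUnloadingMaxInterval
  for (int cycle = 1; cycle <= 5; cycle++) {
    // Disjunct 2.b.(i) from the comment block earlier in this changeset.
    bool unload = concurrent_cycles_since_last_unload >= max_interval;
    end_of_sweep(unload);
    std::printf("cycle %d: unloaded=%d, cycles_since_last_unload=%u\n",
                cycle, unload, concurrent_cycles_since_last_unload);
  }
  return 0;
}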
5942 // Reset CMS data structures (for now just the marking bit map) 6007 // Reset CMS data structures (for now just the marking bit map)
5943 // preparatory for the next cycle. 6008 // preparatory for the next cycle.
5944 void CMSCollector::reset(bool asynch) { 6009 void CMSCollector::reset(bool asynch) {
7192 _bitMap(bitMap), 7257 _bitMap(bitMap),
7193 _markStack(markStack), 7258 _markStack(markStack),
7194 _revisitStack(revisitStack), 7259 _revisitStack(revisitStack),
7195 _finger(finger), 7260 _finger(finger),
7196 _parent(parent), 7261 _parent(parent),
7197 _should_remember_klasses(collector->cms_should_unload_classes())
7262 _should_remember_klasses(collector->should_unload_classes())
7198 { } 7263 { }
7199 7264
7200 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, 7265 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7201 MemRegion span, 7266 MemRegion span,
7202 CMSBitMap* bit_map, 7267 CMSBitMap* bit_map,
7215 _overflow_stack(overflow_stack), 7280 _overflow_stack(overflow_stack),
7216 _revisit_stack(revisit_stack), 7281 _revisit_stack(revisit_stack),
7217 _finger(finger), 7282 _finger(finger),
7218 _global_finger_addr(global_finger_addr), 7283 _global_finger_addr(global_finger_addr),
7219 _parent(parent), 7284 _parent(parent),
7220 _should_remember_klasses(collector->cms_should_unload_classes())
7285 _should_remember_klasses(collector->should_unload_classes())
7221 { } 7286 { }
7222 7287
7223 7288
7224 void CMSCollector::lower_restart_addr(HeapWord* low) { 7289 void CMSCollector::lower_restart_addr(HeapWord* low) {
7225 assert(_span.contains(low), "Out of bounds addr"); 7290 assert(_span.contains(low), "Out of bounds addr");
7358 _bit_map(bit_map), 7423 _bit_map(bit_map),
7359 _mod_union_table(mod_union_table), 7424 _mod_union_table(mod_union_table),
7360 _mark_stack(mark_stack), 7425 _mark_stack(mark_stack),
7361 _revisit_stack(revisit_stack), 7426 _revisit_stack(revisit_stack),
7362 _concurrent_precleaning(concurrent_precleaning), 7427 _concurrent_precleaning(concurrent_precleaning),
7363 _should_remember_klasses(collector->cms_should_unload_classes())
7428 _should_remember_klasses(collector->should_unload_classes())
7364 { 7429 {
7365 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); 7430 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7366 } 7431 }
7367 7432
7368 // Grey object rescan during pre-cleaning and second checkpoint phases -- 7433 // Grey object rescan during pre-cleaning and second checkpoint phases --
7420 _collector(collector), 7485 _collector(collector),
7421 _span(span), 7486 _span(span),
7422 _bit_map(bit_map), 7487 _bit_map(bit_map),
7423 _work_queue(work_queue), 7488 _work_queue(work_queue),
7424 _revisit_stack(revisit_stack), 7489 _revisit_stack(revisit_stack),
7425 _should_remember_klasses(collector->cms_should_unload_classes())
7490 _should_remember_klasses(collector->should_unload_classes())
7426 { 7491 {
7427 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); 7492 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7428 } 7493 }
7429 7494
7430 // Grey object rescan during second checkpoint phase -- 7495 // Grey object rescan during second checkpoint phase --
7942 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), 8007 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7943 "alignment problem"); 8008 "alignment problem");
7944 8009
7945 #ifdef DEBUG 8010 #ifdef DEBUG
7946 if (oop(addr)->klass() != NULL && 8011 if (oop(addr)->klass() != NULL &&
7947 ( !_collector->cms_should_unload_classes()
8012 ( !_collector->should_unload_classes()
7948 || oop(addr)->is_parsable())) { 8013 || oop(addr)->is_parsable())) {
7949 // Ignore mark word because we are running concurrent with mutators 8014 // Ignore mark word because we are running concurrent with mutators
7950 assert(oop(addr)->is_oop(true), "live block should be an oop"); 8015 assert(oop(addr)->is_oop(true), "live block should be an oop");
7951 assert(size == 8016 assert(size ==
7952 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()), 8017 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7955 #endif 8020 #endif
7956 8021
7957 } else { 8022 } else {
7958 // This should be an initialized object that's alive. 8023 // This should be an initialized object that's alive.
7959 assert(oop(addr)->klass() != NULL && 8024 assert(oop(addr)->klass() != NULL &&
7960 (!_collector->cms_should_unload_classes()
8025 (!_collector->should_unload_classes()
7961 || oop(addr)->is_parsable()), 8026 || oop(addr)->is_parsable()),
7962 "Should be an initialized object"); 8027 "Should be an initialized object");
7963 // Ignore mark word because we are running concurrent with mutators 8028 // Ignore mark word because we are running concurrent with mutators
7964 assert(oop(addr)->is_oop(true), "live block should be an oop"); 8029 assert(oop(addr)->is_oop(true), "live block should be an oop");
7965 // Verify that the bit map has no bits marked between 8030 // Verify that the bit map has no bits marked between