comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents d2a62e0f25eb
children 9646b7ff4d14
6724:36d1d483d5d6 6725:da91efe96a93
21 * questions. 21 * questions.
22 * 22 *
23 */ 23 */
24 24
25 #include "precompiled.hpp" 25 #include "precompiled.hpp"
26 #include "classfile/classLoaderData.hpp"
26 #include "classfile/symbolTable.hpp" 27 #include "classfile/symbolTable.hpp"
27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/systemDictionary.hpp"
28 #include "code/codeCache.hpp" 29 #include "code/codeCache.hpp"
29 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp" 30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
30 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp" 31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
258 } 259 }
259 260
260 261
261 // The field "_initiating_occupancy" represents the occupancy percentage 262 // The field "_initiating_occupancy" represents the occupancy percentage
262 // at which we trigger a new collection cycle. Unless explicitly specified 263 // at which we trigger a new collection cycle. Unless explicitly specified
263 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it 264 // via CMSInitiatingOccupancyFraction (argument "io" below), it
264 // is calculated by: 265 // is calculated by:
265 // 266 //
266 // Let "f" be MinHeapFreeRatio in 267 // Let "f" be MinHeapFreeRatio in
267 // 268 //
268 // _initiating_occupancy = 100-f + 269 // _initiating_occupancy = 100-f +
269 // f * (CMSTrigger[Perm]Ratio/100) 270 // f * (CMSTriggerRatio/100)
270 // where CMSTrigger[Perm]Ratio is the argument "tr" below. 271 // where CMSTriggerRatio is the argument "tr" below.
271 // 272 //
272 // That is, if we assume the heap is at its desired maximum occupancy at the 273 // That is, if we assume the heap is at its desired maximum occupancy at the
273 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free 274 // end of a collection, we let CMSTriggerRatio of the (purported) free
274 // space be allocated before initiating a new collection cycle. 275 // space be allocated before initiating a new collection cycle.
275 // 276 //
276 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) { 277 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
277 assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments"); 278 assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
278 if (io >= 0) { 279 if (io >= 0) {
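
A quick worked example of the formula in the comment above, as a standalone C++ sketch (illustrative only, not the HotSpot implementation; the helper name and the example values 40 for MinHeapFreeRatio and 80 for CMSTriggerRatio are assumptions used for illustration):

#include <cstdio>

// Mirrors the calculation described in the comment: "io" stands in for
// CMSInitiatingOccupancyFraction and "tr" for CMSTriggerRatio; a negative
// "io" means the fraction was not set explicitly.
static double initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
  if (io >= 0) {
    return (double)io / 100.0;                     // explicit setting wins
  }
  double f = (double)min_heap_free_ratio;
  // _initiating_occupancy = 100 - f + f * (tr / 100)
  return ((100.0 - f) + f * (double)tr / 100.0) / 100.0;
}

int main() {
  // Example: io unset (-1), MinHeapFreeRatio = 40, CMSTriggerRatio = 80
  // gives (60 + 32) / 100 = 0.92, i.e. a cycle starts at ~92% occupancy.
  std::printf("%.2f\n", initiating_occupancy(-1, 80, 40));
  return 0;
}
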
302 &_is_alive_closure, // closure for liveness info 303 &_is_alive_closure, // closure for liveness info
303 false); // next field updates do not need write barrier 304 false); // next field updates do not need write barrier
304 // Initialize the _ref_processor field of CMSGen 305 // Initialize the _ref_processor field of CMSGen
305 _cmsGen->set_ref_processor(_ref_processor); 306 _cmsGen->set_ref_processor(_ref_processor);
306 307
307 // Allocate a dummy ref processor for perm gen.
308 ReferenceProcessor* rp2 = new ReferenceProcessor();
309 if (rp2 == NULL) {
310 vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
311 }
312 _permGen->set_ref_processor(rp2);
313 } 308 }
314 } 309 }
315 310
316 CMSAdaptiveSizePolicy* CMSCollector::size_policy() { 311 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
317 GenCollectedHeap* gch = GenCollectedHeap::heap(); 312 GenCollectedHeap* gch = GenCollectedHeap::heap();
544 CMSCollector::Idling; 539 CMSCollector::Idling;
545 bool CMSCollector::_foregroundGCIsActive = false; 540 bool CMSCollector::_foregroundGCIsActive = false;
546 bool CMSCollector::_foregroundGCShouldWait = false; 541 bool CMSCollector::_foregroundGCShouldWait = false;
547 542
548 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen, 543 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
549 ConcurrentMarkSweepGeneration* permGen,
550 CardTableRS* ct, 544 CardTableRS* ct,
551 ConcurrentMarkSweepPolicy* cp): 545 ConcurrentMarkSweepPolicy* cp):
552 _cmsGen(cmsGen), 546 _cmsGen(cmsGen),
553 _permGen(permGen),
554 _ct(ct), 547 _ct(ct),
555 _ref_processor(NULL), // will be set later 548 _ref_processor(NULL), // will be set later
556 _conc_workers(NULL), // may be set later 549 _conc_workers(NULL), // may be set later
557 _abort_preclean(false), 550 _abort_preclean(false),
558 _start_sampling(false), 551 _start_sampling(false),
559 _between_prologue_and_epilogue(false), 552 _between_prologue_and_epilogue(false),
560 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"), 553 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
561 _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
562 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize), 554 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
563 -1 /* lock-free */, "No_lock" /* dummy */), 555 -1 /* lock-free */, "No_lock" /* dummy */),
564 _modUnionClosure(&_modUnionTable), 556 _modUnionClosure(&_modUnionTable),
565 _modUnionClosurePar(&_modUnionTable), 557 _modUnionClosurePar(&_modUnionTable),
566 // Adjust my span to cover old (cms) gen and perm gen 558 // Adjust my span to cover old (cms) gen
567 _span(cmsGen->reserved()._union(permGen->reserved())), 559 _span(cmsGen->reserved()),
568 // Construct the is_alive_closure with _span & markBitMap 560 // Construct the is_alive_closure with _span & markBitMap
569 _is_alive_closure(_span, &_markBitMap), 561 _is_alive_closure(_span, &_markBitMap),
570 _restart_addr(NULL), 562 _restart_addr(NULL),
571 _overflow_list(NULL), 563 _overflow_list(NULL),
572 _stats(cmsGen), 564 _stats(cmsGen),
604 } 596 }
605 // Now expand the span and allocate the collection support structures 597 // Now expand the span and allocate the collection support structures
606 // (MUT, marking bit map etc.) to cover both generations subject to 598 // (MUT, marking bit map etc.) to cover both generations subject to
607 // collection. 599 // collection.
608 600
609 // First check that _permGen is adjacent to _cmsGen and above it.
610 assert( _cmsGen->reserved().word_size() > 0
611 && _permGen->reserved().word_size() > 0,
612 "generations should not be of zero size");
613 assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
614 "_cmsGen and _permGen should not overlap");
615 assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
616 "_cmsGen->end() different from _permGen->start()");
617
618 // For use by dirty card to oop closures. 601 // For use by dirty card to oop closures.
619 _cmsGen->cmsSpace()->set_collector(this); 602 _cmsGen->cmsSpace()->set_collector(this);
620 _permGen->cmsSpace()->set_collector(this);
621 603
622 // Allocate MUT and marking bit map 604 // Allocate MUT and marking bit map
623 { 605 {
624 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag); 606 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
625 if (!_markBitMap.allocate(_span)) { 607 if (!_markBitMap.allocate(_span)) {
633 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?"); 615 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
634 } 616 }
635 617
636 if (!_markStack.allocate(MarkStackSize)) { 618 if (!_markStack.allocate(MarkStackSize)) {
637 warning("Failed to allocate CMS Marking Stack"); 619 warning("Failed to allocate CMS Marking Stack");
638 return;
639 }
640 if (!_revisitStack.allocate(CMSRevisitStackSize)) {
641 warning("Failed to allocate CMS Revisit Stack");
642 return; 620 return;
643 } 621 }
644 622
645 // Support for multi-threaded concurrent phases 623 // Support for multi-threaded concurrent phases
646 if (CMSConcurrentMTEnabled) { 624 if (CMSConcurrentMTEnabled) {
708 } 686 }
709 } 687 }
710 } 688 }
711 689
712 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio); 690 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
713 _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
714 691
715 // Clip CMSBootstrapOccupancy between 0 and 100. 692 // Clip CMSBootstrapOccupancy between 0 and 100.
716 _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy))) 693 _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
717 /(double)100; 694 /(double)100;
718 695
799 || ( _survivor_chunk_capacity == 0 776 || ( _survivor_chunk_capacity == 0
800 && _survivor_chunk_index == 0), 777 && _survivor_chunk_index == 0),
801 "Error"); 778 "Error");
802 779
803 // Choose what strong roots should be scanned depending on verification options 780 // Choose what strong roots should be scanned depending on verification options
804 // and perm gen collection mode.
805 if (!CMSClassUnloadingEnabled) { 781 if (!CMSClassUnloadingEnabled) {
806 // If class unloading is disabled we want to include all classes into the root set. 782 // If class unloading is disabled we want to include all classes into the root set.
807 add_root_scanning_option(SharedHeap::SO_AllClasses); 783 add_root_scanning_option(SharedHeap::SO_AllClasses);
808 } else { 784 } else {
809 add_root_scanning_option(SharedHeap::SO_SystemClasses); 785 add_root_scanning_option(SharedHeap::SO_SystemClasses);
1052 assert(_markBitMap.covers(start, size), "Out of bounds"); 1028 assert(_markBitMap.covers(start, size), "Out of bounds");
1053 if (_collectorState >= Marking) { 1029 if (_collectorState >= Marking) {
1054 MutexLockerEx y(_markBitMap.lock(), 1030 MutexLockerEx y(_markBitMap.lock(),
1055 Mutex::_no_safepoint_check_flag); 1031 Mutex::_no_safepoint_check_flag);
1056 // [see comments preceding SweepClosure::do_blk() below for details] 1032 // [see comments preceding SweepClosure::do_blk() below for details]
1033 //
1034 // Can the P-bits be deleted now? JJJ
1035 //
1057 // 1. need to mark the object as live so it isn't collected 1036 // 1. need to mark the object as live so it isn't collected
1058 // 2. need to mark the 2nd bit to indicate the object may be uninitialized 1037 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1059 // 3. need to mark the end of the object so marking, precleaning or sweeping 1038 // 3. need to mark the end of the object so marking, precleaning or sweeping
1060 // can skip over uninitialized or unparsable objects. An allocated 1039 // can skip over uninitialized or unparsable objects. An allocated
1061 // object is considered uninitialized for our purposes as long as 1040 // object is considered uninitialized for our purposes as long as
1062 // its klass word is NULL. (Unparsable objects are those which are 1041 // its klass word is NULL. All old gen objects are parsable
1063 // initialized in the sense just described, but whose sizes can still
1064 // not be correctly determined. Note that the class of unparsable objects
1065 // can only occur in the perm gen. All old gen objects are parsable
1066 // as soon as they are initialized.) 1042 // as soon as they are initialized.
1067 _markBitMap.mark(start); // object is live 1043 _markBitMap.mark(start); // object is live
1068 _markBitMap.mark(start + 1); // object is potentially uninitialized? 1044 _markBitMap.mark(start + 1); // object is potentially uninitialized?
1069 _markBitMap.mark(start + size - 1); 1045 _markBitMap.mark(start + size - 1);
1070 // mark end of object 1046 // mark end of object
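
The three marks described above can be sketched in isolation. This is a minimal, self-contained illustration (the bitmap type and helper names are invented for the example, not the CMSBitMap API) of how marking start, start+1, and the last word lets a scanner size and skip a block whose klass word is still NULL:

#include <vector>
#include <cstddef>
#include <cassert>

// One bit per heap word; true == marked.
typedef std::vector<bool> WordBitmap;

// Mark a just-promoted, possibly uninitialized object as described above.
void mark_promoted(WordBitmap& bm, std::size_t start, std::size_t size) {
  assert(size >= 3 && "sketch assumes the three marks land on distinct words");
  bm[start] = true;              // 1. object is live
  bm[start + 1] = true;          // 2. object may still be uninitialized
  bm[start + size - 1] = true;   // 3. end of object, so scanners can skip it
}

// Find the next block without consulting the (possibly NULL) klass word:
// scan forward for the end-of-object mark.
std::size_t next_block_start(const WordBitmap& bm, std::size_t start) {
  assert(bm[start] && bm[start + 1] && "expected an uninitialized-object block");
  std::size_t last = start + 2;
  while (last < bm.size() && !bm[last]) last++;   // locate the end-of-object bit
  return last + 1;
}

int main() {
  WordBitmap bm(64, false);
  mark_promoted(bm, 10, 8);                        // block of 8 words at word 10
  return next_block_start(bm, 10) == 18 ? 0 : 1;   // next block begins at word 18
}
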
1322 // ----------------------------------------------------- 1298 // -----------------------------------------------------
1323 // FREE: klass_word & 1 == 1; mark_word holds block size 1299 // FREE: klass_word & 1 == 1; mark_word holds block size
1324 // 1300 //
1325 // OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0; 1301 // OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1326 // obj->size() computes correct size 1302 // obj->size() computes correct size
1327 // [Perm Gen objects needs to be "parsable" before they can be navigated]
1328 // 1303 //
1329 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT 1304 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1330 // 1305 //
1331 // STATE IDENTIFICATION: (64 bit+COOPS) 1306 // STATE IDENTIFICATION: (64 bit+COOPS)
1332 // ------------------------------------ 1307 // ------------------------------------
1333 // FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size 1308 // FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1334 // 1309 //
1335 // OBJECT: klass_word installed; klass_word != 0; 1310 // OBJECT: klass_word installed; klass_word != 0;
1336 // obj->size() computes correct size 1311 // obj->size() computes correct size
1337 // [Perm Gen comment above continues to hold]
1338 // 1312 //
1339 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT 1313 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1340 // 1314 //
1341 // 1315 //
1342 // STATE TRANSITION DIAGRAM 1316 // STATE TRANSITION DIAGRAM
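
The state-identification rules above reduce to a small predicate. Here is a hedged standalone sketch (illustrative only; the raw klass word is simplified to an integer and the names are invented, this is not the real oopDesc/FreeChunk layout) of the non-compressed-oops variant:

#include <cstdint>

enum BlockState { FREE_CHUNK, OBJECT, TRANSIENT };

// Following the comment above: a FREE chunk has the low bit of its klass
// word set (its size lives in the mark word); an OBJECT has a non-zero,
// even klass word and obj->size() works; a zero klass word means the block
// is still TRANSIENT and its size is indeterminate.
BlockState classify(uintptr_t klass_word) {
  if (klass_word == 0) return TRANSIENT;
  if (klass_word & 1)  return FREE_CHUNK;
  return OBJECT;
}

int main() {
  return (classify(0)      == TRANSIENT  &&
          classify(0x1001) == FREE_CHUNK &&
          classify(0x1000) == OBJECT) ? 0 : 1;
}
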
1401 obj->set_mark(m); 1375 obj->set_mark(m);
1402 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here."); 1376 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1403 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size"); 1377 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1404 OrderAccess::storestore(); 1378 OrderAccess::storestore();
1405 1379
1406 if (UseCompressedOops) { 1380 if (UseCompressedKlassPointers) {
1407 // Copy gap missed by (aligned) header size calculation below 1381 // Copy gap missed by (aligned) header size calculation below
1408 obj->set_klass_gap(old->klass_gap()); 1382 obj->set_klass_gap(old->klass_gap());
1409 } 1383 }
1410 if (word_sz > (size_t)oopDesc::header_size()) { 1384 if (word_sz > (size_t)oopDesc::header_size()) {
1411 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(), 1385 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1462 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; 1436 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1463 ParScanWithoutBarrierClosure* dummy_cl = NULL; 1437 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1464 ps->promo.promoted_oops_iterate_nv(dummy_cl); 1438 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1465 } 1439 }
1466 1440
1467 // XXXPERM
1468 bool ConcurrentMarkSweepGeneration::should_collect(bool full, 1441 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1469 size_t size, 1442 size_t size,
1470 bool tlab) 1443 bool tlab)
1471 { 1444 {
1472 // We allow a STW collection only if a full 1445 // We allow a STW collection only if a full
1515 _cmsGen->contiguous_available()); 1488 _cmsGen->contiguous_available());
1516 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate()); 1489 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1517 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate()); 1490 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1518 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy()); 1491 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1519 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy()); 1492 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1520 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy()); 1493 gclog_or_tty->print_cr("metadata initialized %d",
1494 MetaspaceGC::should_concurrent_collect());
1521 } 1495 }
1522 // ------------------------------------------------------------------ 1496 // ------------------------------------------------------------------
1523 1497
1524 // If the estimated time to complete a cms collection (cms_duration()) 1498 // If the estimated time to complete a cms collection (cms_duration())
1525 // is less than the estimated time remaining until the cms generation 1499 // is less than the estimated time remaining until the cms generation
1544 return true; 1518 return true;
1545 } 1519 }
1546 } 1520 }
1547 } 1521 }
1548 1522
1549 // Otherwise, we start a collection cycle if either the perm gen or 1523 // Otherwise, we start a collection cycle if
1550 // old gen want a collection cycle started. Each may use 1524 // the old gen wants a collection cycle started. It may use
1551 // an appropriate criterion for making this decision. 1525 // an appropriate criterion for making this decision.
1552 // XXX We need to make sure that the gen expansion 1526 // XXX We need to make sure that the gen expansion
1553 // criterion dovetails well with this. XXX NEED TO FIX THIS 1527 // criterion dovetails well with this. XXX NEED TO FIX THIS
1554 if (_cmsGen->should_concurrent_collect()) { 1528 if (_cmsGen->should_concurrent_collect()) {
1569 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail "); 1543 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1570 } 1544 }
1571 return true; 1545 return true;
1572 } 1546 }
1573 1547
1574 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) { 1548 if (MetaspaceGC::should_concurrent_collect()) {
1575 bool res = update_should_unload_classes();
1576 if (res) {
1577 if (Verbose && PrintGCDetails) { 1549 if (Verbose && PrintGCDetails) {
1578 gclog_or_tty->print_cr("CMS perm gen initiated"); 1550 gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1579 } 1551 }
1580 return true; 1552 return true;
1581 } 1553 }
1582 } 1554
1583 return false; 1555 return false;
1584 } 1556 }
1585 1557
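
Collecting the triggers visible in this hunk into one place, a hedged standalone restatement of the post-PermGen decision looks roughly like this (parameter names are invented stand-ins for the queries made by the real method, which consults the generations and MetaspaceGC directly):

// Illustrative summary only, not the HotSpot method.
bool should_start_concurrent_cycle(bool old_gen_wants_collection,         // _cmsGen->should_concurrent_collect()
                                   bool incremental_collection_will_fail, // heap predicts promotion failure
                                   bool metaspace_wants_collection)       // MetaspaceGC::should_concurrent_collect()
{
  if (old_gen_wants_collection)         return true;
  if (incremental_collection_will_fail) return true;
  if (metaspace_wants_collection)       return true;  // replaces the old perm gen trigger
  return false;
}

int main() { return should_start_concurrent_cycle(false, false, true) ? 0 : 1; }
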
1586 // Clear _expansion_cause fields of constituent generations 1558 // Clear _expansion_cause fields of constituent generations
1587 void CMSCollector::clear_expansion_cause() { 1559 void CMSCollector::clear_expansion_cause() {
1588 _cmsGen->clear_expansion_cause(); 1560 _cmsGen->clear_expansion_cause();
1589 _permGen->clear_expansion_cause();
1590 } 1561 }
1591 1562
1592 // We should be conservative in starting a collection cycle. To 1563 // We should be conservative in starting a collection cycle. To
1593 // start too eagerly runs the risk of collecting too often in the 1564 // start too eagerly runs the risk of collecting too often in the
1594 // extreme. To collect too rarely falls back on full collections, 1565 // extreme. To collect too rarely falls back on full collections,
1607 // a concurrent collection (this may be based on criteria such as the 1578 // a concurrent collection (this may be based on criteria such as the
1608 // following: the space uses linear allocation and linear allocation is 1579 // following: the space uses linear allocation and linear allocation is
1609 // going to fail, or there is believed to be excessive fragmentation in 1580 // going to fail, or there is believed to be excessive fragmentation in
1610 // the generation, etc... or ... 1581 // the generation, etc... or ...
1611 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for 1582 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1612 // the case of the old generation, not the perm generation; see CR 6543076): 1583 // the case of the old generation; see CR 6543076):
1613 // we may be approaching a point at which allocation requests may fail because 1584 // we may be approaching a point at which allocation requests may fail because
1614 // we will be out of sufficient free space given allocation rate estimates.] 1585 // we will be out of sufficient free space given allocation rate estimates.]
1615 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const { 1586 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1616 1587
1617 assert_lock_strong(freelistLock()); 1588 assert_lock_strong(freelistLock());
1900 clear_expansion_cause(); 1871 clear_expansion_cause();
1901 _foregroundGCIsActive = false; 1872 _foregroundGCIsActive = false;
1902 return; 1873 return;
1903 } 1874 }
1904 1875
1905 // Resize the perm generation and the tenured generation 1876 // Resize the tenured generation
1906 // after obtaining the free list locks for the 1877 // after obtaining the free list locks for the
1907 // two generations. 1878 // two generations.
1908 void CMSCollector::compute_new_size() { 1879 void CMSCollector::compute_new_size() {
1909 assert_locked_or_safepoint(Heap_lock); 1880 assert_locked_or_safepoint(Heap_lock);
1910 FreelistLocker z(this); 1881 FreelistLocker z(this);
1911 _permGen->compute_new_size(); 1882 MetaspaceGC::compute_new_size();
1912 _cmsGen->compute_new_size(); 1883 _cmsGen->compute_new_size();
1913 } 1884 }
1914 1885
1915 // A work method used by foreground collection to determine 1886 // A work method used by foreground collection to determine
1916 // what type of collection (compacting or not, continuing or fresh) 1887 // what type of collection (compacting or not, continuing or fresh)
2010 // all clear. If we are assuming the collection from an asynchronous 1981 // all clear. If we are assuming the collection from an asynchronous
2011 // collection, clear the _modUnionTable. 1982 // collection, clear the _modUnionTable.
2012 assert(_collectorState != Idling || _modUnionTable.isAllClear(), 1983 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2013 "_modUnionTable should be clear if the baton was not passed"); 1984 "_modUnionTable should be clear if the baton was not passed");
2014 _modUnionTable.clear_all(); 1985 _modUnionTable.clear_all();
1986 assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1987 "mod union for klasses should be clear if the baton was passed");
1988 _ct->klass_rem_set()->clear_mod_union();
2015 1989
2016 // We must adjust the allocation statistics being maintained 1990 // We must adjust the allocation statistics being maintained
2017 // in the free list space. We do so by reading and clearing 1991 // in the free list space. We do so by reading and clearing
2018 // the sweep timer and updating the block flux rate estimates below. 1992 // the sweep timer and updating the block flux rate estimates below.
2019 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive"); 1993 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2048 "Should have been NULL'd before baton was passed"); 2022 "Should have been NULL'd before baton was passed");
2049 reset(false /* == !asynch */); 2023 reset(false /* == !asynch */);
2050 _cmsGen->reset_after_compaction(); 2024 _cmsGen->reset_after_compaction();
2051 _concurrent_cycles_since_last_unload = 0; 2025 _concurrent_cycles_since_last_unload = 0;
2052 2026
2053 if (verifying() && !should_unload_classes()) {
2054 perm_gen_verify_bit_map()->clear_all();
2055 }
2056
2057 // Clear any data recorded in the PLAB chunk arrays. 2027 // Clear any data recorded in the PLAB chunk arrays.
2058 if (_survivor_plab_array != NULL) { 2028 if (_survivor_plab_array != NULL) {
2059 reset_survivor_plab_arrays(); 2029 reset_survivor_plab_arrays();
2060 } 2030 }
2061 2031
2115 2085
2116 void CMSCollector::getFreelistLocks() const { 2086 void CMSCollector::getFreelistLocks() const {
2117 // Get locks for all free lists in all generations that this 2087 // Get locks for all free lists in all generations that this
2118 // collector is responsible for 2088 // collector is responsible for
2119 _cmsGen->freelistLock()->lock_without_safepoint_check(); 2089 _cmsGen->freelistLock()->lock_without_safepoint_check();
2120 _permGen->freelistLock()->lock_without_safepoint_check();
2121 } 2090 }
2122 2091
2123 void CMSCollector::releaseFreelistLocks() const { 2092 void CMSCollector::releaseFreelistLocks() const {
2124 // Release locks for all free lists in all generations that this 2093 // Release locks for all free lists in all generations that this
2125 // collector is responsible for 2094 // collector is responsible for
2126 _cmsGen->freelistLock()->unlock(); 2095 _cmsGen->freelistLock()->unlock();
2127 _permGen->freelistLock()->unlock();
2128 } 2096 }
2129 2097
2130 bool CMSCollector::haveFreelistLocks() const { 2098 bool CMSCollector::haveFreelistLocks() const {
2131 // Check locks for all free lists in all generations that this 2099 // Check locks for all free lists in all generations that this
2132 // collector is responsible for 2100 // collector is responsible for
2133 assert_lock_strong(_cmsGen->freelistLock()); 2101 assert_lock_strong(_cmsGen->freelistLock());
2134 assert_lock_strong(_permGen->freelistLock());
2135 PRODUCT_ONLY(ShouldNotReachHere()); 2102 PRODUCT_ONLY(ShouldNotReachHere());
2136 return true; 2103 return true;
2137 } 2104 }
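
The getFreelistLocks()/releaseFreelistLocks() pair above is normally taken through an RAII helper (FreelistLocker, seen earlier in compute_new_size()). A minimal sketch of that pattern, with a generic collector interface invented purely for illustration:

#include <cstdio>

// "Collector" is a stand-in interface, not CMSCollector.
struct Collector {
  virtual void getFreelistLocks() const = 0;
  virtual void releaseFreelistLocks() const = 0;
  virtual ~Collector() {}
};

// RAII wrapper in the spirit of FreelistLocker: locks on construction,
// releases on every exit path via the destructor.
class ScopedFreelistLocker {
  const Collector* _c;
 public:
  explicit ScopedFreelistLocker(const Collector* c) : _c(c) { _c->getFreelistLocks(); }
  ~ScopedFreelistLocker() { _c->releaseFreelistLocks(); }
};

struct DummyCollector : Collector {
  void getFreelistLocks() const     { std::puts("lock free lists"); }
  void releaseFreelistLocks() const { std::puts("unlock free lists"); }
};

int main() {
  DummyCollector c;
  ScopedFreelistLocker z(&c);   // mirrors "FreelistLocker z(this)" above
  return 0;
}
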
2138 2105
2139 // A utility class that is used by the CMS collector to 2106 // A utility class that is used by the CMS collector to
2189 assert(_collectorState == Idling, "Should be idling before start."); 2156 assert(_collectorState == Idling, "Should be idling before start.");
2190 _collectorState = InitialMarking; 2157 _collectorState = InitialMarking;
2191 // Reset the expansion cause, now that we are about to begin 2158 // Reset the expansion cause, now that we are about to begin
2192 // a new cycle. 2159 // a new cycle.
2193 clear_expansion_cause(); 2160 clear_expansion_cause();
2161
2162 // Clear the MetaspaceGC flag since a concurrent collection
2163 // is starting but also clear it after the collection.
2164 MetaspaceGC::set_should_concurrent_collect(false);
2194 } 2165 }
2195 // Decide if we want to enable class unloading as part of the 2166 // Decide if we want to enable class unloading as part of the
2196 // ensuing concurrent GC cycle. 2167 // ensuing concurrent GC cycle.
2197 update_should_unload_classes(); 2168 update_should_unload_classes();
2198 _full_gc_requested = false; // acks all outstanding full gc requests 2169 _full_gc_requested = false; // acks all outstanding full gc requests
2366 case Resetting: 2337 case Resetting:
2367 // CMS heap resizing has been completed 2338 // CMS heap resizing has been completed
2368 reset(true); 2339 reset(true);
2369 assert(_collectorState == Idling, "Collector state should " 2340 assert(_collectorState == Idling, "Collector state should "
2370 "have changed"); 2341 "have changed");
2342
2343 MetaspaceGC::set_should_concurrent_collect(false);
2344
2371 stats().record_cms_end(); 2345 stats().record_cms_end();
2372 // Don't move the concurrent_phases_end() and compute_new_size() 2346 // Don't move the concurrent_phases_end() and compute_new_size()
2373 // calls to here because a preempted background collection 2347 // calls to here because a preempted background collection
2374 // has its state set to "Resetting". 2348 // has its state set to "Resetting".
2375 break; 2349 break;
2582 // work common to all generations it's responsible for. A similar 2556 // work common to all generations it's responsible for. A similar
2583 // comment applies to the gc_epilogue()'s. 2557 // comment applies to the gc_epilogue()'s.
2584 // The role of the variable _between_prologue_and_epilogue is to 2558 // The role of the variable _between_prologue_and_epilogue is to
2585 // enforce the invocation protocol. 2559 // enforce the invocation protocol.
2586 void CMSCollector::gc_prologue(bool full) { 2560 void CMSCollector::gc_prologue(bool full) {
2587 // Call gc_prologue_work() for each CMSGen and PermGen that 2561 // Call gc_prologue_work() for the CMSGen
2588 // we are responsible for. 2562 // we are responsible for.
2589 2563
2590 // The following locking discipline assumes that we are only called 2564 // The following locking discipline assumes that we are only called
2591 // when the world is stopped. 2565 // when the world is stopped.
2592 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption"); 2566 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2593 2567
2594 // The CMSCollector prologue must call the gc_prologues for the 2568 // The CMSCollector prologue must call the gc_prologues for the
2595 // "generations" (including PermGen if any) that it's responsible 2569 // "generations" that it's responsible
2596 // for. 2570 // for.
2597 2571
2598 assert( Thread::current()->is_VM_thread() 2572 assert( Thread::current()->is_VM_thread()
2599 || ( CMSScavengeBeforeRemark 2573 || ( CMSScavengeBeforeRemark
2600 && Thread::current()->is_ConcurrentGC_thread()), 2574 && Thread::current()->is_ConcurrentGC_thread()),
2608 } 2582 }
2609 2583
2610 // set a bit saying prologue has been called; cleared in epilogue 2584 // set a bit saying prologue has been called; cleared in epilogue
2611 _between_prologue_and_epilogue = true; 2585 _between_prologue_and_epilogue = true;
2612 // Claim locks for common data structures, then call gc_prologue_work() 2586 // Claim locks for common data structures, then call gc_prologue_work()
2613 // for each CMSGen and PermGen that we are responsible for. 2587 // for each CMSGen.
2614 2588
2615 getFreelistLocks(); // gets free list locks on constituent spaces 2589 getFreelistLocks(); // gets free list locks on constituent spaces
2616 bitMapLock()->lock_without_safepoint_check(); 2590 bitMapLock()->lock_without_safepoint_check();
2617 2591
2618 // Should call gc_prologue_work() for all cms gens we are responsible for 2592 // Should call gc_prologue_work() for all cms gens we are responsible for
2619 bool registerClosure = _collectorState >= Marking 2593 bool duringMarking = _collectorState >= Marking
2620 && _collectorState < Sweeping; 2594 && _collectorState < Sweeping;
2595
2596 // The young collections clear the modified oops state, which tells if
2597 // there are any modified oops in the class. The remark phase also needs
2598 // that information. Tell the young collection to save the union of all
2599 // modified klasses.
2600 if (duringMarking) {
2601 _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2602 }
2603
2604 bool registerClosure = duringMarking;
2605
2621 ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ? 2606 ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2622 &_modUnionClosurePar 2607 &_modUnionClosurePar
2623 : &_modUnionClosure; 2608 : &_modUnionClosure;
2624 _cmsGen->gc_prologue_work(full, registerClosure, muc); 2609 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2625 _permGen->gc_prologue_work(full, registerClosure, muc);
2626 2610
2627 if (!full) { 2611 if (!full) {
2628 stats().record_gc0_begin(); 2612 stats().record_gc0_begin();
2629 } 2613 }
2630 } 2614 }
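
The accumulate_modified_oops flag set in this prologue has a simple lifecycle: it is turned on while a marking cycle is in progress so that young collections OR their modified-klass information into a union (rather than discarding it when they clear their own state), the union is consumed at remark, and the flag is turned off again in gc_epilogue below. A standalone sketch of that accumulation idea, with invented names and a fixed-size bitset standing in for the klass remembered set:

#include <bitset>

// Illustration only, not the real KlassRemSet.
class ModifiedKlassUnion {
  std::bitset<1024> _union_bits;   // one bit per (hypothetical) klass slot
  bool _accumulate;
 public:
  ModifiedKlassUnion() : _accumulate(false) {}
  void set_accumulate(bool v) { _accumulate = v; }           // prologue: true, epilogue: false
  // Called by a young collection with the bits it is about to clear.
  void young_gc_clearing(const std::bitset<1024>& modified) {
    if (_accumulate) _union_bits |= modified;                // keep the union for remark
  }
  const std::bitset<1024>& union_for_remark() const { return _union_bits; }
  void clear_union() { _union_bits.reset(); }
};

int main() {
  ModifiedKlassUnion u;
  u.set_accumulate(true);                  // CMS marking started (gc_prologue)
  std::bitset<1024> gc1; gc1.set(3);
  u.young_gc_clearing(gc1);                // young GC clears bit 3; the union keeps it
  u.set_accumulate(false);                 // gc_epilogue
  return u.union_for_remark().test(3) ? 0 : 1;
}
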
2684 return; 2668 return;
2685 } 2669 }
2686 assert(haveFreelistLocks(), "must have freelist locks"); 2670 assert(haveFreelistLocks(), "must have freelist locks");
2687 assert_lock_strong(bitMapLock()); 2671 assert_lock_strong(bitMapLock());
2688 2672
2673 _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2674
2689 _cmsGen->gc_epilogue_work(full); 2675 _cmsGen->gc_epilogue_work(full);
2690 _permGen->gc_epilogue_work(full);
2691 2676
2692 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) { 2677 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2693 // in case sampling was not already enabled, enable it 2678 // in case sampling was not already enabled, enable it
2694 _start_sampling = true; 2679 _start_sampling = true;
2695 } 2680 }
2696 // reset _eden_chunk_array so sampling starts afresh 2681 // reset _eden_chunk_array so sampling starts afresh
2697 _eden_chunk_index = 0; 2682 _eden_chunk_index = 0;
2698 2683
2699 size_t cms_used = _cmsGen->cmsSpace()->used(); 2684 size_t cms_used = _cmsGen->cmsSpace()->used();
2700 size_t perm_used = _permGen->cmsSpace()->used();
2701 2685
2702 // update performance counters - this uses a special version of 2686 // update performance counters - this uses a special version of
2703 // update_counters() that allows the utilization to be passed as a 2687 // update_counters() that allows the utilization to be passed as a
2704 // parameter, avoiding multiple calls to used(). 2688 // parameter, avoiding multiple calls to used().
2705 // 2689 //
2706 _cmsGen->update_counters(cms_used); 2690 _cmsGen->update_counters(cms_used);
2707 _permGen->update_counters(perm_used);
2708 2691
2709 if (CMSIncrementalMode) { 2692 if (CMSIncrementalMode) {
2710 icms_update_allocation_limits(); 2693 icms_update_allocation_limits();
2711 } 2694 }
2712 2695
2900 void CMSCollector::verify_after_remark_work_1() { 2883 void CMSCollector::verify_after_remark_work_1() {
2901 ResourceMark rm; 2884 ResourceMark rm;
2902 HandleMark hm; 2885 HandleMark hm;
2903 GenCollectedHeap* gch = GenCollectedHeap::heap(); 2886 GenCollectedHeap* gch = GenCollectedHeap::heap();
2904 2887
2888 // Get a clear set of claim bits for the strong roots processing to work with.
2889 ClassLoaderDataGraph::clear_claimed_marks();
2890
2905 // Mark from roots one level into CMS 2891 // Mark from roots one level into CMS
2906 MarkRefsIntoClosure notOlder(_span, verification_mark_bm()); 2892 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2907 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 2893 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2908 2894
2909 gch->gen_process_strong_roots(_cmsGen->level(), 2895 gch->gen_process_strong_roots(_cmsGen->level(),
2910 true, // younger gens are roots 2896 true, // younger gens are roots
2911 true, // activate StrongRootsScope 2897 true, // activate StrongRootsScope
2912 true, // collecting perm gen 2898 false, // not scavenging
2913 SharedHeap::ScanningOption(roots_scanning_options()), 2899 SharedHeap::ScanningOption(roots_scanning_options()),
2914 &notOlder, 2900 &notOlder,
2915 true, // walk code active on stacks 2901 true, // walk code active on stacks
2916 NULL); 2902 NULL,
2903 NULL); // SSS: Provide correct closure
2917 2904
2918 // Now mark from the roots 2905 // Now mark from the roots
2919 assert(_revisitStack.isEmpty(), "Should be empty");
2920 MarkFromRootsClosure markFromRootsClosure(this, _span, 2906 MarkFromRootsClosure markFromRootsClosure(this, _span,
2921 verification_mark_bm(), verification_mark_stack(), &_revisitStack, 2907 verification_mark_bm(), verification_mark_stack(),
2922 false /* don't yield */, true /* verifying */); 2908 false /* don't yield */, true /* verifying */);
2923 assert(_restart_addr == NULL, "Expected pre-condition"); 2909 assert(_restart_addr == NULL, "Expected pre-condition");
2924 verification_mark_bm()->iterate(&markFromRootsClosure); 2910 verification_mark_bm()->iterate(&markFromRootsClosure);
2925 while (_restart_addr != NULL) { 2911 while (_restart_addr != NULL) {
2926 // Deal with stack overflow: by restarting at the indicated 2912 // Deal with stack overflow: by restarting at the indicated
2930 _restart_addr = NULL; 2916 _restart_addr = NULL;
2931 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); 2917 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2932 } 2918 }
2933 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); 2919 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2934 verify_work_stacks_empty(); 2920 verify_work_stacks_empty();
2935 // Should reset the revisit stack above, since no class tree
2936 // surgery is forthcoming.
2937 _revisitStack.reset(); // throwing away all contents
2938 2921
2939 // Marking completed -- now verify that each bit marked in 2922 // Marking completed -- now verify that each bit marked in
2940 // verification_mark_bm() is also marked in markBitMap(); flag all 2923 // verification_mark_bm() is also marked in markBitMap(); flag all
2941 // errors by printing corresponding objects. 2924 // errors by printing corresponding objects.
2942 VerifyMarkedClosure vcl(markBitMap()); 2925 VerifyMarkedClosure vcl(markBitMap());
2946 Universe::heap()->print_on(gclog_or_tty); 2929 Universe::heap()->print_on(gclog_or_tty);
2947 fatal("CMS: failed marking verification after remark"); 2930 fatal("CMS: failed marking verification after remark");
2948 } 2931 }
2949 } 2932 }
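
The verification described above is essentially a subset check: every bit set in the freshly computed verification bitmap must also be set in the bitmap produced by the real marking. A minimal sketch of that check, with plain bool vectors standing in for the two CMS bitmaps (illustrative only):

#include <vector>
#include <cstdio>

// Returns true if every word marked during verification is also marked in
// the collector's own mark bitmap; reports the first discrepancy otherwise.
bool verify_subset(const std::vector<bool>& verification_bm,
                   const std::vector<bool>& mark_bm) {
  for (std::size_t i = 0; i < verification_bm.size(); ++i) {
    if (verification_bm[i] && !mark_bm[i]) {
      std::printf("object at word %zu reachable but not marked\n", i);
      return false;   // the real code prints the heap and then calls fatal()
    }
  }
  return true;
}

int main() {
  std::vector<bool> v(8, false), m(8, false);
  v[2] = m[2] = true;            // consistent bitmaps
  return verify_subset(v, m) ? 0 : 1;
}
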
2950 2933
2934 class VerifyKlassOopsKlassClosure : public KlassClosure {
2935 class VerifyKlassOopsClosure : public OopClosure {
2936 CMSBitMap* _bitmap;
2937 public:
2938 VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2939 void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2940 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2941 } _oop_closure;
2942 public:
2943 VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2944 void do_klass(Klass* k) {
2945 k->oops_do(&_oop_closure);
2946 }
2947 };
2948
2951 void CMSCollector::verify_after_remark_work_2() { 2949 void CMSCollector::verify_after_remark_work_2() {
2952 ResourceMark rm; 2950 ResourceMark rm;
2953 HandleMark hm; 2951 HandleMark hm;
2954 GenCollectedHeap* gch = GenCollectedHeap::heap(); 2952 GenCollectedHeap* gch = GenCollectedHeap::heap();
2955 2953
2954 // Get a clear set of claim bits for the strong roots processing to work with.
2955 ClassLoaderDataGraph::clear_claimed_marks();
2956
2956 // Mark from roots one level into CMS 2957 // Mark from roots one level into CMS
2957 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(), 2958 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2958 markBitMap()); 2959 markBitMap());
2960 CMKlassClosure klass_closure(&notOlder);
2961
2959 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 2962 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2960 gch->gen_process_strong_roots(_cmsGen->level(), 2963 gch->gen_process_strong_roots(_cmsGen->level(),
2961 true, // younger gens are roots 2964 true, // younger gens are roots
2962 true, // activate StrongRootsScope 2965 true, // activate StrongRootsScope
2963 true, // collecting perm gen 2966 false, // not scavenging
2964 SharedHeap::ScanningOption(roots_scanning_options()), 2967 SharedHeap::ScanningOption(roots_scanning_options()),
2965 &notOlder, 2968 &notOlder,
2966 true, // walk code active on stacks 2969 true, // walk code active on stacks
2967 NULL); 2970 NULL,
2971 &klass_closure);
2968 2972
2969 // Now mark from the roots 2973 // Now mark from the roots
2970 assert(_revisitStack.isEmpty(), "Should be empty");
2971 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span, 2974 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2972 verification_mark_bm(), markBitMap(), verification_mark_stack()); 2975 verification_mark_bm(), markBitMap(), verification_mark_stack());
2973 assert(_restart_addr == NULL, "Expected pre-condition"); 2976 assert(_restart_addr == NULL, "Expected pre-condition");
2974 verification_mark_bm()->iterate(&markFromRootsClosure); 2977 verification_mark_bm()->iterate(&markFromRootsClosure);
2975 while (_restart_addr != NULL) { 2978 while (_restart_addr != NULL) {
2980 _restart_addr = NULL; 2983 _restart_addr = NULL;
2981 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); 2984 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2982 } 2985 }
2983 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); 2986 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2984 verify_work_stacks_empty(); 2987 verify_work_stacks_empty();
2985 // Should reset the revisit stack above, since no class tree 2988
2986 // surgery is forthcoming. 2989 VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2987 _revisitStack.reset(); // throwing away all contents 2990 ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2988 2991
2989 // Marking completed -- now verify that each bit marked in 2992 // Marking completed -- now verify that each bit marked in
2990 // verification_mark_bm() is also marked in markBitMap(); flag all 2993 // verification_mark_bm() is also marked in markBitMap(); flag all
2991 // errors by printing corresponding objects. 2994 // errors by printing corresponding objects.
2992 VerifyMarkedClosure vcl(markBitMap()); 2995 VerifyMarkedClosure vcl(markBitMap());
3044 younger_refs_in_space_iterate(_cmsSpace, cl); 3047 younger_refs_in_space_iterate(_cmsSpace, cl);
3045 cl->reset_generation(); 3048 cl->reset_generation();
3046 } 3049 }
3047 3050
3048 void 3051 void
3049 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) { 3052 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
3050 if (freelistLock()->owned_by_self()) { 3053 if (freelistLock()->owned_by_self()) {
3051 Generation::oop_iterate(mr, cl); 3054 Generation::oop_iterate(mr, cl);
3052 } else { 3055 } else {
3053 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 3056 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3054 Generation::oop_iterate(mr, cl); 3057 Generation::oop_iterate(mr, cl);
3055 } 3058 }
3056 } 3059 }
3057 3060
3058 void 3061 void
3059 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) { 3062 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3060 if (freelistLock()->owned_by_self()) { 3063 if (freelistLock()->owned_by_self()) {
3061 Generation::oop_iterate(cl); 3064 Generation::oop_iterate(cl);
3062 } else { 3065 } else {
3063 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 3066 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3064 Generation::oop_iterate(cl); 3067 Generation::oop_iterate(cl);
3081 Generation::safe_object_iterate(cl); 3084 Generation::safe_object_iterate(cl);
3082 } else { 3085 } else {
3083 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 3086 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3084 Generation::safe_object_iterate(cl); 3087 Generation::safe_object_iterate(cl);
3085 } 3088 }
3086 }
3087
3088 void
3089 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3090 } 3089 }
3091 3090
3092 void 3091 void
3093 ConcurrentMarkSweepGeneration::post_compact() { 3092 ConcurrentMarkSweepGeneration::post_compact() {
3094 } 3093 }
3121 } 3120 }
3122 } 3121 }
3123 3122
3124 void CMSCollector::verify() { 3123 void CMSCollector::verify() {
3125 _cmsGen->verify(); 3124 _cmsGen->verify();
3126 _permGen->verify();
3127 } 3125 }
3128 3126
3129 #ifndef PRODUCT 3127 #ifndef PRODUCT
3130 bool CMSCollector::overflow_list_is_empty() const { 3128 bool CMSCollector::overflow_list_is_empty() const {
3131 assert(_num_par_pushes >= 0, "Inconsistency"); 3129 assert(_num_par_pushes >= 0, "Inconsistency");
3147 assert(no_preserved_marks(), "No preserved marks"); 3145 assert(no_preserved_marks(), "No preserved marks");
3148 } 3146 }
3149 #endif // PRODUCT 3147 #endif // PRODUCT
3150 3148
3151 // Decide if we want to enable class unloading as part of the 3149 // Decide if we want to enable class unloading as part of the
3152 // ensuing concurrent GC cycle. We will collect the perm gen and 3150 // ensuing concurrent GC cycle. We will collect and
3153 // unload classes if it's the case that: 3151 // unload classes if it's the case that:
3154 // (1) an explicit gc request has been made and the flag 3152 // (1) an explicit gc request has been made and the flag
3155 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR 3153 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3156 // (2) (a) class unloading is enabled at the command line, and 3154 // (2) (a) class unloading is enabled at the command line, and
3157 // (b) (i) perm gen threshold has been crossed, or 3155 // (b) old gen is getting really full
3158 // (ii) old gen is getting really full, or
3159 // (iii) the previous N CMS collections did not collect the
3160 // perm gen
3161 // NOTE: Provided there is no change in the state of the heap between 3156 // NOTE: Provided there is no change in the state of the heap between
3162 // calls to this method, it should have idempotent results. Moreover, 3157 // calls to this method, it should have idempotent results. Moreover,
3163 // its results should be monotonically increasing (i.e. going from 0 to 1, 3158 // its results should be monotonically increasing (i.e. going from 0 to 1,
3164 // but not 1 to 0) between successive calls between which the heap was 3159 // but not 1 to 0) between successive calls between which the heap was
3165 // not collected. For the implementation below, it must thus rely on 3160 // not collected. For the implementation below, it must thus rely on
3166 // the property that concurrent_cycles_since_last_unload() 3161 // the property that concurrent_cycles_since_last_unload()
3167 // will not decrease unless a collection cycle happened and that 3162 // will not decrease unless a collection cycle happened and that
3168 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are 3163 // _cmsGen->is_too_full() is
3169 // themselves also monotonic in that sense. See check_monotonicity() 3164 // itself also monotonic in that sense. See check_monotonicity()
3170 // below. 3165 // below.
3171 bool CMSCollector::update_should_unload_classes() { 3166 void CMSCollector::update_should_unload_classes() {
3172 _should_unload_classes = false; 3167 _should_unload_classes = false;
3173 // Condition 1 above 3168 // Condition 1 above
3174 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) { 3169 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3175 _should_unload_classes = true; 3170 _should_unload_classes = true;
3176 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above 3171 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3177 // Disjuncts 2.b.(i,ii,iii) above 3172 // Disjuncts 2.b.(i,ii,iii) above
3178 _should_unload_classes = (concurrent_cycles_since_last_unload() >= 3173 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3179 CMSClassUnloadingMaxInterval) 3174 CMSClassUnloadingMaxInterval)
3180 || _permGen->should_concurrent_collect()
3181 || _cmsGen->is_too_full(); 3175 || _cmsGen->is_too_full();
3182 } 3176 }
3183 return _should_unload_classes;
3184 } 3177 }
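
Flattening the interleaved old/new columns above, the post-PermGen decision reads as a short disjunction. A hedged standalone restatement (parameter names are stand-ins for the flags and queries used by the real method):

// Illustrative restatement of update_should_unload_classes() after perm gen
// removal; not the HotSpot method itself.
bool should_unload_classes(bool full_gc_requested,
                           bool explicit_gc_unloads_classes,   // ExplicitGCInvokesConcurrentAndUnloadsClasses
                           bool class_unloading_enabled,       // CMSClassUnloadingEnabled
                           int  cycles_since_last_unload,
                           int  max_interval,                  // CMSClassUnloadingMaxInterval
                           bool old_gen_is_too_full)           // _cmsGen->is_too_full()
{
  if (full_gc_requested && explicit_gc_unloads_classes) return true;   // condition (1)
  if (class_unloading_enabled) {                                       // condition (2)
    return cycles_since_last_unload >= max_interval || old_gen_is_too_full;
  }
  return false;
}

int main() { return should_unload_classes(false, false, true, 0, 5, true) ? 0 : 1; }
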
3185 3178
3186 bool ConcurrentMarkSweepGeneration::is_too_full() const { 3179 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3187 bool res = should_concurrent_collect(); 3180 bool res = should_concurrent_collect();
3188 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0); 3181 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3201 } 3194 }
3202 3195
3203 // Not unloading classes this cycle 3196 // Not unloading classes this cycle
3204 assert(!should_unload_classes(), "Inconsitency!"); 3197 assert(!should_unload_classes(), "Inconsitency!");
3205 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { 3198 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3206 // We were not verifying, or we _were_ unloading classes in the last cycle,
3207 // AND some verification options are enabled this cycle; in this case,
3208 // we must make sure that the deadness map is allocated if not already so,
3209 // and cleared (if already allocated previously --
3210 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3211 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3212 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3213 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3214 "permanent generation verification disabled");
3215 return; // Note that we leave verification disabled, so we'll retry this
3216 // allocation next cycle. We _could_ remember this failure
3217 // and skip further attempts and permanently disable verification
3218 // attempts if that is considered more desirable.
3219 }
3220 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3221 "_perm_gen_ver_bit_map inconsistency?");
3222 } else {
3223 perm_gen_verify_bit_map()->clear_all();
3224 }
3225 // Include symbols, strings and code cache elements to prevent their resurrection. 3199 // Include symbols, strings and code cache elements to prevent their resurrection.
3226 add_root_scanning_option(rso); 3200 add_root_scanning_option(rso);
3227 set_verifying(true); 3201 set_verifying(true);
3228 } else if (verifying() && !should_verify) { 3202 } else if (verifying() && !should_verify) {
3229 // We were verifying, but some verification flags got disabled. 3203 // We were verifying, but some verification flags got disabled.
3239 HeapWord* CMSCollector::block_start(const void* p) const { 3213 HeapWord* CMSCollector::block_start(const void* p) const {
3240 const HeapWord* addr = (HeapWord*)p; 3214 const HeapWord* addr = (HeapWord*)p;
3241 if (_span.contains(p)) { 3215 if (_span.contains(p)) {
3242 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) { 3216 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3243 return _cmsGen->cmsSpace()->block_start(p); 3217 return _cmsGen->cmsSpace()->block_start(p);
3244 } else {
3245 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3246 "Inconsistent _span?");
3247 return _permGen->cmsSpace()->block_start(p);
3248 } 3218 }
3249 } 3219 }
3250 return NULL; 3220 return NULL;
3251 } 3221 }
3252 #endif 3222 #endif
3558 gch->save_marks(); 3528 gch->save_marks();
3559 3529
3560 // weak reference processing has not started yet. 3530 // weak reference processing has not started yet.
3561 ref_processor()->set_enqueuing_is_done(false); 3531 ref_processor()->set_enqueuing_is_done(false);
3562 3532
3533 // Need to remember all newly created CLDs,
3534 // so that we can guarantee that the remark finds them.
3535 ClassLoaderDataGraph::remember_new_clds(true);
3536
3537 // Whenever a CLD is found, it will be claimed before proceeding to mark
3538 // the klasses. The claimed marks need to be cleared before marking starts.
3539 ClassLoaderDataGraph::clear_claimed_marks();
3540
3541 CMKlassClosure klass_closure(&notOlder);
3563 { 3542 {
3564 // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3565 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) 3543 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3566 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 3544 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3567 gch->gen_process_strong_roots(_cmsGen->level(), 3545 gch->gen_process_strong_roots(_cmsGen->level(),
3568 true, // younger gens are roots 3546 true, // younger gens are roots
3569 true, // activate StrongRootsScope 3547 true, // activate StrongRootsScope
3570 true, // collecting perm gen 3548 false, // not scavenging
3571 SharedHeap::ScanningOption(roots_scanning_options()), 3549 SharedHeap::ScanningOption(roots_scanning_options()),
3572 &notOlder, 3550 &notOlder,
3573 true, // walk all of code cache if (so & SO_CodeCache) 3551 true, // walk all of code cache if (so & SO_CodeCache)
3574 NULL); 3552 NULL,
3553 &klass_closure);
3575 } 3554 }
3576 3555
3577 // Clear mod-union table; it will be dirtied in the prologue of 3556 // Clear mod-union table; it will be dirtied in the prologue of
3578 // CMS generation per each younger generation collection. 3557 // CMS generation per each younger generation collection.
3579 3558
3580 assert(_modUnionTable.isAllClear(), 3559 assert(_modUnionTable.isAllClear(),
3581 "Was cleared in most recent final checkpoint phase" 3560 "Was cleared in most recent final checkpoint phase"
3582 " or no bits are set in the gc_prologue before the start of the next " 3561 " or no bits are set in the gc_prologue before the start of the next "
3583 "subsequent marking phase."); 3562 "subsequent marking phase.");
3563
3564 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3584 3565
3585 // Save the end of the used_region of the constituent generations 3566 // Save the end of the used_region of the constituent generations
3586 // to be used to limit the extent of sweep in each generation. 3567 // to be used to limit the extent of sweep in each generation.
3587 save_sweep_limits(); 3568 save_sweep_limits();
3588 if (UseAdaptiveSizePolicy) { 3569 if (UseAdaptiveSizePolicy) {
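
The claiming protocol referred to above (clear all claimed marks, then have each visitor claim a class loader data entry before marking its klasses) is a once-only visitation guard. A standalone sketch of the idea using an atomic flag per entry (names invented; the real ClassLoaderData claiming mechanism is more involved):

#include <atomic>
#include <cstdio>
#include <cstddef>

// Illustration of claim-before-visit: each entry carries a claimed flag that
// is cleared before marking starts and taken at most once via compare-exchange.
struct ClassLoaderEntry {
  std::atomic<bool> claimed;
  ClassLoaderEntry() : claimed(false) {}
  bool try_claim() {
    bool expected = false;
    return claimed.compare_exchange_strong(expected, true);
  }
};

void clear_claimed_marks(ClassLoaderEntry* graph, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) graph[i].claimed.store(false);
}

void mark_all(ClassLoaderEntry* graph, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    if (graph[i].try_claim()) {
      std::printf("marking klasses of class loader %zu\n", i);  // happens once per entry
    }
  }
}

int main() {
  ClassLoaderEntry graph[3];
  clear_claimed_marks(graph, 3);
  mark_all(graph, 3);   // marks every entry
  mark_all(graph, 3);   // second pass claims nothing
  return 0;
}
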
3670 // between concurrent such updates. 3651 // between concurrent such updates.
3671 3652
3672 // already have locks 3653 // already have locks
3673 assert_lock_strong(bitMapLock()); 3654 assert_lock_strong(bitMapLock());
3674 3655
3675 // Clear the revisit stack, just in case there are any
3676 // obsolete contents from a short-circuited previous CMS cycle.
3677 _revisitStack.reset();
3678 verify_work_stacks_empty(); 3656 verify_work_stacks_empty();
3679 verify_overflow_empty(); 3657 verify_overflow_empty();
3680 assert(_revisitStack.isEmpty(), "tabula rasa");
3681 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
3682 bool result = false; 3658 bool result = false;
3683 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) { 3659 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3684 result = do_marking_mt(asynch); 3660 result = do_marking_mt(asynch);
3685 } else { 3661 } else {
3686 result = do_marking_st(asynch); 3662 result = do_marking_st(asynch);
3724 CMSCollector* _collector; 3700 CMSCollector* _collector;
3725 int _n_workers; // requested/desired # workers 3701 int _n_workers; // requested/desired # workers
3726 bool _asynch; 3702 bool _asynch;
3727 bool _result; 3703 bool _result;
3728 CompactibleFreeListSpace* _cms_space; 3704 CompactibleFreeListSpace* _cms_space;
3729 CompactibleFreeListSpace* _perm_space;
3730 char _pad_front[64]; // padding to ... 3705 char _pad_front[64]; // padding to ...
3731 HeapWord* _global_finger; // ... avoid sharing cache line 3706 HeapWord* _global_finger; // ... avoid sharing cache line
3732 char _pad_back[64]; 3707 char _pad_back[64];
3733 HeapWord* _restart_addr; 3708 HeapWord* _restart_addr;
3734 3709
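
The _pad_front/_pad_back fields around _global_finger above keep the heavily updated finger on its own cache line, away from fields read by other workers. A tiny standalone sketch of the same padding idea (sizes and names are illustrative; 64-byte cache lines are assumed):

// Padding so the hot, frequently written word does not share a cache line
// with neighbouring read-mostly fields (false sharing avoidance).
struct PaddedFinger {
  char  pad_front[64];   // padding to ...
  void* global_finger;   // ... keep this word on its own cache line
  char  pad_back[64];
};

int main() {
  PaddedFinger f;
  f.global_finger = 0;
  return sizeof(PaddedFinger) >= 128 ? 0 : 1;
}
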
3743 CMSConcMarkingTerminatorTerminator _term_term; 3718 CMSConcMarkingTerminatorTerminator _term_term;
3744 3719
3745 public: 3720 public:
3746 CMSConcMarkingTask(CMSCollector* collector, 3721 CMSConcMarkingTask(CMSCollector* collector,
3747 CompactibleFreeListSpace* cms_space, 3722 CompactibleFreeListSpace* cms_space,
3748 CompactibleFreeListSpace* perm_space,
3749 bool asynch, 3723 bool asynch,
3750 YieldingFlexibleWorkGang* workers, 3724 YieldingFlexibleWorkGang* workers,
3751 OopTaskQueueSet* task_queues): 3725 OopTaskQueueSet* task_queues):
3752 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"), 3726 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3753 _collector(collector), 3727 _collector(collector),
3754 _cms_space(cms_space), 3728 _cms_space(cms_space),
3755 _perm_space(perm_space),
3756 _asynch(asynch), _n_workers(0), _result(true), 3729 _asynch(asynch), _n_workers(0), _result(true),
3757 _task_queues(task_queues), 3730 _task_queues(task_queues),
3758 _term(_n_workers, task_queues, _collector), 3731 _term(_n_workers, task_queues, _collector),
3759 _bit_map_lock(collector->bitMapLock()) 3732 _bit_map_lock(collector->bitMapLock())
3760 { 3733 {
3761 _requested_size = _n_workers; 3734 _requested_size = _n_workers;
3762 _term.set_task(this); 3735 _term.set_task(this);
3763 _term_term.set_task(this); 3736 _term_term.set_task(this);
3764 assert(_cms_space->bottom() < _perm_space->bottom(),
3765 "Finger incorrectly initialized below");
3766 _restart_addr = _global_finger = _cms_space->bottom(); 3737 _restart_addr = _global_finger = _cms_space->bottom();
3767 } 3738 }
3768 3739
3769 3740
3770 OopTaskQueueSet* task_queues() { return _task_queues; } 3741 OopTaskQueueSet* task_queues() { return _task_queues; }
3789 virtual void coordinator_yield(); // stuff done by coordinator 3760 virtual void coordinator_yield(); // stuff done by coordinator
3790 bool result() { return _result; } 3761 bool result() { return _result; }
3791 3762
3792 void reset(HeapWord* ra) { 3763 void reset(HeapWord* ra) {
3793 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)"); 3764 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3794 assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
3795 assert(ra < _perm_space->end(), "ra too large");
3796 _restart_addr = _global_finger = ra; 3765 _restart_addr = _global_finger = ra;
3797 _term.reset_for_reuse(); 3766 _term.reset_for_reuse();
3798 } 3767 }
3799 3768
3800 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, 3769 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3869 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec", 3838 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3870 worker_id, _timer.seconds()); 3839 worker_id, _timer.seconds());
3871 // XXX: need xxx/xxx type of notation, two timers 3840 // XXX: need xxx/xxx type of notation, two timers
3872 } 3841 }
3873 3842
3874 // ... do the same for the _perm_space
3875 _timer.reset();
3876 _timer.start();
3877 do_scan_and_mark(worker_id, _perm_space);
3878 _timer.stop();
3879 if (PrintCMSStatistics != 0) {
3880 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3881 worker_id, _timer.seconds());
3882 // XXX: need xxx/xxx type of notation, two timers
3883 }
3884
3885 // ... do work stealing 3843 // ... do work stealing
3886 _timer.reset(); 3844 _timer.reset();
3887 _timer.start(); 3845 _timer.start();
3888 do_work_steal(worker_id); 3846 do_work_steal(worker_id);
3889 _timer.stop(); 3847 _timer.stop();
3897 // Note that under the current task protocol, the 3855 // Note that under the current task protocol, the
3898 // following assertion is true even of the spaces 3856 // following assertion is true even of the spaces
3899 // expanded since the completion of the concurrent 3857 // expanded since the completion of the concurrent
3900 // marking. XXX This will likely change under a strict 3858 // marking. XXX This will likely change under a strict
3901 // ABORT semantics. 3859 // ABORT semantics.
3902 assert(_global_finger > _cms_space->end() && 3860 // After perm removal the comparison was changed to
3903 _global_finger >= _perm_space->end(), 3861 // greater than or equal to from strictly greater than.
3862 // Before perm removal the highest address sweep would
3863 // have been at the end of perm gen but now is at the
3864 // end of the tenured gen.
3865 assert(_global_finger >= _cms_space->end(),
3904 "All tasks have been completed"); 3866 "All tasks have been completed");
3905 DEBUG_ONLY(_collector->verify_overflow_empty();) 3867 DEBUG_ONLY(_collector->verify_overflow_empty();)
3906 } 3868 }
3907 3869
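The marking task keeps a single _global_finger shared by all workers, padded on both sides so it sits on its own cache line, and bump_global_finger() below only ever moves it forward. A lock-free compare-and-swap retry loop is one common way to implement such a monotone bump; the sketch below is a stand-alone model under that assumption, using std::atomic rather than HotSpot's Atomic primitives and an integer in place of HeapWord*.

// Editorial sketch: a self-contained model of a shared, forward-only "finger".
// Nothing here is the HotSpot implementation; names and types are simplified.
#include <atomic>
#include <cstdint>
#include <cstdio>

struct alignas(64) GlobalFinger {        // 64-byte alignment plays the role of _pad_front/_pad_back
  std::atomic<uintptr_t> value;
};

// Advance the finger to 'candidate' only if that moves it forward; racing
// workers may install a larger value first, in which case we simply stop.
void bump_global_finger(GlobalFinger& f, uintptr_t candidate) {
  uintptr_t cur = f.value.load(std::memory_order_relaxed);
  while (candidate > cur &&
         !f.value.compare_exchange_weak(cur, candidate,
                                        std::memory_order_release,
                                        std::memory_order_relaxed)) {
    // compare_exchange_weak refreshed 'cur'; the loop re-checks whether we still need to advance
  }
}

int main() {
  GlobalFinger f;
  f.value.store(100);
  bump_global_finger(f, 96);    // no effect: the finger never moves backwards
  bump_global_finger(f, 128);   // advances
  std::printf("finger = %llu\n", (unsigned long long)f.value.load());
  return 0;
}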
3908 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) { 3870 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
4023 // iteration should be incremental with periodic yields. 3985 // iteration should be incremental with periodic yields.
4024 Par_MarkFromRootsClosure cl(this, _collector, my_span, 3986 Par_MarkFromRootsClosure cl(this, _collector, my_span,
4025 &_collector->_markBitMap, 3987 &_collector->_markBitMap,
4026 work_queue(i), 3988 work_queue(i),
4027 &_collector->_markStack, 3989 &_collector->_markStack,
4028 &_collector->_revisitStack,
4029 _asynch); 3990 _asynch);
4030 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end()); 3991 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4031 } // else nothing to do for this task 3992 } // else nothing to do for this task
4032 } // else nothing to do for this task 3993 } // else nothing to do for this task
4033 } 3994 }
4040 // have been bumped up by the thread that claimed the last 4001 // have been bumped up by the thread that claimed the last
4041 // task. 4002 // task.
4042 pst->all_tasks_completed(); 4003 pst->all_tasks_completed();
4043 } 4004 }
4044 4005
4045 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure { 4006 class Par_ConcMarkingClosure: public CMSOopClosure {
4046 private: 4007 private:
4008 CMSCollector* _collector;
4047 CMSConcMarkingTask* _task; 4009 CMSConcMarkingTask* _task;
4048 MemRegion _span; 4010 MemRegion _span;
4049 CMSBitMap* _bit_map; 4011 CMSBitMap* _bit_map;
4050 CMSMarkStack* _overflow_stack; 4012 CMSMarkStack* _overflow_stack;
4051 OopTaskQueue* _work_queue; 4013 OopTaskQueue* _work_queue;
4052 protected: 4014 protected:
4053 DO_OOP_WORK_DEFN 4015 DO_OOP_WORK_DEFN
4054 public: 4016 public:
4055 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue, 4017 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4056 CMSBitMap* bit_map, CMSMarkStack* overflow_stack, 4018 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4057 CMSMarkStack* revisit_stack): 4019 CMSOopClosure(collector->ref_processor()),
4058 Par_KlassRememberingOopClosure(collector, collector->ref_processor(), revisit_stack), 4020 _collector(collector),
4059 _task(task), 4021 _task(task),
4060 _span(collector->_span), 4022 _span(collector->_span),
4061 _work_queue(work_queue), 4023 _work_queue(work_queue),
4062 _bit_map(bit_map), 4024 _bit_map(bit_map),
4063 _overflow_stack(overflow_stack) 4025 _overflow_stack(overflow_stack)
4064 { } 4026 { }
4065 virtual void do_oop(oop* p); 4027 virtual void do_oop(oop* p);
4066 virtual void do_oop(narrowOop* p); 4028 virtual void do_oop(narrowOop* p);
4029
4067 void trim_queue(size_t max); 4030 void trim_queue(size_t max);
4068 void handle_stack_overflow(HeapWord* lost); 4031 void handle_stack_overflow(HeapWord* lost);
4069 void do_yield_check() { 4032 void do_yield_check() {
4070 if (_task->should_yield()) { 4033 if (_task->should_yield()) {
4071 _task->yield(); 4034 _task->yield();
4126 oop new_oop; 4089 oop new_oop;
4127 if (_work_queue->pop_local(new_oop)) { 4090 if (_work_queue->pop_local(new_oop)) {
4128 assert(new_oop->is_oop(), "Should be an oop"); 4091 assert(new_oop->is_oop(), "Should be an oop");
4129 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object"); 4092 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4130 assert(_span.contains((HeapWord*)new_oop), "Not in span"); 4093 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4131 assert(new_oop->is_parsable(), "Should be parsable");
4132 new_oop->oop_iterate(this); // do_oop() above 4094 new_oop->oop_iterate(this); // do_oop() above
4133 do_yield_check(); 4095 do_yield_check();
4134 } 4096 }
4135 } 4097 }
4136 } 4098 }
4154 void CMSConcMarkingTask::do_work_steal(int i) { 4116 void CMSConcMarkingTask::do_work_steal(int i) {
4155 OopTaskQueue* work_q = work_queue(i); 4117 OopTaskQueue* work_q = work_queue(i);
4156 oop obj_to_scan; 4118 oop obj_to_scan;
4157 CMSBitMap* bm = &(_collector->_markBitMap); 4119 CMSBitMap* bm = &(_collector->_markBitMap);
4158 CMSMarkStack* ovflw = &(_collector->_markStack); 4120 CMSMarkStack* ovflw = &(_collector->_markStack);
4159 CMSMarkStack* revisit = &(_collector->_revisitStack);
4160 int* seed = _collector->hash_seed(i); 4121 int* seed = _collector->hash_seed(i);
4161 Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw, revisit); 4122 Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
4162 while (true) { 4123 while (true) {
4163 cl.trim_queue(0); 4124 cl.trim_queue(0);
4164 assert(work_q->size() == 0, "Should have been emptied above"); 4125 assert(work_q->size() == 0, "Should have been emptied above");
4165 if (get_work_from_overflow_stack(ovflw, work_q)) { 4126 if (get_work_from_overflow_stack(ovflw, work_q)) {
4166 // Can't assert below because the work obtained from the 4127 // Can't assert below because the work obtained from the
4182 4143
4183 // This is run by the CMS (coordinator) thread. 4144 // This is run by the CMS (coordinator) thread.
4184 void CMSConcMarkingTask::coordinator_yield() { 4145 void CMSConcMarkingTask::coordinator_yield() {
4185 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 4146 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4186 "CMS thread should hold CMS token"); 4147 "CMS thread should hold CMS token");
4187 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4188 // First give up the locks, then yield, then re-lock 4148 // First give up the locks, then yield, then re-lock
4189 // We should probably use a constructor/destructor idiom to 4149 // We should probably use a constructor/destructor idiom to
4190 // do this unlock/lock or modify the MutexUnlocker class to 4150 // do this unlock/lock or modify the MutexUnlocker class to
4191 // serve our purpose. XXX 4151 // serve our purpose. XXX
4192 assert_lock_strong(_bit_map_lock); 4152 assert_lock_strong(_bit_map_lock);
4242 conc_workers()->active_workers(), 4202 conc_workers()->active_workers(),
4243 Threads::number_of_non_daemon_threads()); 4203 Threads::number_of_non_daemon_threads());
4244 conc_workers()->set_active_workers(num_workers); 4204 conc_workers()->set_active_workers(num_workers);
4245 4205
4246 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); 4206 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4247 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4248 4207
4249 CMSConcMarkingTask tsk(this, 4208 CMSConcMarkingTask tsk(this,
4250 cms_space, 4209 cms_space,
4251 perm_space,
4252 asynch, 4210 asynch,
4253 conc_workers(), 4211 conc_workers(),
4254 task_queues()); 4212 task_queues());
4255 4213
4256 // Since the actual number of workers we get may be different 4214 // Since the actual number of workers we get may be different
4257 // from the number we requested above, do we need to do anything different 4215 // from the number we requested above, do we need to do anything different
4258 // below? In particular, maybe we need to subclass the SequentialSubTasksDone 4216 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4259 // class?? XXX 4217 // class?? XXX
4260 cms_space ->initialize_sequential_subtasks_for_marking(num_workers); 4218 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4261 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4262 4219
4263 // Refs discovery is already non-atomic. 4220 // Refs discovery is already non-atomic.
4264 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); 4221 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4265 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT"); 4222 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4266 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
4267 conc_workers()->start_task(&tsk); 4223 conc_workers()->start_task(&tsk);
4268 while (tsk.yielded()) { 4224 while (tsk.yielded()) {
4269 tsk.coordinator_yield(); 4225 tsk.coordinator_yield();
4270 conc_workers()->continue_task(&tsk); 4226 conc_workers()->continue_task(&tsk);
4271 } 4227 }
4294 } 4250 }
4295 // Adjust the task to restart from _restart_addr 4251 // Adjust the task to restart from _restart_addr
4296 tsk.reset(_restart_addr); 4252 tsk.reset(_restart_addr);
4297 cms_space ->initialize_sequential_subtasks_for_marking(num_workers, 4253 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4298 _restart_addr); 4254 _restart_addr);
4299 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4300 _restart_addr);
4301 _restart_addr = NULL; 4255 _restart_addr = NULL;
4302 // Get the workers going again 4256 // Get the workers going again
4303 conc_workers()->start_task(&tsk); 4257 conc_workers()->start_task(&tsk);
4304 while (tsk.yielded()) { 4258 while (tsk.yielded()) {
4305 tsk.coordinator_yield(); 4259 tsk.coordinator_yield();
4316 HandleMark hm; 4270 HandleMark hm;
4317 4271
4318 // Temporarily make refs discovery single threaded (non-MT) 4272 // Temporarily make refs discovery single threaded (non-MT)
4319 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); 4273 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4320 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap, 4274 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4321 &_markStack, &_revisitStack, CMSYield && asynch); 4275 &_markStack, CMSYield && asynch);
4322 // the last argument to iterate indicates whether the iteration 4276 // the last argument to iterate indicates whether the iteration
4323 // should be incremental with periodic yields. 4277 // should be incremental with periodic yields.
4324 _markBitMap.iterate(&markFromRootsClosure); 4278 _markBitMap.iterate(&markFromRootsClosure);
4325 // If _restart_addr is non-NULL, a marking stack overflow 4279 // If _restart_addr is non-NULL, a marking stack overflow
4326 // occurred; we need to do a fresh iteration from the 4280 // occurred; we need to do a fresh iteration from the
4508 // referents. 4462 // referents.
4509 if (clean_refs) { 4463 if (clean_refs) {
4510 CMSPrecleanRefsYieldClosure yield_cl(this); 4464 CMSPrecleanRefsYieldClosure yield_cl(this);
4511 assert(rp->span().equals(_span), "Spans should be equal"); 4465 assert(rp->span().equals(_span), "Spans should be equal");
4512 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, 4466 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4513 &_markStack, &_revisitStack, 4467 &_markStack, true /* preclean */);
4514 true /* preclean */);
4515 CMSDrainMarkingStackClosure complete_trace(this, 4468 CMSDrainMarkingStackClosure complete_trace(this,
4516 _span, &_markBitMap, &_markStack, 4469 _span, &_markBitMap, &_markStack,
4517 &keep_alive, true /* preclean */); 4470 &keep_alive, true /* preclean */);
4518 4471
4519 // We don't want this step to interfere with a young 4472 // We don't want this step to interfere with a young
4536 // collection to proceed promptly. XXX YSR: 4489 // collection to proceed promptly. XXX YSR:
4537 // The code in this method may need further 4490 // The code in this method may need further
4538 // tweaking for better performance and some restructuring 4491 // tweaking for better performance and some restructuring
4539 // for cleaner interfaces. 4492 // for cleaner interfaces.
4540 rp->preclean_discovered_references( 4493 rp->preclean_discovered_references(
4541 rp->is_alive_non_header(), &keep_alive, &complete_trace, 4494 rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl);
4542 &yield_cl, should_unload_classes());
4543 } 4495 }
4544 4496
4545 if (clean_survivor) { // preclean the active survivor space(s) 4497 if (clean_survivor) { // preclean the active survivor space(s)
4546 assert(_young_gen->kind() == Generation::DefNew || 4498 assert(_young_gen->kind() == Generation::DefNew ||
4547 _young_gen->kind() == Generation::ParNew || 4499 _young_gen->kind() == Generation::ParNew ||
4548 _young_gen->kind() == Generation::ASParNew, 4500 _young_gen->kind() == Generation::ASParNew,
4549 "incorrect type for cast"); 4501 "incorrect type for cast");
4550 DefNewGeneration* dng = (DefNewGeneration*)_young_gen; 4502 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4551 PushAndMarkClosure pam_cl(this, _span, ref_processor(), 4503 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4552 &_markBitMap, &_modUnionTable, 4504 &_markBitMap, &_modUnionTable,
4553 &_markStack, &_revisitStack, 4505 &_markStack, true /* precleaning phase */);
4554 true /* precleaning phase */);
4555 stopTimer(); 4506 stopTimer();
4556 CMSTokenSyncWithLocks ts(true /* is cms thread */, 4507 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4557 bitMapLock()); 4508 bitMapLock());
4558 startTimer(); 4509 startTimer();
4559 unsigned int before_count = 4510 unsigned int before_count =
4560 GenCollectedHeap::heap()->total_collections(); 4511 GenCollectedHeap::heap()->total_collections();
4561 SurvivorSpacePrecleanClosure 4512 SurvivorSpacePrecleanClosure
4562 sss_cl(this, _span, &_markBitMap, &_markStack, 4513 sss_cl(this, _span, &_markBitMap, &_markStack,
4563 &pam_cl, before_count, CMSYield); 4514 &pam_cl, before_count, CMSYield);
4564 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4565 dng->from()->object_iterate_careful(&sss_cl); 4515 dng->from()->object_iterate_careful(&sss_cl);
4566 dng->to()->object_iterate_careful(&sss_cl); 4516 dng->to()->object_iterate_careful(&sss_cl);
4567 } 4517 }
4568 MarkRefsIntoAndScanClosure 4518 MarkRefsIntoAndScanClosure
4569 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, 4519 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4570 &_markStack, &_revisitStack, this, CMSYield, 4520 &_markStack, this, CMSYield,
4571 true /* precleaning phase */); 4521 true /* precleaning phase */);
4572 // CAUTION: The following closure has persistent state that may need to 4522 // CAUTION: The following closure has persistent state that may need to
4573 // be reset upon a decrease in the sequence of addresses it 4523 // be reset upon a decrease in the sequence of addresses it
4574 // processes. 4524 // processes.
4575 ScanMarkedObjectsAgainCarefullyClosure 4525 ScanMarkedObjectsAgainCarefullyClosure
4576 smoac_cl(this, _span, 4526 smoac_cl(this, _span,
4577 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield); 4527 &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4578 4528
4579 // Preclean dirty cards in ModUnionTable and CardTable using 4529 // Preclean dirty cards in ModUnionTable and CardTable using
4580 // appropriate convergence criterion; 4530 // appropriate convergence criterion;
4581 // repeat CMSPrecleanIter times unless we find that 4531 // repeat CMSPrecleanIter times unless we find that
4582 // we are losing. 4532 // we are losing.
4589 size_t numIter, cumNumCards, lastNumCards, curNumCards; 4539 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4590 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0; 4540 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4591 numIter < CMSPrecleanIter; 4541 numIter < CMSPrecleanIter;
4592 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) { 4542 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4593 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl); 4543 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4594 if (CMSPermGenPrecleaningEnabled) {
4595 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
4596 }
4597 if (Verbose && PrintGCDetails) { 4544 if (Verbose && PrintGCDetails) {
4598 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards); 4545 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4599 } 4546 }
4600 // Either there are very few dirty cards, so re-mark 4547 // Either there are very few dirty cards, so re-mark
4601 // pause will be small anyway, or our pre-cleaning isn't 4548 // pause will be small anyway, or our pre-cleaning isn't
4609 numIter++; 4556 numIter++;
4610 cumNumCards += curNumCards; 4557 cumNumCards += curNumCards;
4611 break; 4558 break;
4612 } 4559 }
4613 } 4560 }
4561
4562 preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4563
4614 curNumCards = preclean_card_table(_cmsGen, &smoac_cl); 4564 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4615 if (CMSPermGenPrecleaningEnabled) {
4616 curNumCards += preclean_card_table(_permGen, &smoac_cl);
4617 }
4618 cumNumCards += curNumCards; 4565 cumNumCards += curNumCards;
4619 if (PrintGCDetails && PrintCMSStatistics != 0) { 4566 if (PrintGCDetails && PrintCMSStatistics != 0) {
4620 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)", 4567 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4621 curNumCards, cumNumCards, numIter); 4568 curNumCards, cumNumCards, numIter);
4622 } 4569 }
4661 ConcurrentMarkSweepGeneration* gen, 4608 ConcurrentMarkSweepGeneration* gen,
4662 ScanMarkedObjectsAgainCarefullyClosure* cl) { 4609 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4663 verify_work_stacks_empty(); 4610 verify_work_stacks_empty();
4664 verify_overflow_empty(); 4611 verify_overflow_empty();
4665 4612
4666 // Turn off checking for this method but turn it back on
4667 // selectively. There are yield points in this method
4668 // but it is difficult to turn the checking off just around
4669 // the yield points. It is simpler to selectively turn
4670 // it on.
4671 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4672
4673 // strategy: starting with the first card, accumulate contiguous 4613 // strategy: starting with the first card, accumulate contiguous
4674 // ranges of dirty cards; clear these cards, then scan the region 4614 // ranges of dirty cards; clear these cards, then scan the region
4675 // covered by these cards. 4615 // covered by these cards.
4676 4616
4677 // Since all of the MUT is committed ahead, we can just use 4617 // Since all of the MUT is committed ahead, we can just use
4730 startTimer(); 4670 startTimer();
4731 { 4671 {
4732 verify_work_stacks_empty(); 4672 verify_work_stacks_empty();
4733 verify_overflow_empty(); 4673 verify_overflow_empty();
4734 sample_eden(); 4674 sample_eden();
4735 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4736 stop_point = 4675 stop_point =
4737 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); 4676 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4738 } 4677 }
4739 if (stop_point != NULL) { 4678 if (stop_point != NULL) {
4740 // The careful iteration stopped early either because it found an 4679 // The careful iteration stopped early either because it found an
4741 // uninitialized object, or because we were in the midst of an 4680 // uninitialized object, or because we were in the midst of an
4742 // "abortable preclean", which should now be aborted. Redirty 4681 // "abortable preclean", which should now be aborted. Redirty
4743 // the bits corresponding to the partially-scanned or unscanned 4682 // the bits corresponding to the partially-scanned or unscanned
4744 // cards. We'll either restart at the next block boundary or 4683 // cards. We'll either restart at the next block boundary or
4745 // abort the preclean. 4684 // abort the preclean.
4746 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) || 4685 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4747 (_collectorState == AbortablePreclean && should_abort_preclean()), 4686 "Should only be AbortablePreclean.");
4748 "Unparsable objects should only be in perm gen.");
4749 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); 4687 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4750 if (should_abort_preclean()) { 4688 if (should_abort_preclean()) {
4751 break; // out of preclean loop 4689 break; // out of preclean loop
4752 } else { 4690 } else {
4753 // Compute the next address at which preclean should pick up; 4691 // Compute the next address at which preclean should pick up;
4818 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock()); 4756 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4819 startTimer(); 4757 startTimer();
4820 sample_eden(); 4758 sample_eden();
4821 verify_work_stacks_empty(); 4759 verify_work_stacks_empty();
4822 verify_overflow_empty(); 4760 verify_overflow_empty();
4823 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4824 HeapWord* stop_point = 4761 HeapWord* stop_point =
4825 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); 4762 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4826 if (stop_point != NULL) { 4763 if (stop_point != NULL) {
4827 // The careful iteration stopped early because it found an 4764 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4828 // uninitialized object. Redirty the bits corresponding to the 4765 "Should only be AbortablePreclean.");
4829 // partially-scanned or unscanned cards, and start again at the
4830 // next block boundary.
4831 assert(CMSPermGenPrecleaningEnabled ||
4832 (_collectorState == AbortablePreclean && should_abort_preclean()),
4833 "Unparsable objects should only be in perm gen.");
4834 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end())); 4766 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4835 if (should_abort_preclean()) { 4767 if (should_abort_preclean()) {
4836 break; // out of preclean loop 4768 break; // out of preclean loop
4837 } else { 4769 } else {
4838 // Compute the next address at which preclean should pick up. 4770 // Compute the next address at which preclean should pick up.
4844 } 4776 }
4845 } 4777 }
4846 verify_work_stacks_empty(); 4778 verify_work_stacks_empty();
4847 verify_overflow_empty(); 4779 verify_overflow_empty();
4848 return cumNumDirtyCards; 4780 return cumNumDirtyCards;
4781 }
4782
4783 class PrecleanKlassClosure : public KlassClosure {
4784 CMKlassClosure _cm_klass_closure;
4785 public:
4786 PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4787 void do_klass(Klass* k) {
4788 if (k->has_accumulated_modified_oops()) {
4789 k->clear_accumulated_modified_oops();
4790
4791 _cm_klass_closure.do_klass(k);
4792 }
4793 }
4794 };
4795
4796 // The freelist lock is needed to prevent asserts; is it really needed?
4797 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4798
4799 cl->set_freelistLock(freelistLock);
4800
4801 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4802
4803 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4804 // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4805 PrecleanKlassClosure preclean_klass_closure(cl);
4806 ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4807
4808 verify_work_stacks_empty();
4809 verify_overflow_empty();
4849 } 4810 }
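preclean_klasses() above walks every loaded class through ClassLoaderDataGraph::classes_do() and rescans only those whose accumulated-modified-oops flag is set, clearing the flag as it goes, much as card-table precleaning clears the cards it rescans. A minimal, self-contained sketch of that visitor idiom follows; Klass, KlassClosure and ClassLoaderDataGraph are simplified stand-ins here, not the real metaspace types.

// Editorial sketch: simplified stand-ins, not the HotSpot classes of the same name.
#include <cstdio>
#include <vector>

struct Klass {                                 // a class's metadata, with its "dirty" flag
  const char* name;
  bool accumulated_modified_oops;
  bool has_accumulated_modified_oops() const { return accumulated_modified_oops; }
  void clear_accumulated_modified_oops()      { accumulated_modified_oops = false; }
};

struct KlassClosure {                          // visitor interface
  virtual void do_klass(Klass* k) = 0;
  virtual ~KlassClosure() {}
};

struct ClassLoaderDataGraph {                  // mocked registry of every loaded class
  static std::vector<Klass*>& classes() { static std::vector<Klass*> v; return v; }
  static void classes_do(KlassClosure* cl) {
    for (Klass* k : classes()) cl->do_klass(k);
  }
};

// Preclean: clear-and-scan dirty classes, skip clean ones.
struct PrecleanKlassClosureModel : public KlassClosure {
  void do_klass(Klass* k) override {
    if (k->has_accumulated_modified_oops()) {
      k->clear_accumulated_modified_oops();
      // ... the real closure would now apply MarkRefsIntoAndScanClosure to k's oops ...
      std::printf("precleaned %s\n", k->name);
    }
  }
};

int main() {
  Klass a{"A", true}, b{"B", false};
  ClassLoaderDataGraph::classes().push_back(&a);
  ClassLoaderDataGraph::classes().push_back(&b);
  PrecleanKlassClosureModel cl;
  ClassLoaderDataGraph::classes_do(&cl);       // only "A" is rescanned
  return 0;
}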
4850 4811
4851 void CMSCollector::checkpointRootsFinal(bool asynch, 4812 void CMSCollector::checkpointRootsFinal(bool asynch,
4852 bool clear_all_soft_refs, bool init_mark_was_synchronous) { 4813 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4853 assert(_collectorState == FinalMarking, "incorrect state transition?"); 4814 assert(_collectorState == FinalMarking, "incorrect state transition?");
4920 CodeCache::gc_prologue(); 4881 CodeCache::gc_prologue();
4921 } 4882 }
4922 assert(haveFreelistLocks(), "must have free list locks"); 4883 assert(haveFreelistLocks(), "must have free list locks");
4923 assert_lock_strong(bitMapLock()); 4884 assert_lock_strong(bitMapLock());
4924 4885
4925 DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
4926 if (!init_mark_was_synchronous) { 4886 if (!init_mark_was_synchronous) {
4927 // We might assume that we need not fill TLAB's when 4887 // We might assume that we need not fill TLAB's when
4928 // CMSScavengeBeforeRemark is set, because we may have just done 4888 // CMSScavengeBeforeRemark is set, because we may have just done
4929 // a scavenge which would have filled all TLAB's -- and besides 4889 // a scavenge which would have filled all TLAB's -- and besides
4930 // Eden would be empty. This however may not always be the case -- 4890 // Eden would be empty. This however may not always be the case --
5025 } 4985 }
5026 } 4986 }
5027 _markStack._hit_limit = 0; 4987 _markStack._hit_limit = 0;
5028 _markStack._failed_double = 0; 4988 _markStack._failed_double = 0;
5029 4989
5030 // Check that all the klasses have been checked
5031 assert(_revisitStack.isEmpty(), "Not all klasses revisited");
5032
5033 if ((VerifyAfterGC || VerifyDuringGC) && 4990 if ((VerifyAfterGC || VerifyDuringGC) &&
5034 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { 4991 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5035 verify_after_remark(); 4992 verify_after_remark();
5036 } 4993 }
5037 4994
5038 // Change under the freelistLocks. 4995 // Change under the freelistLocks.
5039 _collectorState = Sweeping; 4996 _collectorState = Sweeping;
5040 // Call isAllClear() under bitMapLock 4997 // Call isAllClear() under bitMapLock
5041 assert(_modUnionTable.isAllClear(), "Should be clear by end of the" 4998 assert(_modUnionTable.isAllClear(),
5042 " final marking"); 4999 "Should be clear by end of the final marking");
5000 assert(_ct->klass_rem_set()->mod_union_is_clear(),
5001 "Should be clear by end of the final marking");
5043 if (UseAdaptiveSizePolicy) { 5002 if (UseAdaptiveSizePolicy) {
5044 size_policy()->checkpoint_roots_final_end(gch->gc_cause()); 5003 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5045 } 5004 }
5046 } 5005 }
5047 5006
5048 // Parallel remark task 5007 // Parallel remark task
5049 class CMSParRemarkTask: public AbstractGangTask { 5008 class CMSParRemarkTask: public AbstractGangTask {
5050 CMSCollector* _collector; 5009 CMSCollector* _collector;
5051 int _n_workers; 5010 int _n_workers;
5052 CompactibleFreeListSpace* _cms_space; 5011 CompactibleFreeListSpace* _cms_space;
5053 CompactibleFreeListSpace* _perm_space;
5054 5012
5055 // The per-thread work queues, available here for stealing. 5013 // The per-thread work queues, available here for stealing.
5056 OopTaskQueueSet* _task_queues; 5014 OopTaskQueueSet* _task_queues;
5057 ParallelTaskTerminator _term; 5015 ParallelTaskTerminator _term;
5058 5016
5059 public: 5017 public:
5060 // A value of 0 passed to n_workers will cause the number of 5018 // A value of 0 passed to n_workers will cause the number of
5061 // workers to be taken from the active workers in the work gang. 5019 // workers to be taken from the active workers in the work gang.
5062 CMSParRemarkTask(CMSCollector* collector, 5020 CMSParRemarkTask(CMSCollector* collector,
5063 CompactibleFreeListSpace* cms_space, 5021 CompactibleFreeListSpace* cms_space,
5064 CompactibleFreeListSpace* perm_space,
5065 int n_workers, FlexibleWorkGang* workers, 5022 int n_workers, FlexibleWorkGang* workers,
5066 OopTaskQueueSet* task_queues): 5023 OopTaskQueueSet* task_queues):
5067 AbstractGangTask("Rescan roots and grey objects in parallel"), 5024 AbstractGangTask("Rescan roots and grey objects in parallel"),
5068 _collector(collector), 5025 _collector(collector),
5069 _cms_space(cms_space), _perm_space(perm_space), 5026 _cms_space(cms_space),
5070 _n_workers(n_workers), 5027 _n_workers(n_workers),
5071 _task_queues(task_queues), 5028 _task_queues(task_queues),
5072 _term(n_workers, task_queues) { } 5029 _term(n_workers, task_queues) { }
5073 5030
5074 OopTaskQueueSet* task_queues() { return _task_queues; } 5031 OopTaskQueueSet* task_queues() { return _task_queues; }
5090 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i, 5047 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5091 Par_MarkRefsIntoAndScanClosure* cl); 5048 Par_MarkRefsIntoAndScanClosure* cl);
5092 5049
5093 // ... work stealing for the above 5050 // ... work stealing for the above
5094 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed); 5051 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5052 };
5053
5054 class RemarkKlassClosure : public KlassClosure {
5055 CMKlassClosure _cm_klass_closure;
5056 public:
5057 RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5058 void do_klass(Klass* k) {
5059 // Check if we have modified any oops in the Klass during the concurrent marking.
5060 if (k->has_accumulated_modified_oops()) {
5061 k->clear_accumulated_modified_oops();
5062
5063 // We could have transferred the current modified marks to the accumulated marks,
5064 // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5065 } else if (k->has_modified_oops()) {
5066 // Don't clear anything; this info is needed by the next young collection.
5067 } else {
5068 // No modified oops in the Klass.
5069 return;
5070 }
5071
5072 // The klass has modified fields, need to scan the klass.
5073 _cm_klass_closure.do_klass(k);
5074 }
5095 }; 5075 };
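The decision in RemarkKlassClosure::do_klass above is: rescan a class if either dirty flag is set, but clear only the accumulated flag, because the currently-modified information is still needed by the next young collection. A small sketch of just that decision, with plain bools standing in for the Klass flag accessors (hypothetical names, not the real Klass API):

// Editorial sketch of the remark decision only; the flags are plain bools here.
#include <cstdio>

struct KlassFlagsModel {
  bool accumulated_modified_oops;   // writes folded in during the concurrent cycle
  bool modified_oops;               // writes since the last young collection
};

bool remark_should_scan(KlassFlagsModel& k) {
  if (k.accumulated_modified_oops) {
    k.accumulated_modified_oops = false;  // consumed by this remark
    return true;
  }
  if (k.modified_oops) {
    return true;                          // scan, but keep the flag for the young GC
  }
  return false;                           // clean class: nothing to rescan
}

int main() {
  KlassFlagsModel a{true, false}, b{false, true}, c{false, false};
  std::printf("%d %d %d\n", remark_should_scan(a),
                            remark_should_scan(b),
                            remark_should_scan(c));   // prints: 1 1 0
  return 0;
}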
5096 5076
5097 // work_queue(i) is passed to the closure 5077 // work_queue(i) is passed to the closure
5098 // Par_MarkRefsIntoAndScanClosure. The "i" parameter 5078 // Par_MarkRefsIntoAndScanClosure. The "i" parameter
5099 // also is passed to do_dirty_card_rescan_tasks() and to 5079 // also is passed to do_dirty_card_rescan_tasks() and to
5108 _timer.start(); 5088 _timer.start();
5109 GenCollectedHeap* gch = GenCollectedHeap::heap(); 5089 GenCollectedHeap* gch = GenCollectedHeap::heap();
5110 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector, 5090 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5111 _collector->_span, _collector->ref_processor(), 5091 _collector->_span, _collector->ref_processor(),
5112 &(_collector->_markBitMap), 5092 &(_collector->_markBitMap),
5113 work_queue(worker_id), &(_collector->_revisitStack)); 5093 work_queue(worker_id));
5114 5094
5115 // Rescan young gen roots first since these are likely 5095 // Rescan young gen roots first since these are likely
5116 // coarsely partitioned and may, on that account, constitute 5096 // coarsely partitioned and may, on that account, constitute
5117 // the critical path; thus, it's best to start off that 5097 // the critical path; thus, it's best to start off that
5118 // work first. 5098 // work first.
5147 _timer.reset(); 5127 _timer.reset();
5148 _timer.start(); 5128 _timer.start();
5149 gch->gen_process_strong_roots(_collector->_cmsGen->level(), 5129 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5150 false, // yg was scanned above 5130 false, // yg was scanned above
5151 false, // this is parallel code 5131 false, // this is parallel code
5152 true, // collecting perm gen 5132 false, // not scavenging
5153 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), 5133 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5154 &par_mrias_cl, 5134 &par_mrias_cl,
5155 true, // walk all of code cache if (so & SO_CodeCache) 5135 true, // walk all of code cache if (so & SO_CodeCache)
5156 NULL); 5136 NULL,
5137 NULL); // The dirty klasses will be handled below
5157 assert(_collector->should_unload_classes() 5138 assert(_collector->should_unload_classes()
5158 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache), 5139 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5159 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 5140 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5160 _timer.stop(); 5141 _timer.stop();
5161 if (PrintCMSStatistics != 0) { 5142 if (PrintCMSStatistics != 0) {
5162 gclog_or_tty->print_cr( 5143 gclog_or_tty->print_cr(
5163 "Finished remaining root rescan work in %dth thread: %3.3f sec", 5144 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5164 worker_id, _timer.seconds()); 5145 worker_id, _timer.seconds());
5165 } 5146 }
5166 5147
5148 // ---------- unhandled CLD scanning ----------
5149 if (worker_id == 0) { // Single threaded at the moment.
5150 _timer.reset();
5151 _timer.start();
5152
5153 // Scan all new class loader data objects and new dependencies that were
5154 // introduced during concurrent marking.
5155 ResourceMark rm;
5156 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5157 for (int i = 0; i < array->length(); i++) {
5158 par_mrias_cl.do_class_loader_data(array->at(i));
5159 }
5160
5161 // We don't need to keep track of new CLDs anymore.
5162 ClassLoaderDataGraph::remember_new_clds(false);
5163
5164 _timer.stop();
5165 if (PrintCMSStatistics != 0) {
5166 gclog_or_tty->print_cr(
5167 "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5168 worker_id, _timer.seconds());
5169 }
5170 }
5171
5172 // ---------- dirty klass scanning ----------
5173 if (worker_id == 0) { // Single threaded at the moment.
5174 _timer.reset();
5175 _timer.start();
5176
5177 // Scan all classes that were dirtied during the concurrent marking phase.
5178 RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5179 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5180
5181 _timer.stop();
5182 if (PrintCMSStatistics != 0) {
5183 gclog_or_tty->print_cr(
5184 "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5185 worker_id, _timer.seconds());
5186 }
5187 }
5188
5189 // We might have added oops to ClassLoaderData::_handles during the
5190 // concurrent marking phase. These oops point to newly allocated objects
5191 // that are guaranteed to be kept alive, either by the direct allocation
5192 // code or when the young collector processes the strong roots. Hence,
5193 // we don't have to revisit the _handles block during the remark phase.
5194
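The unhandled-CLD step above relies on ClassLoaderDataGraph having recorded every class loader data object created while concurrent marking ran, so the remark pause scans exactly the loaders the concurrent phase could not have visited and then turns the recording off. A rough, self-contained model of that handshake follows (mocked registry with std::vector storage; the real graph hands back a GrowableArray under a ResourceMark):

// Editorial sketch: mocked CLD registry, not the HotSpot ClassLoaderDataGraph.
#include <cstdio>
#include <vector>

struct ClassLoaderData { const char* name; };

struct CLDGraphModel {
  static bool remember;                                // toggled around the concurrent phase
  static std::vector<ClassLoaderData*> all;
  static std::vector<ClassLoaderData*> newly_added;    // CLDs created while 'remember' was on

  static void add(ClassLoaderData* cld) {
    all.push_back(cld);
    if (remember) newly_added.push_back(cld);
  }
  static void remember_new_clds(bool on) {
    remember = on;
    if (!on) newly_added.clear();                      // remark is done with the side list
  }
  static const std::vector<ClassLoaderData*>& new_clds() { return newly_added; }
};
bool CLDGraphModel::remember = false;
std::vector<ClassLoaderData*> CLDGraphModel::all;
std::vector<ClassLoaderData*> CLDGraphModel::newly_added;

int main() {
  ClassLoaderData boot{"boot"};
  CLDGraphModel::add(&boot);                 // existed before marking: covered by the initial roots
  CLDGraphModel::remember_new_clds(true);    // concurrent marking starts
  ClassLoaderData lazy{"lazily-created"};
  CLDGraphModel::add(&lazy);                 // shows up while marking runs
  for (ClassLoaderData* cld : CLDGraphModel::new_clds())  // remark pause
    std::printf("remark scans CLD %s\n", cld->name);
  CLDGraphModel::remember_new_clds(false);   // stop tracking new CLDs
  return 0;
}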
5167 // ---------- rescan dirty cards ------------ 5195 // ---------- rescan dirty cards ------------
5168 _timer.reset(); 5196 _timer.reset();
5169 _timer.start(); 5197 _timer.start();
5170 5198
5171 // Do the rescan tasks for each of the two spaces 5199 // Do the rescan task for the
5172 // (cms_space and perm_space) in turn. 5200 // cms_space.
5173 // "worker_id" is passed to select the task_queue for "worker_id" 5201 // "worker_id" is passed to select the task_queue for "worker_id"
5174 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl); 5202 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5175 do_dirty_card_rescan_tasks(_perm_space, worker_id, &par_mrias_cl);
5176 _timer.stop(); 5203 _timer.stop();
5177 if (PrintCMSStatistics != 0) { 5204 if (PrintCMSStatistics != 0) {
5178 gclog_or_tty->print_cr( 5205 gclog_or_tty->print_cr(
5179 "Finished dirty card rescan work in %dth thread: %3.3f sec", 5206 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5180 worker_id, _timer.seconds()); 5207 worker_id, _timer.seconds());
5282 // more work chunks. Such objects would potentially be scanned 5309 // more work chunks. Such objects would potentially be scanned
5283 // several times redundantly. Work on 4756801 should try and 5310 // several times redundantly. Work on 4756801 should try and
5284 // address that performance anomaly if at all possible. XXX 5311 // address that performance anomaly if at all possible. XXX
5285 MemRegion full_span = _collector->_span; 5312 MemRegion full_span = _collector->_span;
5286 CMSBitMap* bm = &(_collector->_markBitMap); // shared 5313 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5287 CMSMarkStack* rs = &(_collector->_revisitStack); // shared
5288 MarkFromDirtyCardsClosure 5314 MarkFromDirtyCardsClosure
5289 greyRescanClosure(_collector, full_span, // entire span of interest 5315 greyRescanClosure(_collector, full_span, // entire span of interest
5290 sp, bm, work_q, rs, cl); 5316 sp, bm, work_q, cl);
5291 5317
5292 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); 5318 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5293 assert(pst->valid(), "Uninitialized use?"); 5319 assert(pst->valid(), "Uninitialized use?");
5294 uint nth_task = 0; 5320 uint nth_task = 0;
5295 const int alignment = CardTableModRefBS::card_size * BitsPerWord; 5321 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5549 assert(n_workers > 0, "Should have been set during scavenge"); 5575 assert(n_workers > 0, "Should have been set during scavenge");
5550 n_workers = ParallelGCThreads; 5576 n_workers = ParallelGCThreads;
5551 workers->set_active_workers(n_workers); 5577 workers->set_active_workers(n_workers);
5552 } 5578 }
5553 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); 5579 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5554 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5555 5580
5556 CMSParRemarkTask tsk(this, 5581 CMSParRemarkTask tsk(this,
5557 cms_space, perm_space, 5582 cms_space,
5558 n_workers, workers, task_queues()); 5583 n_workers, workers, task_queues());
5559 5584
5560 // Set up for parallel process_strong_roots work. 5585 // Set up for parallel process_strong_roots work.
5561 gch->set_par_threads(n_workers); 5586 gch->set_par_threads(n_workers);
5562 // We won't be iterating over the cards in the card table updating 5587 // We won't be iterating over the cards in the card table updating
5578 5603
5579 // The dirty card rescan work is broken up into a "sequence" 5604 // The dirty card rescan work is broken up into a "sequence"
5580 // of parallel tasks (per constituent space) that are dynamically 5605 // of parallel tasks (per constituent space) that are dynamically
5581 // claimed by the parallel threads. 5606 // claimed by the parallel threads.
5582 cms_space->initialize_sequential_subtasks_for_rescan(n_workers); 5607 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5583 perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5584 5608
5585 // It turns out that even when we're using 1 thread, doing the work in a 5609 // It turns out that even when we're using 1 thread, doing the work in a
5586 // separate thread causes wide variance in run times. We can't help this 5610 // separate thread causes wide variance in run times. We can't help this
5587 // in the multi-threaded case, but we special-case n=1 here to get 5611 // in the multi-threaded case, but we special-case n=1 here to get
5588 // repeatable measurements of the 1-thread overhead of the parallel code. 5612 // repeatable measurements of the 1-thread overhead of the parallel code.
5596 } else { 5620 } else {
5597 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false); 5621 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5598 GenCollectedHeap::StrongRootsScope srs(gch); 5622 GenCollectedHeap::StrongRootsScope srs(gch);
5599 tsk.work(0); 5623 tsk.work(0);
5600 } 5624 }
5625
5601 gch->set_par_threads(0); // 0 ==> non-parallel. 5626 gch->set_par_threads(0); // 0 ==> non-parallel.
5602 // restore, single-threaded for now, any preserved marks 5627 // restore, single-threaded for now, any preserved marks
5603 // as a result of work_q overflow 5628 // as a result of work_q overflow
5604 restore_preserved_marks_if_any(); 5629 restore_preserved_marks_if_any();
5605 } 5630 }
5610 HandleMark hm; 5635 HandleMark hm;
5611 GenCollectedHeap* gch = GenCollectedHeap::heap(); 5636 GenCollectedHeap* gch = GenCollectedHeap::heap();
5612 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false); 5637 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5613 5638
5614 MarkRefsIntoAndScanClosure 5639 MarkRefsIntoAndScanClosure
5615 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, 5640 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5616 &_markStack, &_revisitStack, this, 5641 &_markStack, this,
5617 false /* should_yield */, false /* not precleaning */); 5642 false /* should_yield */, false /* not precleaning */);
5618 MarkFromDirtyCardsClosure 5643 MarkFromDirtyCardsClosure
5619 markFromDirtyCardsClosure(this, _span, 5644 markFromDirtyCardsClosure(this, _span,
5620 NULL, // space is set further below 5645 NULL, // space is set further below
5621 &_markBitMap, &_markStack, &_revisitStack, 5646 &_markBitMap, &_markStack, &mrias_cl);
5622 &mrias_cl);
5623 { 5647 {
5624 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty); 5648 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5625 // Iterate over the dirty cards, setting the corresponding bits in the 5649 // Iterate over the dirty cards, setting the corresponding bits in the
5626 // mod union table. 5650 // mod union table.
5627 { 5651 {
5628 ModUnionClosure modUnionClosure(&_modUnionTable); 5652 ModUnionClosure modUnionClosure(&_modUnionTable);
5629 _ct->ct_bs()->dirty_card_iterate( 5653 _ct->ct_bs()->dirty_card_iterate(
5630 _cmsGen->used_region(), 5654 _cmsGen->used_region(),
5631 &modUnionClosure);
5632 _ct->ct_bs()->dirty_card_iterate(
5633 _permGen->used_region(),
5634 &modUnionClosure); 5655 &modUnionClosure);
5635 } 5656 }
5636 // Having transferred these marks into the modUnionTable, we just need 5657 // Having transferred these marks into the modUnionTable, we just need
5637 // to rescan the marked objects on the dirty cards in the modUnionTable. 5658 // to rescan the marked objects on the dirty cards in the modUnionTable.
5638 // The initial marking may have been done during an asynchronous 5659 // The initial marking may have been done during an asynchronous
5652 if (PrintCMSStatistics != 0) { 5673 if (PrintCMSStatistics != 0) {
5653 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ", 5674 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5654 markFromDirtyCardsClosure.num_dirty_cards()); 5675 markFromDirtyCardsClosure.num_dirty_cards());
5655 } 5676 }
5656 } 5677 }
5657 {
5658 // .. and then repeat for dirty cards in perm gen
5659 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5660 MemRegion ur = _permGen->used_region();
5661 HeapWord* lb = ur.start();
5662 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5663 MemRegion perm_span(lb, ub);
5664 _modUnionTable.dirty_range_iterate_clear(perm_span,
5665 &markFromDirtyCardsClosure);
5666 verify_work_stacks_empty();
5667 if (PrintCMSStatistics != 0) {
5668 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5669 markFromDirtyCardsClosure.num_dirty_cards());
5670 }
5671 }
5672 } 5678 }
5673 if (VerifyDuringGC && 5679 if (VerifyDuringGC &&
5674 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { 5680 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5675 HandleMark hm; // Discard invalid handles created during verification 5681 HandleMark hm; // Discard invalid handles created during verification
5676 Universe::verify(true); 5682 Universe::verify(true);
5683 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 5689 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5684 GenCollectedHeap::StrongRootsScope srs(gch); 5690 GenCollectedHeap::StrongRootsScope srs(gch);
5685 gch->gen_process_strong_roots(_cmsGen->level(), 5691 gch->gen_process_strong_roots(_cmsGen->level(),
5686 true, // younger gens as roots 5692 true, // younger gens as roots
5687 false, // use the local StrongRootsScope 5693 false, // use the local StrongRootsScope
5688 true, // collecting perm gen 5694 false, // not scavenging
5689 SharedHeap::ScanningOption(roots_scanning_options()), 5695 SharedHeap::ScanningOption(roots_scanning_options()),
5690 &mrias_cl, 5696 &mrias_cl,
5691 true, // walk code active on stacks 5697 true, // walk code active on stacks
5692 NULL); 5698 NULL,
5699 NULL); // The dirty klasses will be handled below
5700
5693 assert(should_unload_classes() 5701 assert(should_unload_classes()
5694 || (roots_scanning_options() & SharedHeap::SO_CodeCache), 5702 || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5695 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 5703 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5696 } 5704 }
5705
5706 {
5707 TraceTime t("visit unhandled CLDs", PrintGCDetails, false, gclog_or_tty);
5708
5709 verify_work_stacks_empty();
5710
5711 // Scan all class loader data objects that might have been introduced
5712 // during concurrent marking.
5713 ResourceMark rm;
5714 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5715 for (int i = 0; i < array->length(); i++) {
5716 mrias_cl.do_class_loader_data(array->at(i));
5717 }
5718
5719 // We don't need to keep track of new CLDs anymore.
5720 ClassLoaderDataGraph::remember_new_clds(false);
5721
5722 verify_work_stacks_empty();
5723 }
5724
5725 {
5726 TraceTime t("dirty klass scan", PrintGCDetails, false, gclog_or_tty);
5727
5728 verify_work_stacks_empty();
5729
5730 RemarkKlassClosure remark_klass_closure(&mrias_cl);
5731 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5732
5733 verify_work_stacks_empty();
5734 }
5735
5736 // We might have added oops to ClassLoaderData::_handles during the
5737 // concurrent marking phase. These oops point to newly allocated objects
5738 // that are guaranteed to be kept alive, either by the direct allocation
5739 // code or when the young collector processes the strong roots. Hence,
5740 // we don't have to revisit the _handles block during the remark phase.
5741
5697 verify_work_stacks_empty(); 5742 verify_work_stacks_empty();
5698 // Restore evacuated mark words, if any, used for overflow list links 5743 // Restore evacuated mark words, if any, used for overflow list links
5699 if (!CMSOverflowEarlyRestoration) { 5744 if (!CMSOverflowEarlyRestoration) {
5700 restore_preserved_marks_if_any(); 5745 restore_preserved_marks_if_any();
5701 } 5746 }
5747 5792
5748 void CMSRefProcTaskProxy::work(uint worker_id) { 5793 void CMSRefProcTaskProxy::work(uint worker_id) {
5749 assert(_collector->_span.equals(_span), "Inconsistency in _span"); 5794 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5750 CMSParKeepAliveClosure par_keep_alive(_collector, _span, 5795 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5751 _mark_bit_map, 5796 _mark_bit_map,
5752 &_collector->_revisitStack,
5753 work_queue(worker_id)); 5797 work_queue(worker_id));
5754 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, 5798 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5755 _mark_bit_map, 5799 _mark_bit_map,
5756 &_collector->_revisitStack,
5757 work_queue(worker_id)); 5800 work_queue(worker_id));
5758 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); 5801 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5759 _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack); 5802 _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5760 if (_task.marks_oops_alive()) { 5803 if (_task.marks_oops_alive()) {
5761 do_work_steal(worker_id, &par_drain_stack, &par_keep_alive, 5804 do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5780 _task.work(worker_id); 5823 _task.work(worker_id);
5781 } 5824 }
5782 }; 5825 };
5783 5826
5784 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, 5827 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5785 MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack, 5828 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5786 OopTaskQueue* work_queue):
5787 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
5788 _span(span), 5829 _span(span),
5789 _bit_map(bit_map), 5830 _bit_map(bit_map),
5790 _work_queue(work_queue), 5831 _work_queue(work_queue),
5791 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue), 5832 _mark_and_push(collector, span, bit_map, work_queue),
5792 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), 5833 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5793 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))) 5834 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5794 { } 5835 { }
5795 5836
5796 // . see if we can share work_queues with ParNew? XXX 5837 // . see if we can share work_queues with ParNew? XXX
5877 // Process weak references. 5918 // Process weak references.
5878 rp->setup_policy(clear_all_soft_refs); 5919 rp->setup_policy(clear_all_soft_refs);
5879 verify_work_stacks_empty(); 5920 verify_work_stacks_empty();
5880 5921
5881 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, 5922 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5882 &_markStack, &_revisitStack, 5923 &_markStack, false /* !preclean */);
5883 false /* !preclean */);
5884 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, 5924 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5885 _span, &_markBitMap, &_markStack, 5925 _span, &_markBitMap, &_markStack,
5886 &cmsKeepAliveClosure, false /* !preclean */); 5926 &cmsKeepAliveClosure, false /* !preclean */);
5887 { 5927 {
5888 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); 5928 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5931 5971
5932 cmsDrainMarkingStackClosure.do_void(); 5972 cmsDrainMarkingStackClosure.do_void();
5933 verify_work_stacks_empty(); 5973 verify_work_stacks_empty();
5934 5974
5935 // Update subklass/sibling/implementor links in KlassKlass descendants 5975 // Update subklass/sibling/implementor links in KlassKlass descendants
5936 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty"); 5976 Klass::clean_weak_klass_links(&_is_alive_closure);
5937 oop k; 5977 // Nothing should have been pushed onto the working stacks.
5938 while ((k = _revisitStack.pop()) != NULL) {
5939 ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5940 &_is_alive_closure,
5941 &cmsKeepAliveClosure);
5942 }
5943 assert(!ClassUnloading ||
5944 (_markStack.isEmpty() && overflow_list_is_empty()),
5945 "Should not have found new reachable objects");
5946 assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5947 cmsDrainMarkingStackClosure.do_void();
5948 verify_work_stacks_empty(); 5978 verify_work_stacks_empty();
5949 } 5979 }
5950 5980
5951 { 5981 {
5952 TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty); 5982 TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty);
5953 // Clean up unreferenced symbols in symbol table. 5983 // Clean up unreferenced symbols in symbol table.
5954 SymbolTable::unlink(); 5984 SymbolTable::unlink();
5955 } 5985 }
5956 } 5986 }
5957 5987
5958 if (should_unload_classes() || !JavaObjectsInPerm) { 5988 // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
5989 // Need to check if we really scanned the StringTable.
5990 if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
5959 TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); 5991 TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
5960 // Now clean up stale oops in StringTable 5992 // Now clean up stale oops in StringTable
5961 StringTable::unlink(&_is_alive_closure); 5993 StringTable::unlink(&_is_alive_closure);
5962 } 5994 }
5963 5995
6017 6049
6018 _inter_sweep_timer.stop(); 6050 _inter_sweep_timer.stop();
6019 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds()); 6051 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6020 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free()); 6052 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6021 6053
6022 // PermGen verification support: If perm gen sweeping is disabled in
6023 // this cycle, we preserve the perm gen object "deadness" information
6024 // in the perm_gen_verify_bit_map. In order to do that we traverse
6025 // all blocks in perm gen and mark all dead objects.
6026 if (verifying() && !should_unload_classes()) {
6027 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
6028 "Should have already been allocated");
6029 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
6030 markBitMap(), perm_gen_verify_bit_map());
6031 if (asynch) {
6032 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
6033 bitMapLock());
6034 _permGen->cmsSpace()->blk_iterate(&mdo);
6035 } else {
6036 // In the case of synchronous sweep, we already have
6037 // the requisite locks/tokens.
6038 _permGen->cmsSpace()->blk_iterate(&mdo);
6039 }
6040 }
6041
6042 assert(!_intra_sweep_timer.is_active(), "Should not be active"); 6054 assert(!_intra_sweep_timer.is_active(), "Should not be active");
6043 _intra_sweep_timer.reset(); 6055 _intra_sweep_timer.reset();
6044 _intra_sweep_timer.start(); 6056 _intra_sweep_timer.start();
6045 if (asynch) { 6057 if (asynch) {
6046 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 6058 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6047 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails); 6059 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
6048 // First sweep the old gen then the perm gen 6060 // First sweep the old gen
6049 { 6061 {
6050 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), 6062 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6051 bitMapLock()); 6063 bitMapLock());
6052 sweepWork(_cmsGen, asynch); 6064 sweepWork(_cmsGen, asynch);
6053 }
6054
6055 // Now repeat for perm gen
6056 if (should_unload_classes()) {
6057 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
6058 bitMapLock());
6059 sweepWork(_permGen, asynch);
6060 } 6065 }
6061 6066
6062 // Update Universe::_heap_*_at_gc figures. 6067 // Update Universe::_heap_*_at_gc figures.
6063 // We need all the free list locks to make the abstract state 6068 // We need all the free list locks to make the abstract state
6064 // transition from Sweeping to Resetting. See detailed note 6069 // transition from Sweeping to Resetting. See detailed note
6065 // further below. 6070 // further below.
6066 { 6071 {
6067 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), 6072 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
6068 _permGen->freelistLock());
6069 // Update heap occupancy information which is used as 6073 // Update heap occupancy information which is used as
6070 // input to soft ref clearing policy at the next gc. 6074 // input to soft ref clearing policy at the next gc.
6071 Universe::update_heap_info_at_gc(); 6075 Universe::update_heap_info_at_gc();
6072 _collectorState = Resizing; 6076 _collectorState = Resizing;
6073 } 6077 }
6074 } else { 6078 } else {
6075 // already have needed locks 6079 // already have needed locks
6076 sweepWork(_cmsGen, asynch); 6080 sweepWork(_cmsGen, asynch);
6077
6078 if (should_unload_classes()) {
6079 sweepWork(_permGen, asynch);
6080 }
6081 // Update heap occupancy information which is used as 6081 // Update heap occupancy information which is used as
6082 // input to soft ref clearing policy at the next gc. 6082 // input to soft ref clearing policy at the next gc.
6083 Universe::update_heap_info_at_gc(); 6083 Universe::update_heap_info_at_gc();
6084 _collectorState = Resizing; 6084 _collectorState = Resizing;
6085 } 6085 }
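For orientation, the asynchronous branch above sweeps the old generation while holding both the free-list lock and the bit-map lock, then retakes the free-list lock alone to publish heap-occupancy figures and move the collector from Sweeping to Resizing. Below is a rough standalone model of that lock-then-transition ordering; std::mutex and the ModelCollector type are stand-ins and do not reproduce the CMS token machinery.

#include <cstdio>
#include <mutex>

enum CollectorState { Sweeping, Resizing };

struct ModelCollector {
  std::mutex freelist_lock;   // stands in for _cmsGen->freelistLock()
  std::mutex bitmap_lock;     // stands in for bitMapLock()
  CollectorState state = Sweeping;

  void sweep_work() { /* walk the space, coalescing dead blocks */ }

  void sweep_async() {
    {
      // Sweeping itself needs both the free lists and the mark bit map.
      std::scoped_lock both(freelist_lock, bitmap_lock);
      sweep_work();
    }
    {
      // The state transition needs only the free-list lock; occupancy
      // figures are published before the state changes to Resizing.
      std::scoped_lock fl(freelist_lock);
      // Universe::update_heap_info_at_gc() would run here
      state = Resizing;
    }
  }
};

int main() {
  ModelCollector c;
  c.sweep_async();
  printf("state is Resizing: %d\n", c.state == Resizing);
  return 0;
}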
6410 } 6410 }
6411 6411
6412 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const { 6412 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6413 size_t sz = 0; 6413 size_t sz = 0;
6414 oop p = (oop)addr; 6414 oop p = (oop)addr;
6415 if (p->klass_or_null() != NULL && p->is_parsable()) { 6415 if (p->klass_or_null() != NULL) {
6416 sz = CompactibleFreeListSpace::adjustObjectSize(p->size()); 6416 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6417 } else { 6417 } else {
6418 sz = block_size_using_printezis_bits(addr); 6418 sz = block_size_using_printezis_bits(addr);
6419 } 6419 }
6420 assert(sz > 0, "size must be nonzero"); 6420 assert(sz > 0, "size must be nonzero");
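The routine above first sizes the block, either from the installed klass pointer or from the Printezis bits; its tail, not shown in this hunk, presumably rounds past the end of the block to the next card boundary, as the name says. A small sketch of that alignment arithmetic, treating addresses as raw integers and assuming the conventional card size of 512 bytes (CardTableModRefBS::card_size on typical builds):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for CardTableModRefBS::card_size (512 bytes
// on mainstream ports).
const uintptr_t card_size_in_bytes = 512;

// Round the first byte past the block up to the next card boundary.
uintptr_t next_card_start_after_block(uintptr_t block_start, size_t block_size_in_bytes) {
  uintptr_t block_end = block_start + block_size_in_bytes;
  return (block_end + card_size_in_bytes - 1) & ~(card_size_in_bytes - 1);
}

int main() {
  // A 40-byte block starting 24 bytes into a card ends inside the same
  // card, so the next card starts at the following 512-byte boundary.
  printf("%#lx\n", (unsigned long)next_card_start_after_block(0x10018, 40));  // prints 0x10200
  return 0;
}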
6599 // should refactor and consolidate common code. 6599 // should refactor and consolidate common code.
6600 6600
6601 // This closure is used to mark refs into the CMS generation in 6601 // This closure is used to mark refs into the CMS generation in
6602 // the CMS bit map. Called at the first checkpoint. This closure 6602 // the CMS bit map. Called at the first checkpoint. This closure
6603 // assumes that we do not need to re-mark dirty cards; if the CMS 6603 // assumes that we do not need to re-mark dirty cards; if the CMS
6604 // generation on which this is used is not an oldest (modulo perm gen) 6604 // generation on which this is used is not an oldest
6605 // generation then this will lose younger_gen cards! 6605 // generation then this will lose younger_gen cards!
6606 6606
6607 MarkRefsIntoClosure::MarkRefsIntoClosure( 6607 MarkRefsIntoClosure::MarkRefsIntoClosure(
6608 MemRegion span, CMSBitMap* bitMap): 6608 MemRegion span, CMSBitMap* bitMap):
6609 _span(span), 6609 _span(span),
6661 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span, 6661 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6662 ReferenceProcessor* rp, 6662 ReferenceProcessor* rp,
6663 CMSBitMap* bit_map, 6663 CMSBitMap* bit_map,
6664 CMSBitMap* mod_union_table, 6664 CMSBitMap* mod_union_table,
6665 CMSMarkStack* mark_stack, 6665 CMSMarkStack* mark_stack,
6666 CMSMarkStack* revisit_stack,
6667 CMSCollector* collector, 6666 CMSCollector* collector,
6668 bool should_yield, 6667 bool should_yield,
6669 bool concurrent_precleaning): 6668 bool concurrent_precleaning):
6670 _collector(collector), 6669 _collector(collector),
6671 _span(span), 6670 _span(span),
6672 _bit_map(bit_map), 6671 _bit_map(bit_map),
6673 _mark_stack(mark_stack), 6672 _mark_stack(mark_stack),
6674 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table, 6673 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6675 mark_stack, revisit_stack, concurrent_precleaning), 6674 mark_stack, concurrent_precleaning),
6676 _yield(should_yield), 6675 _yield(should_yield),
6677 _concurrent_precleaning(concurrent_precleaning), 6676 _concurrent_precleaning(concurrent_precleaning),
6678 _freelistLock(NULL) 6677 _freelistLock(NULL)
6679 { 6678 {
6680 _ref_processor = rp; 6679 _ref_processor = rp;
6705 bool res = _mark_stack->push(obj); 6704 bool res = _mark_stack->push(obj);
6706 assert(res, "Should have space to push on empty stack"); 6705 assert(res, "Should have space to push on empty stack");
6707 do { 6706 do {
6708 oop new_oop = _mark_stack->pop(); 6707 oop new_oop = _mark_stack->pop();
6709 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); 6708 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6710 assert(new_oop->is_parsable(), "Found unparsable oop");
6711 assert(_bit_map->isMarked((HeapWord*)new_oop), 6709 assert(_bit_map->isMarked((HeapWord*)new_oop),
6712 "only grey objects on this stack"); 6710 "only grey objects on this stack");
6713 // iterate over the oops in this oop, marking and pushing 6711 // iterate over the oops in this oop, marking and pushing
6714 // the ones in CMS heap (i.e. in _span). 6712 // the ones in CMS heap (i.e. in _span).
6715 new_oop->oop_iterate(&_pushAndMarkClosure); 6713 new_oop->oop_iterate(&_pushAndMarkClosure);
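The do/while loop above is the standard transitive-closure drain: push the newly found grey object, then repeatedly pop an entry, check that it is marked, and apply the push-and-mark closure to each of its fields so that freshly discovered objects go back onto the stack. A toy, self-contained version of that drain over an integer object graph; indices stand in for oops and no HotSpot types are used.

#include <cstdio>
#include <vector>

// Toy object graph: each object is a list of indices of the objects it references.
struct ToyHeap {
  std::vector<std::vector<int>> refs;
  std::vector<bool> marked;
};

// Push the root, then drain: pop a grey object and apply the
// "push and mark" step to each of its fields.
void mark_from(ToyHeap& h, int root) {
  std::vector<int> mark_stack;
  if (!h.marked[root]) { h.marked[root] = true; mark_stack.push_back(root); }
  while (!mark_stack.empty()) {
    int obj = mark_stack.back();
    mark_stack.pop_back();
    for (int field : h.refs[obj]) {   // oop_iterate over the fields
      if (!h.marked[field]) {         // push-and-mark: mark, then push grey
        h.marked[field] = true;
        mark_stack.push_back(field);
      }
    }
  }
}

int main() {
  ToyHeap h;
  h.refs = {{1, 2}, {2}, {}, {0}};    // object 3 is unreachable from object 0
  h.marked.assign(h.refs.size(), false);
  mark_from(h, 0);
  printf("3 reachable? %d\n", (int)h.marked[3]);  // prints 0
  return 0;
}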
6745 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6743 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6746 "CMS thread should hold CMS token"); 6744 "CMS thread should hold CMS token");
6747 assert_lock_strong(_freelistLock); 6745 assert_lock_strong(_freelistLock);
6748 assert_lock_strong(_bit_map->lock()); 6746 assert_lock_strong(_bit_map->lock());
6749 // relinquish the free_list_lock and bitMaplock() 6747 // relinquish the free_list_lock and bitMaplock()
6750 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6751 _bit_map->lock()->unlock(); 6748 _bit_map->lock()->unlock();
6752 _freelistLock->unlock(); 6749 _freelistLock->unlock();
6753 ConcurrentMarkSweepThread::desynchronize(true); 6750 ConcurrentMarkSweepThread::desynchronize(true);
6754 ConcurrentMarkSweepThread::acknowledge_yield_request(); 6751 ConcurrentMarkSweepThread::acknowledge_yield_request();
6755 _collector->stopTimer(); 6752 _collector->stopTimer();
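do_yield_work() above follows the usual CMS yield shape: drop the bit-map and free-list locks, give up the CMS token, stop the concurrent timer, pause briefly while a foreground operation proceeds, and then (in the tail of the routine, outside this hunk) reacquire the token and both locks. A compact standalone model of that release/park/reacquire discipline; plain std::mutex is used and none of the CMS token semantics are reproduced.

#include <chrono>
#include <mutex>
#include <thread>

struct YieldingScanner {
  std::mutex bitmap_lock;
  std::mutex freelist_lock;

  // Caller holds both locks; yield releases them so a safepoint-like
  // operation can run, then takes them back in the original order.
  void do_yield_work() {
    bitmap_lock.unlock();
    freelist_lock.unlock();

    // stand-in for stopTimer()/sleep in the real collector
    std::this_thread::sleep_for(std::chrono::milliseconds(1));

    freelist_lock.lock();
    bitmap_lock.lock();
  }

  void scan() {
    freelist_lock.lock();
    bitmap_lock.lock();
    for (int chunk = 0; chunk < 4; ++chunk) {
      // ... scan one chunk of the space ...
      do_yield_work();   // be polite between chunks
    }
    bitmap_lock.unlock();
    freelist_lock.unlock();
  }
};

int main() {
  YieldingScanner s;
  s.scan();
  return 0;
}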
6779 // Par_MarkRefsIntoAndScanClosure: a parallel version of 6776 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6780 // MarkRefsIntoAndScanClosure 6777 // MarkRefsIntoAndScanClosure
6781 /////////////////////////////////////////////////////////// 6778 ///////////////////////////////////////////////////////////
6782 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure( 6779 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6783 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp, 6780 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6784 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack): 6781 CMSBitMap* bit_map, OopTaskQueue* work_queue):
6785 _span(span), 6782 _span(span),
6786 _bit_map(bit_map), 6783 _bit_map(bit_map),
6787 _work_queue(work_queue), 6784 _work_queue(work_queue),
6788 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), 6785 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6789 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))), 6786 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6790 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue, 6787 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6791 revisit_stack)
6792 { 6788 {
6793 _ref_processor = rp; 6789 _ref_processor = rp;
6794 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); 6790 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6795 } 6791 }
6796 6792
6847 return 0; 6843 return 0;
6848 } 6844 }
6849 if (_bitMap->isMarked(addr)) { 6845 if (_bitMap->isMarked(addr)) {
6850 // it's marked; is it potentially uninitialized? 6846 // it's marked; is it potentially uninitialized?
6851 if (p->klass_or_null() != NULL) { 6847 if (p->klass_or_null() != NULL) {
6852 // If is_conc_safe is false, the object may be undergoing
6853 // change by the VM outside a safepoint. Don't try to
6854 // scan it, but rather leave it for the remark phase.
6855 if (CMSPermGenPrecleaningEnabled &&
6856 (!p->is_conc_safe() || !p->is_parsable())) {
6857 // Signal precleaning to redirty the card since
6858 // the klass pointer is already installed.
6859 assert(size == 0, "Initial value");
6860 } else {
6861 assert(p->is_parsable(), "must be parsable.");
6862 // an initialized object; ignore mark word in verification below 6848 // an initialized object; ignore mark word in verification below
6863 // since we are running concurrent with mutators 6849 // since we are running concurrent with mutators
6864 assert(p->is_oop(true), "should be an oop"); 6850 assert(p->is_oop(true), "should be an oop");
6865 if (p->is_objArray()) { 6851 if (p->is_objArray()) {
6866 // objArrays are precisely marked; restrict scanning 6852 // objArrays are precisely marked; restrict scanning
6884 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1); 6870 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6885 assert(_bitMap->isMarked(addr+size-1), 6871 assert(_bitMap->isMarked(addr+size-1),
6886 "inconsistent Printezis mark"); 6872 "inconsistent Printezis mark");
6887 } 6873 }
6888 #endif // DEBUG 6874 #endif // DEBUG
6889 }
6890 } else { 6875 } else {
6891 // an uninitialized object 6876 // an uninitialized object

6892 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?"); 6877 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6893 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2); 6878 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6894 size = pointer_delta(nextOneAddr + 1, addr); 6879 size = pointer_delta(nextOneAddr + 1, addr);
6898 // will dirty the card when the klass pointer is installed in the 6883 // will dirty the card when the klass pointer is installed in the
6899 // object (signalling the completion of initialization). 6884 // object (signalling the completion of initialization).
6900 } 6885 }
6901 } else { 6886 } else {
6902 // Either a not yet marked object or an uninitialized object 6887 // Either a not yet marked object or an uninitialized object
6903 if (p->klass_or_null() == NULL || !p->is_parsable()) { 6888 if (p->klass_or_null() == NULL) {
6904 // An uninitialized object, skip to the next card, since 6889 // An uninitialized object, skip to the next card, since
6905 // we may not be able to read its P-bits yet. 6890 // we may not be able to read its P-bits yet.
6906 assert(size == 0, "Initial value"); 6891 assert(size == 0, "Initial value");
6907 } else { 6892 } else {
6908 // An object not (yet) reached by marking: we merely need to 6893 // An object not (yet) reached by marking: we merely need to
6918 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() { 6903 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6919 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6904 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6920 "CMS thread should hold CMS token"); 6905 "CMS thread should hold CMS token");
6921 assert_lock_strong(_freelistLock); 6906 assert_lock_strong(_freelistLock);
6922 assert_lock_strong(_bitMap->lock()); 6907 assert_lock_strong(_bitMap->lock());
6923 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6924 // relinquish the free_list_lock and bitMaplock() 6908 // relinquish the free_list_lock and bitMaplock()
6925 _bitMap->lock()->unlock(); 6909 _bitMap->lock()->unlock();
6926 _freelistLock->unlock(); 6910 _freelistLock->unlock();
6927 ConcurrentMarkSweepThread::desynchronize(true); 6911 ConcurrentMarkSweepThread::desynchronize(true);
6928 ConcurrentMarkSweepThread::acknowledge_yield_request(); 6912 ConcurrentMarkSweepThread::acknowledge_yield_request();
6957 6941
6958 HeapWord* addr = (HeapWord*)p; 6942 HeapWord* addr = (HeapWord*)p;
6959 DEBUG_ONLY(_collector->verify_work_stacks_empty();) 6943 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6960 assert(!_span.contains(addr), "we are scanning the survivor spaces"); 6944 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6961 assert(p->klass_or_null() != NULL, "object should be initialized"); 6945 assert(p->klass_or_null() != NULL, "object should be initialized");
6962 assert(p->is_parsable(), "must be parsable.");
6963 // an initialized object; ignore mark word in verification below 6946 // an initialized object; ignore mark word in verification below
6964 // since we are running concurrent with mutators 6947 // since we are running concurrent with mutators
6965 assert(p->is_oop(true), "should be an oop"); 6948 assert(p->is_oop(true), "should be an oop");
6966 // Note that we do not yield while we iterate over 6949 // Note that we do not yield while we iterate over
6967 // the interior oops of p, pushing the relevant ones 6950 // the interior oops of p, pushing the relevant ones
6975 // good idea to abort immediately and complete the marking 6958 // good idea to abort immediately and complete the marking
6976 // from the grey objects at a later time. 6959 // from the grey objects at a later time.
6977 while (!_mark_stack->isEmpty()) { 6960 while (!_mark_stack->isEmpty()) {
6978 oop new_oop = _mark_stack->pop(); 6961 oop new_oop = _mark_stack->pop();
6979 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); 6962 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6980 assert(new_oop->is_parsable(), "Found unparsable oop");
6981 assert(_bit_map->isMarked((HeapWord*)new_oop), 6963 assert(_bit_map->isMarked((HeapWord*)new_oop),
6982 "only grey objects on this stack"); 6964 "only grey objects on this stack");
6983 // iterate over the oops in this oop, marking and pushing 6965 // iterate over the oops in this oop, marking and pushing
6984 // the ones in CMS heap (i.e. in _span). 6966 // the ones in CMS heap (i.e. in _span).
6985 new_oop->oop_iterate(_scanning_closure); 6967 new_oop->oop_iterate(_scanning_closure);
6995 6977
6996 void SurvivorSpacePrecleanClosure::do_yield_work() { 6978 void SurvivorSpacePrecleanClosure::do_yield_work() {
6997 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6979 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6998 "CMS thread should hold CMS token"); 6980 "CMS thread should hold CMS token");
6999 assert_lock_strong(_bit_map->lock()); 6981 assert_lock_strong(_bit_map->lock());
7000 DEBUG_ONLY(RememberKlassesChecker smx(false);)
7001 // Relinquish the bit map lock 6982 // Relinquish the bit map lock
7002 _bit_map->lock()->unlock(); 6983 _bit_map->lock()->unlock();
7003 ConcurrentMarkSweepThread::desynchronize(true); 6984 ConcurrentMarkSweepThread::desynchronize(true);
7004 ConcurrentMarkSweepThread::acknowledge_yield_request(); 6985 ConcurrentMarkSweepThread::acknowledge_yield_request();
7005 _collector->stopTimer(); 6986 _collector->stopTimer();
7071 } 7052 }
7072 7053
7073 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector, 7054 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7074 MemRegion span, 7055 MemRegion span,
7075 CMSBitMap* bitMap, CMSMarkStack* markStack, 7056 CMSBitMap* bitMap, CMSMarkStack* markStack,
7076 CMSMarkStack* revisitStack,
7077 bool should_yield, bool verifying): 7057 bool should_yield, bool verifying):
7078 _collector(collector), 7058 _collector(collector),
7079 _span(span), 7059 _span(span),
7080 _bitMap(bitMap), 7060 _bitMap(bitMap),
7081 _mut(&collector->_modUnionTable), 7061 _mut(&collector->_modUnionTable),
7082 _markStack(markStack), 7062 _markStack(markStack),
7083 _revisitStack(revisitStack),
7084 _yield(should_yield), 7063 _yield(should_yield),
7085 _skipBits(0) 7064 _skipBits(0)
7086 { 7065 {
7087 assert(_markStack->isEmpty(), "stack should be empty"); 7066 assert(_markStack->isEmpty(), "stack should be empty");
7088 _finger = _bitMap->startWord(); 7067 _finger = _bitMap->startWord();
7115 if (_bitMap->isMarked(addr+1)) { 7094 if (_bitMap->isMarked(addr+1)) {
7116 // this is an allocated but not yet initialized object 7095 // this is an allocated but not yet initialized object
7117 assert(_skipBits == 0, "tautology"); 7096 assert(_skipBits == 0, "tautology");
7118 _skipBits = 2; // skip next two marked bits ("Printezis-marks") 7097 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
7119 oop p = oop(addr); 7098 oop p = oop(addr);
7120 if (p->klass_or_null() == NULL || !p->is_parsable()) { 7099 if (p->klass_or_null() == NULL) {
7121 DEBUG_ONLY(if (!_verifying) {) 7100 DEBUG_ONLY(if (!_verifying) {)
7122 // We re-dirty the cards on which this object lies and increase 7101 // We re-dirty the cards on which this object lies and increase
7123 // the _threshold so that we'll come back to scan this object 7102 // the _threshold so that we'll come back to scan this object
7124 // during the preclean or remark phase. (CMSCleanOnEnter) 7103 // during the preclean or remark phase. (CMSCleanOnEnter)
7125 if (CMSCleanOnEnter) { 7104 if (CMSCleanOnEnter) {
7158 // do this unlock/lock or modify the MutexUnlocker class to 7137 // do this unlock/lock or modify the MutexUnlocker class to
7159 // serve our purpose. XXX 7138 // serve our purpose. XXX
7160 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 7139 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7161 "CMS thread should hold CMS token"); 7140 "CMS thread should hold CMS token");
7162 assert_lock_strong(_bitMap->lock()); 7141 assert_lock_strong(_bitMap->lock());
7163 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7164 _bitMap->lock()->unlock(); 7142 _bitMap->lock()->unlock();
7165 ConcurrentMarkSweepThread::desynchronize(true); 7143 ConcurrentMarkSweepThread::desynchronize(true);
7166 ConcurrentMarkSweepThread::acknowledge_yield_request(); 7144 ConcurrentMarkSweepThread::acknowledge_yield_request();
7167 _collector->stopTimer(); 7145 _collector->stopTimer();
7168 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); 7146 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7225 _threshold = (HeapWord*)round_to( 7203 _threshold = (HeapWord*)round_to(
7226 (intptr_t)_finger, CardTableModRefBS::card_size); 7204 (intptr_t)_finger, CardTableModRefBS::card_size);
7227 MemRegion mr(old_threshold, _threshold); 7205 MemRegion mr(old_threshold, _threshold);
7228 assert(!mr.is_empty(), "Control point invariant"); 7206 assert(!mr.is_empty(), "Control point invariant");
7229 assert(_span.contains(mr), "Should clear within span"); 7207 assert(_span.contains(mr), "Should clear within span");
7230 // XXX When _finger crosses from old gen into perm gen
7231 // we may be doing unnecessary cleaning; do better in the
7232 // future by detecting that condition and clearing fewer
7233 // MUT/CT entries.
7234 _mut->clear_range(mr); 7208 _mut->clear_range(mr);
7235 } 7209 }
7236 DEBUG_ONLY(}) 7210 DEBUG_ONLY(})
7237 // Note: the finger doesn't advance while we drain 7211 // Note: the finger doesn't advance while we drain
7238 // the stack below. 7212 // the stack below.
7239 PushOrMarkClosure pushOrMarkClosure(_collector, 7213 PushOrMarkClosure pushOrMarkClosure(_collector,
7240 _span, _bitMap, _markStack, 7214 _span, _bitMap, _markStack,
7241 _revisitStack,
7242 _finger, this); 7215 _finger, this);
7243 bool res = _markStack->push(obj); 7216 bool res = _markStack->push(obj);
7244 assert(res, "Empty non-zero size stack should have space for single push"); 7217 assert(res, "Empty non-zero size stack should have space for single push");
7245 while (!_markStack->isEmpty()) { 7218 while (!_markStack->isEmpty()) {
7246 oop new_oop = _markStack->pop(); 7219 oop new_oop = _markStack->pop();
7257 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task, 7230 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7258 CMSCollector* collector, MemRegion span, 7231 CMSCollector* collector, MemRegion span,
7259 CMSBitMap* bit_map, 7232 CMSBitMap* bit_map,
7260 OopTaskQueue* work_queue, 7233 OopTaskQueue* work_queue,
7261 CMSMarkStack* overflow_stack, 7234 CMSMarkStack* overflow_stack,
7262 CMSMarkStack* revisit_stack,
7263 bool should_yield): 7235 bool should_yield):
7264 _collector(collector), 7236 _collector(collector),
7265 _whole_span(collector->_span), 7237 _whole_span(collector->_span),
7266 _span(span), 7238 _span(span),
7267 _bit_map(bit_map), 7239 _bit_map(bit_map),
7268 _mut(&collector->_modUnionTable), 7240 _mut(&collector->_modUnionTable),
7269 _work_queue(work_queue), 7241 _work_queue(work_queue),
7270 _overflow_stack(overflow_stack), 7242 _overflow_stack(overflow_stack),
7271 _revisit_stack(revisit_stack),
7272 _yield(should_yield), 7243 _yield(should_yield),
7273 _skip_bits(0), 7244 _skip_bits(0),
7274 _task(task) 7245 _task(task)
7275 { 7246 {
7276 assert(_work_queue->size() == 0, "work_queue should be empty"); 7247 assert(_work_queue->size() == 0, "work_queue should be empty");
7294 if (_bit_map->isMarked(addr+1)) { 7265 if (_bit_map->isMarked(addr+1)) {
7295 // this is an allocated object that might not yet be initialized 7266 // this is an allocated object that might not yet be initialized
7296 assert(_skip_bits == 0, "tautology"); 7267 assert(_skip_bits == 0, "tautology");
7297 _skip_bits = 2; // skip next two marked bits ("Printezis-marks") 7268 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7298 oop p = oop(addr); 7269 oop p = oop(addr);
7299 if (p->klass_or_null() == NULL || !p->is_parsable()) { 7270 if (p->klass_or_null() == NULL) {
7300 // in the case of Clean-on-Enter optimization, redirty card 7271 // in the case of Clean-on-Enter optimization, redirty card
7301 // and avoid clearing card by increasing the threshold. 7272 // and avoid clearing card by increasing the threshold.
7302 return true; 7273 return true;
7303 } 7274 }
7304 } 7275 }
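Several closures in this file special-case a block whose klass pointer is still NULL by consulting the "Printezis marks": besides the ordinary bit at the object's start, a bit is set at start+1 and another at the block's last word, so the block's extent can be recovered before its header is readable. This is what block_size_using_printezis_bits() and the _skip_bits = 2 bookkeeping above rely on. A toy bit-map model of that encoding, with indices standing in for HeapWord addresses (not the real CMSBitMap API):

#include <cstddef>
#include <cstdio>
#include <vector>

// One bit per heap word; indices play the role of addresses.
struct ToyBitMap {
  std::vector<bool> bits;
  explicit ToyBitMap(size_t words) : bits(words, false) {}
  void mark(size_t i) { bits[i] = true; }
  // analogue of getNextMarkedWordAddress(addr)
  size_t next_marked_at_or_after(size_t i) const {
    while (i < bits.size() && !bits[i]) ++i;
    return i;
  }
};

// Printezis-mark an allocated-but-uninitialized block [start, start+size).
void printezis_mark(ToyBitMap& bm, size_t start, size_t size) {
  bm.mark(start);             // ordinary mark at the block start
  bm.mark(start + 1);         // first Printezis bit
  bm.mark(start + size - 1);  // second Printezis bit at the last word
}

// Recover the size the way the closures above do: find the next mark at
// or after start+2; the block ends one word past it.
size_t block_size_using_printezis_bits(const ToyBitMap& bm, size_t start) {
  size_t next_one = bm.next_marked_at_or_after(start + 2);
  return next_one + 1 - start;
}

int main() {
  ToyBitMap bm(64);
  printezis_mark(bm, 10, 7);   // a 7-word block at "address" 10
  printf("%zu\n", block_size_using_printezis_bits(bm, 10));  // prints 7
  return 0;
}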
7348 _threshold = (HeapWord*)round_to( 7319 _threshold = (HeapWord*)round_to(
7349 (intptr_t)_finger, CardTableModRefBS::card_size); 7320 (intptr_t)_finger, CardTableModRefBS::card_size);
7350 MemRegion mr(old_threshold, _threshold); 7321 MemRegion mr(old_threshold, _threshold);
7351 assert(!mr.is_empty(), "Control point invariant"); 7322 assert(!mr.is_empty(), "Control point invariant");
7352 assert(_span.contains(mr), "Should clear within span"); // _whole_span ?? 7323 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7353 // XXX When _finger crosses from old gen into perm gen
7354 // we may be doing unnecessary cleaning; do better in the
7355 // future by detecting that condition and clearing fewer
7356 // MUT/CT entries.
7357 _mut->clear_range(mr); 7324 _mut->clear_range(mr);
7358 } 7325 }
7359 7326
7360 // Note: the local finger doesn't advance while we drain 7327 // Note: the local finger doesn't advance while we drain
7361 // the stack below, but the global finger sure can and will. 7328 // the stack below, but the global finger sure can and will.
7362 HeapWord** gfa = _task->global_finger_addr(); 7329 HeapWord** gfa = _task->global_finger_addr();
7363 Par_PushOrMarkClosure pushOrMarkClosure(_collector, 7330 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7364 _span, _bit_map, 7331 _span, _bit_map,
7365 _work_queue, 7332 _work_queue,
7366 _overflow_stack, 7333 _overflow_stack,
7367 _revisit_stack,
7368 _finger, 7334 _finger,
7369 gfa, this); 7335 gfa, this);
7370 bool res = _work_queue->push(obj); // overflow could occur here 7336 bool res = _work_queue->push(obj); // overflow could occur here
7371 assert(res, "Will hold once we use workqueues"); 7337 assert(res, "Will hold once we use workqueues");
7372 while (true) { 7338 while (true) {
7459 7425
7460 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure( 7426 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7461 CMSCollector* collector, MemRegion span, 7427 CMSCollector* collector, MemRegion span,
7462 CMSBitMap* verification_bm, CMSBitMap* cms_bm, 7428 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7463 CMSMarkStack* mark_stack): 7429 CMSMarkStack* mark_stack):
7464 OopClosure(collector->ref_processor()), 7430 CMSOopClosure(collector->ref_processor()),
7465 _collector(collector), 7431 _collector(collector),
7466 _span(span), 7432 _span(span),
7467 _verification_bm(verification_bm), 7433 _verification_bm(verification_bm),
7468 _cms_bm(cms_bm), 7434 _cms_bm(cms_bm),
7469 _mark_stack(mark_stack) 7435 _mark_stack(mark_stack)
7511 } 7477 }
7512 7478
7513 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector, 7479 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7514 MemRegion span, 7480 MemRegion span,
7515 CMSBitMap* bitMap, CMSMarkStack* markStack, 7481 CMSBitMap* bitMap, CMSMarkStack* markStack,
7516 CMSMarkStack* revisitStack,
7517 HeapWord* finger, MarkFromRootsClosure* parent) : 7482 HeapWord* finger, MarkFromRootsClosure* parent) :
7518 KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack), 7483 CMSOopClosure(collector->ref_processor()),
7484 _collector(collector),
7519 _span(span), 7485 _span(span),
7520 _bitMap(bitMap), 7486 _bitMap(bitMap),
7521 _markStack(markStack), 7487 _markStack(markStack),
7522 _finger(finger), 7488 _finger(finger),
7523 _parent(parent) 7489 _parent(parent)
7526 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, 7492 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7527 MemRegion span, 7493 MemRegion span,
7528 CMSBitMap* bit_map, 7494 CMSBitMap* bit_map,
7529 OopTaskQueue* work_queue, 7495 OopTaskQueue* work_queue,
7530 CMSMarkStack* overflow_stack, 7496 CMSMarkStack* overflow_stack,
7531 CMSMarkStack* revisit_stack,
7532 HeapWord* finger, 7497 HeapWord* finger,
7533 HeapWord** global_finger_addr, 7498 HeapWord** global_finger_addr,
7534 Par_MarkFromRootsClosure* parent) : 7499 Par_MarkFromRootsClosure* parent) :
7535 Par_KlassRememberingOopClosure(collector, 7500 CMSOopClosure(collector->ref_processor()),
7536 collector->ref_processor(), 7501 _collector(collector),
7537 revisit_stack),
7538 _whole_span(collector->_span), 7502 _whole_span(collector->_span),
7539 _span(span), 7503 _span(span),
7540 _bit_map(bit_map), 7504 _bit_map(bit_map),
7541 _work_queue(work_queue), 7505 _work_queue(work_queue),
7542 _overflow_stack(overflow_stack), 7506 _overflow_stack(overflow_stack),
7578 // Remember the least grey address discarded 7542 // Remember the least grey address discarded
7579 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); 7543 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7580 _collector->lower_restart_addr(ra); 7544 _collector->lower_restart_addr(ra);
7581 _overflow_stack->reset(); // discard stack contents 7545 _overflow_stack->reset(); // discard stack contents
7582 _overflow_stack->expand(); // expand the stack if possible 7546 _overflow_stack->expand(); // expand the stack if possible
7547 }
7548
7549 void CMKlassClosure::do_klass(Klass* k) {
7550 assert(_oop_closure != NULL, "Not initialized?");
7551 k->oops_do(_oop_closure);
7583 } 7552 }
7584 7553
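When the shared overflow stack cannot absorb a work queue's spill (the hunk just above), the closure records the lowest grey address it is about to discard as a restart address, clears the overflow stack, and tries to expand it; marking later resumes from that address so no grey object is lost. A simplified standalone model of that drop-and-remember policy follows; ToyOverflowStack and ToyCollector are hypothetical types, not the real CMSMarkStack or CMSCollector.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyOverflowStack {
  std::vector<uintptr_t> elems;
  size_t capacity;
  explicit ToyOverflowStack(size_t cap) : capacity(cap) {}
  bool push(uintptr_t addr) {
    if (elems.size() >= capacity) return false;
    elems.push_back(addr);
    return true;
  }
  void reset()  { elems.clear(); }   // discard stack contents
  void expand() { capacity *= 2; }   // try to make room for next time
};

struct ToyCollector {
  uintptr_t restart_addr = UINTPTR_MAX;
  // Keep the minimum: marking must restart no later than the lowest
  // grey address that was dropped.
  void lower_restart_addr(uintptr_t ra) { restart_addr = std::min(restart_addr, ra); }
};

// Hand a batch of grey addresses to the overflow stack; if any are dropped,
// remember the least of them as a restart point, then recycle the stack.
void spill(ToyCollector& c, ToyOverflowStack& stack, const std::vector<uintptr_t>& batch) {
  std::vector<uintptr_t> lost;
  for (uintptr_t a : batch) {
    if (!stack.push(a)) lost.push_back(a);
  }
  if (!lost.empty()) {
    c.lower_restart_addr(*std::min_element(lost.begin(), lost.end()));
    stack.reset();
    stack.expand();
  }
}

int main() {
  ToyCollector c;
  ToyOverflowStack stack(2);
  spill(c, stack, {0x4000, 0x1000, 0x3000, 0x2000});  // 0x3000 and 0x2000 are dropped
  printf("restart at %#lx\n", (unsigned long)c.restart_addr);  // prints 0x2000
  return 0;
}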
7585 void PushOrMarkClosure::do_oop(oop obj) { 7554 void PushOrMarkClosure::do_oop(oop obj) {
7586 // Ignore mark word because we are running concurrent with mutators. 7555 // Ignore mark word because we are running concurrent with mutators.
7587 assert(obj->is_oop_or_null(true), "expected an oop or NULL"); 7556 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7670 } 7639 }
7671 7640
7672 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } 7641 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7673 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } 7642 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7674 7643
7675 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
7676 ReferenceProcessor* rp,
7677 CMSMarkStack* revisit_stack) :
7678 OopClosure(rp),
7679 _collector(collector),
7680 _revisit_stack(revisit_stack),
7681 _should_remember_klasses(collector->should_unload_classes()) {}
7682
7683 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, 7644 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7684 MemRegion span, 7645 MemRegion span,
7685 ReferenceProcessor* rp, 7646 ReferenceProcessor* rp,
7686 CMSBitMap* bit_map, 7647 CMSBitMap* bit_map,
7687 CMSBitMap* mod_union_table, 7648 CMSBitMap* mod_union_table,
7688 CMSMarkStack* mark_stack, 7649 CMSMarkStack* mark_stack,
7689 CMSMarkStack* revisit_stack,
7690 bool concurrent_precleaning): 7650 bool concurrent_precleaning):
7691 KlassRememberingOopClosure(collector, rp, revisit_stack), 7651 CMSOopClosure(rp),
7652 _collector(collector),
7692 _span(span), 7653 _span(span),
7693 _bit_map(bit_map), 7654 _bit_map(bit_map),
7694 _mod_union_table(mod_union_table), 7655 _mod_union_table(mod_union_table),
7695 _mark_stack(mark_stack), 7656 _mark_stack(mark_stack),
7696 _concurrent_precleaning(concurrent_precleaning) 7657 _concurrent_precleaning(concurrent_precleaning)
7757 7718
7758 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector, 7719 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7759 MemRegion span, 7720 MemRegion span,
7760 ReferenceProcessor* rp, 7721 ReferenceProcessor* rp,
7761 CMSBitMap* bit_map, 7722 CMSBitMap* bit_map,
7762 OopTaskQueue* work_queue, 7723 OopTaskQueue* work_queue):
7763 CMSMarkStack* revisit_stack): 7724 CMSOopClosure(rp),
7764 Par_KlassRememberingOopClosure(collector, rp, revisit_stack), 7725 _collector(collector),
7765 _span(span), 7726 _span(span),
7766 _bit_map(bit_map), 7727 _bit_map(bit_map),
7767 _work_queue(work_queue) 7728 _work_queue(work_queue)
7768 { 7729 {
7769 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); 7730 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7815 } 7776 }
7816 7777
7817 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } 7778 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7818 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } 7779 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7819 7780
7820 void PushAndMarkClosure::remember_mdo(DataLayout* v) {
7821 // TBD
7822 }
7823
7824 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
7825 // TBD
7826 }
7827
7828 void CMSPrecleanRefsYieldClosure::do_yield_work() { 7781 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7829 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7830 Mutex* bml = _collector->bitMapLock(); 7782 Mutex* bml = _collector->bitMapLock();
7831 assert_lock_strong(bml); 7783 assert_lock_strong(bml);
7832 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 7784 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7833 "CMS thread should hold CMS token"); 7785 "CMS thread should hold CMS token");
7834 7786
8305 size = pointer_delta(nextOneAddr + 1, addr); 8257 size = pointer_delta(nextOneAddr + 1, addr);
8306 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), 8258 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8307 "alignment problem"); 8259 "alignment problem");
8308 8260
8309 #ifdef DEBUG 8261 #ifdef DEBUG
8310 if (oop(addr)->klass_or_null() != NULL && 8262 if (oop(addr)->klass_or_null() != NULL) {
8311 ( !_collector->should_unload_classes()
8312 || (oop(addr)->is_parsable()) &&
8313 oop(addr)->is_conc_safe())) {
8314 // Ignore mark word because we are running concurrent with mutators 8263 // Ignore mark word because we are running concurrent with mutators
8315 assert(oop(addr)->is_oop(true), "live block should be an oop"); 8264 assert(oop(addr)->is_oop(true), "live block should be an oop");
8316 // is_conc_safe is checked before performing this assertion
8317 // because an object that is not is_conc_safe may yet have
8318 // the return from size() correct.
8319 assert(size == 8265 assert(size ==
8320 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()), 8266 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8321 "P-mark and computed size do not agree"); 8267 "P-mark and computed size do not agree");
8322 } 8268 }
8323 #endif 8269 #endif
8324 8270
8325 } else { 8271 } else {
8326 // This should be an initialized object that's alive. 8272 // This should be an initialized object that's alive.
8327 assert(oop(addr)->klass_or_null() != NULL && 8273 assert(oop(addr)->klass_or_null() != NULL,
8328 (!_collector->should_unload_classes()
8329 || oop(addr)->is_parsable()),
8330 "Should be an initialized object"); 8274 "Should be an initialized object");
8331 // Note that there are objects used during class redefinition,
8332 // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
8333 // which are discarded with their is_conc_safe state still
8334 // false. These objects may be floating garbage so may be
8335 // seen here. If they are floating garbage their size
8336 // should be attainable from their klass. Do note that
8337 // is_conc_safe() is true for oop(addr).
8338 // Ignore mark word because we are running concurrent with mutators 8275 // Ignore mark word because we are running concurrent with mutators
8339 assert(oop(addr)->is_oop(true), "live block should be an oop"); 8276 assert(oop(addr)->is_oop(true), "live block should be an oop");
8340 // Verify that the bit map has no bits marked between 8277 // Verify that the bit map has no bits marked between
8341 // addr and purported end of this block. 8278 // addr and purported end of this block.
8342 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()); 8279 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8574 HeapWord* addr = (HeapWord*)obj; 8511 HeapWord* addr = (HeapWord*)obj;
8575 return addr != NULL && 8512 return addr != NULL &&
8576 (!_span.contains(addr) || _bit_map->isMarked(addr)); 8513 (!_span.contains(addr) || _bit_map->isMarked(addr));
8577 } 8514 }
8578 8515
8516
8579 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector, 8517 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8580 MemRegion span, 8518 MemRegion span,
8581 CMSBitMap* bit_map, CMSMarkStack* mark_stack, 8519 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8582 CMSMarkStack* revisit_stack, bool cpc): 8520 bool cpc):
8583 KlassRememberingOopClosure(collector, NULL, revisit_stack), 8521 _collector(collector),
8584 _span(span), 8522 _span(span),
8585 _bit_map(bit_map), 8523 _bit_map(bit_map),
8586 _mark_stack(mark_stack), 8524 _mark_stack(mark_stack),
8587 _concurrent_precleaning(cpc) { 8525 _concurrent_precleaning(cpc) {
8588 assert(!_span.is_empty(), "Empty span could spell trouble"); 8526 assert(!_span.is_empty(), "Empty span could spell trouble");
8673 } 8611 }
8674 8612
8675 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure( 8613 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8676 CMSCollector* collector, 8614 CMSCollector* collector,
8677 MemRegion span, CMSBitMap* bit_map, 8615 MemRegion span, CMSBitMap* bit_map,
8678 CMSMarkStack* revisit_stack,
8679 OopTaskQueue* work_queue): 8616 OopTaskQueue* work_queue):
8680 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), 8617 _collector(collector),
8681 _span(span), 8618 _span(span),
8682 _bit_map(bit_map), 8619 _bit_map(bit_map),
8683 _work_queue(work_queue) { } 8620 _work_queue(work_queue) { }
8684 8621
8685 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { 8622 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {