comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 360:5d254928c888

Merge
author ysr
date Wed, 27 Aug 2008 11:20:46 -0700
parents ebeb6490b814 1ee8caae33af
children 00b023ae2d78
--- 341:d60e4e6d7f72
+++ 360:5d254928c888
@@ -2759,17 +2759,18 @@
   bool _failed;

  public:
   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}

-  void do_bit(size_t offset) {
+  bool do_bit(size_t offset) {
     HeapWord* addr = _marks->offsetToHeapWord(offset);
     if (!_marks->isMarked(addr)) {
       oop(addr)->print();
       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
       _failed = true;
     }
+    return true;
   }

   bool failed() { return _failed; }
 };

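Part of what this merge brings in is a change to the bit map closure protocol: do_bit() now returns bool so a bitmap walk can be cut short by the closure, and existing closures such as VerifyMarkedClosure simply return true from every call. The following is a minimal, self-contained sketch of that protocol; SimpleBitMap, BitClosure and CountingClosure are made-up stand-ins, not the HotSpot BitMap classes.

// Toy illustration of the bool do_bit() contract; the types here are
// hypothetical, not the HotSpot classes touched by this changeset.
#include <cstddef>
#include <cstdio>
#include <vector>

struct BitClosure {
  virtual ~BitClosure() {}
  // Return true to keep iterating, false to cut the walk short --
  // the convention do_bit() adopts in this diff.
  virtual bool do_bit(size_t offset) = 0;
};

struct SimpleBitMap {
  std::vector<bool> bits;
  explicit SimpleBitMap(size_t n) : bits(n, false) {}
  void set(size_t i) { bits[i] = true; }

  // Apply the closure to every set bit; stop as soon as it returns false.
  bool iterate(BitClosure* cl) const {
    for (size_t i = 0; i < bits.size(); ++i) {
      if (bits[i] && !cl->do_bit(i)) return false;   // stopped early
    }
    return true;                                     // visited all set bits
  }
};

// In the spirit of VerifyMarkedClosure: never stops the walk, only records.
struct CountingClosure : BitClosure {
  size_t seen = 0;
  bool do_bit(size_t offset) { (void)offset; ++seen; return true; }
};

int main() {
  SimpleBitMap bm(16);
  bm.set(3); bm.set(7);
  CountingClosure cl;
  bm.iterate(&cl);
  std::printf("set bits visited: %zu\n", cl.seen);   // prints 2
  return 0;
}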
@@ -4667,12 +4668,15 @@
       stopTimer();
       CMSTokenSync x(true); // is cms thread
       startTimer();
       sample_eden();
       // Get and clear dirty region from card table
-      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
-                      MemRegion(nextAddr, endAddr));
+      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
+                      MemRegion(nextAddr, endAddr),
+                      true,
+                      CardTableModRefBS::precleaned_card_val());
+
       assert(dirtyRegion.start() >= nextAddr,
              "returned region inconsistent?");
     }
     lastAddr = dirtyRegion.end();
     numDirtyCards =
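The call site above moves from a preclean-specific card-table entry point to a more general one where the caller passes the value the dirty cards it finds are apparently reset to (here the "precleaned" card value). As a rough sketch of that idea only, with a toy card table rather than the real CardTableModRefBS implementation:

// Hypothetical toy card table, for illustration only.
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

enum : uint8_t { clean_card = 0, dirty_card = 1, precleaned_card = 2 };

struct ToyCardTable {
  std::vector<uint8_t> cards;   // one byte per card

  // Find the first maximal run of dirty cards at or after 'from'.
  // If 'reset' is true, overwrite those cards with 'reset_to' while
  // scanning, so a later pass will not see them as dirty again.
  // Returns [begin, end); begin == end means no dirty card was found.
  std::pair<size_t, size_t> dirty_card_range_after_reset(size_t from,
                                                         bool reset,
                                                         uint8_t reset_to) {
    size_t begin = from;
    while (begin < cards.size() && cards[begin] != dirty_card) ++begin;
    size_t end = begin;
    while (end < cards.size() && cards[end] == dirty_card) {
      if (reset) cards[end] = reset_to;
      ++end;
    }
    return {begin, end};
  }
};

int main() {
  ToyCardTable ct{{clean_card, dirty_card, dirty_card, clean_card, dirty_card}};
  std::pair<size_t, size_t> r =
      ct.dirty_card_range_after_reset(0, true, precleaned_card);
  std::printf("dirty run: [%zu, %zu)\n", r.first, r.second);  // [1, 3)
  return 0;
}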
@@ -5436,12 +5440,12 @@
                         NULL, // space is set further below
                         &_markBitMap, &_markStack, &_revisitStack,
                         &mrias_cl);
   {
     TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
-    // Iterate over the dirty cards, marking them precleaned, and
-    // setting the corresponding bits in the mod union table.
+    // Iterate over the dirty cards, setting the corresponding bits in the
+    // mod union table.
     {
       ModUnionClosure modUnionClosure(&_modUnionTable);
       _ct->ct_bs()->dirty_card_iterate(
                       _cmsGen->used_region(),
                       &modUnionClosure);
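For context on the reworded comment: in this step the collector walks the dirty cards of the CMS generation's used region and records the covered words in the mod union table (a bitmap) that later drives the grey-object rescan. A loose sketch of that "for each dirty region, set the matching bits" step, using hypothetical stand-ins for ModUnionClosure and the card-table iterator:

// Illustration only; MemRange, WordBitMap and the closure are hypothetical.
#include <cstddef>
#include <cstdio>
#include <vector>

struct MemRange { size_t start_word; size_t word_count; };

struct WordBitMap {
  std::vector<bool> bits;               // one bit per heap word
  explicit WordBitMap(size_t words) : bits(words, false) {}
  void mark_range(const MemRange& mr) {
    for (size_t i = 0; i < mr.word_count; ++i) bits[mr.start_word + i] = true;
  }
};

// Stand-in for ModUnionClosure: remembers each dirty region in the
// "mod union" bitmap so a later rescan can revisit exactly those words.
struct ToyModUnionClosure {
  WordBitMap* mod_union;
  void do_MemRegion(const MemRange& dirty) { mod_union->mark_range(dirty); }
};

// Stand-in for dirty_card_iterate(): hand every dirty region to the closure.
void dirty_region_iterate(const std::vector<MemRange>& dirty_regions,
                          ToyModUnionClosure* cl) {
  for (const MemRange& mr : dirty_regions) cl->do_MemRegion(mr);
}

int main() {
  WordBitMap mut(64);
  ToyModUnionClosure cl{&mut};
  dirty_region_iterate({{8, 4}, {20, 2}}, &cl);   // two dirty regions
  size_t marked = 0;
  for (bool b : mut.bits) marked += b ? 1 : 0;
  std::printf("words recorded in mod union table: %zu\n", marked);  // 6
  return 0;
}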
@@ -6209,11 +6213,11 @@

 // Construct a CMS bit map infrastructure, but don't create the
 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
 // further below.
 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
-  _bm(NULL,0),
+  _bm(),
   _shifter(shifter),
   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
 {
   _bmStartWord = 0;
   _bmWordSize = 0;
@@ -6234,11 +6238,11 @@
     warning("CMS bit map backing store failure");
     return false;
   }
   assert(_virtual_space.committed_size() == brs.size(),
          "didn't reserve backing store for all of CMS bit map?");
-  _bm.set_map((uintptr_t*)_virtual_space.low());
+  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
          _bmWordSize, "inconsistency in bit map sizing");
   _bm.set_size(_bmWordSize >> _shifter);

   // bm.clear(); // can we rely on getting zero'd memory? verify below
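The sizing assertion above reads most easily as arithmetic: with one map bit per (1 << _shifter) heap words, the map needs _bmWordSize >> _shifter bits, so the committed backing store, shifted back up by (_shifter + LogBitsPerByte), must cover _bmWordSize. A small stand-alone check of that arithmetic with local stand-ins for the HotSpot constants:

// Toy check of the bit map sizing arithmetic; the names below are local
// stand-ins, not the HotSpot globals (which carry the same meaning).
#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t LogBitsPerByte = 3;          // 8 bits per byte
  const size_t shifter        = 0;          // 1 map bit per heap word
  const size_t bm_word_size   = 1 << 20;    // heap words covered by the map

  // One bit per (1 << shifter) words -> this many bits, then bytes.
  const size_t bits_needed  = bm_word_size >> shifter;
  const size_t bytes_needed = bits_needed >> LogBitsPerByte;

  // The assertion from CMSBitMap::allocate(), restated: the committed
  // bytes, shifted back up, must cover every word the map describes.
  assert((bytes_needed << (shifter + LogBitsPerByte)) >= bm_word_size);

  std::printf("%zu heap words -> %zu map bits -> %zu backing bytes\n",
              bm_word_size, bits_needed, bytes_needed);
  return 0;
}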
@@ -6872,14 +6876,14 @@
                  (intptr_t)_finger, CardTableModRefBS::card_size);
 }

 // Should revisit to see if this should be restructured for
 // greater efficiency.
-void MarkFromRootsClosure::do_bit(size_t offset) {
+bool MarkFromRootsClosure::do_bit(size_t offset) {
   if (_skipBits > 0) {
     _skipBits--;
-    return;
+    return true;
   }
   // convert offset into a HeapWord*
   HeapWord* addr = _bitMap->startWord() + offset;
   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
          "address out of range");
@@ -6913,14 +6917,15 @@
           // Redirty the range of cards...
           _mut->mark_range(redirty_range);
         } // ...else the setting of klass will dirty the card anyway.
       }
     DEBUG_ONLY(})
-      return;
+      return true;
     }
   }
   scanOopsInOop(addr);
+  return true;
 }

 // We take a break if we've been at this for a while,
 // so as to avoid monopolizing the locks involved.
 void MarkFromRootsClosure::do_yield_work() {
@@ -7050,14 +7055,14 @@
   assert(_span.contains(_finger), "Out of bounds _finger?");
 }

 // Should revisit to see if this should be restructured for
 // greater efficiency.
-void Par_MarkFromRootsClosure::do_bit(size_t offset) {
+bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
   if (_skip_bits > 0) {
     _skip_bits--;
-    return;
+    return true;
   }
   // convert offset into a HeapWord*
   HeapWord* addr = _bit_map->startWord() + offset;
   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
          "address out of range");
@@ -7068,14 +7073,15 @@
     _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
     oop p = oop(addr);
     if (p->klass_or_null() == NULL || !p->is_parsable()) {
       // in the case of Clean-on-Enter optimization, redirty card
       // and avoid clearing card by increasing the threshold.
-      return;
+      return true;
     }
   }
   scan_oops_in_oop(addr);
+  return true;
 }

 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
   assert(_bit_map->isMarked(ptr), "expected bit to be set");
   // Should we assert that our work queue is empty or
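The "_skip_bits = 2" line above reflects the "Printezis mark" convention: when an object's header cannot yet be read safely, a couple of extra bits in the mark bitmap record its extent, and the marking closure must skip the next two marked bits rather than treat them as new object starts. A stripped-down, hypothetical sketch of a closure carrying such a skip counter (not the real Par_MarkFromRootsClosure):

// Hypothetical sketch of the skip-counter pattern used by the marking
// closures in this file; types and the deferral test are made up.
#include <cstddef>
#include <cstdio>
#include <vector>

struct SkippingBitClosure {
  size_t skip_bits = 0;                 // bits we promised to ignore
  std::vector<size_t> scanned;          // offsets actually processed

  // Mirrors the bool do_bit() convention: always keep iterating here.
  bool do_bit(size_t offset) {
    if (skip_bits > 0) {
      --skip_bits;                      // consume one skipped bit
      return true;
    }
    if (needs_deferral(offset)) {
      skip_bits = 2;                    // skip the next two marked bits
      return true;                      // revisit this object later
    }
    scanned.push_back(offset);          // normal case: scan the object
    return true;
  }

  // Stand-in for "klass not yet installed / object not parsable".
  bool needs_deferral(size_t offset) const { return offset % 10 == 0; }
};

int main() {
  SkippingBitClosure cl;
  for (size_t off = 0; off < 25; ++off) cl.do_bit(off);
  std::printf("offsets scanned: %zu of 25\n", cl.scanned.size());  // 16
  return 0;
}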
@@ -7194,11 +7200,11 @@
   _finger = addr;
 }

 // Should revisit to see if this should be restructured for
 // greater efficiency.
-void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
+bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
   // convert offset into a HeapWord*
   HeapWord* addr = _verification_bm->startWord() + offset;
   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
          "address out of range");
   assert(_verification_bm->isMarked(addr), "tautology");
@@ -7222,10 +7228,11 @@
     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
     // now scan this oop's oops
     new_oop->oop_iterate(&_pam_verify_closure);
   }
   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
+  return true;
 }

 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
   CMSCollector* collector, MemRegion span,
   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
@@ -7467,12 +7474,16 @@
 }

 // Grey object rescan during pre-cleaning and second checkpoint phases --
 // the non-parallel version (the parallel version appears further below.)
 void PushAndMarkClosure::do_oop(oop obj) {
-  // If _concurrent_precleaning, ignore mark word verification
-  assert(obj->is_oop_or_null(_concurrent_precleaning),
+  // Ignore mark word verification. If during concurrent precleaning,
+  // the object monitor may be locked. If during the checkpoint
+  // phases, the object may already have been reached by a different
+  // path and may be at the end of the global overflow list (so
+  // the mark word may be NULL).
+  assert(obj->is_oop_or_null(true /* ignore mark word */),
          "expected an oop or NULL");
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {