comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 113:ba764ed4b6f2

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
author coleenp
date Sun, 13 Apr 2008 17:43:42 -0400
parents 0834225a7916
children b5489bb705c9
comparing 110:a49a647afe9a with 113:ba764ed4b6f2
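Context for the "32gb" in the summary: a 32-bit compressed oop holds an offset from the heap base, scaled by the 8-byte object alignment, so it can span 2^32 * 8 bytes = 32 GB. A minimal sketch of that arithmetic (illustrative names; the real code routes this through oopDesc and Universe helpers added by this changeset):

    #include <stdint.h>

    typedef uint32_t narrowOop;                      // compressed reference
    static char*     heap_base;                      // assumed heap start
    static const int LogMinObjAlignmentInBytes = 3;  // 8-byte alignment

    // Widen: base + (32-bit offset << 3) reaches up to 32 GB of heap.
    inline void* decode_oop(narrowOop v) {
      return heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes);
    }

    // Narrow: byte offset from base, divided by the alignment.
    inline narrowOop encode_oop(void* p) {
      return (narrowOop)(((char*)p - heap_base) >> LogMinObjAlignmentInBytes);
    }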
1224 } 1224 }
1225 1225
1226 return NULL; 1226 return NULL;
1227 } 1227 }
1228 1228
1229 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) { 1229 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1230 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); 1230 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1231 // allocate, copy and if necessary update promoinfo -- 1231 // allocate, copy and if necessary update promoinfo --
1232 // delegate to underlying space. 1232 // delegate to underlying space.
1233 assert_lock_strong(freelistLock()); 1233 assert_lock_strong(freelistLock());
1234 1234
1235 #ifndef PRODUCT 1235 #ifndef PRODUCT
1236 if (Universe::heap()->promotion_should_fail()) { 1236 if (Universe::heap()->promotion_should_fail()) {
1237 return NULL; 1237 return NULL;
1238 } 1238 }
1239 #endif // #ifndef PRODUCT 1239 #endif // #ifndef PRODUCT
1240 1240
1241 oop res = _cmsSpace->promote(obj, obj_size, ref); 1241 oop res = _cmsSpace->promote(obj, obj_size);
1242 if (res == NULL) { 1242 if (res == NULL) {
1243 // expand and retry 1243 // expand and retry
1244 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords 1244 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1245 expand(s*HeapWordSize, MinHeapDeltaBytes, 1245 expand(s*HeapWordSize, MinHeapDeltaBytes,
1246 CMSExpansionCause::_satisfy_promotion); 1246 CMSExpansionCause::_satisfy_promotion);
1247 // Since there's currently no next generation, we don't try to promote 1247 // Since there's currently no next generation, we don't try to promote
1248 // into a more senior generation. 1248 // into a more senior generation.
1249 assert(next_gen() == NULL, "assumption, based upon which no attempt " 1249 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1250 "is made to pass on a possibly failing " 1250 "is made to pass on a possibly failing "
1251 "promotion to next generation"); 1251 "promotion to next generation");
1252 res = _cmsSpace->promote(obj, obj_size, ref); 1252 res = _cmsSpace->promote(obj, obj_size);
1253 } 1253 }
1254 if (res != NULL) { 1254 if (res != NULL) {
1255 // See comment in allocate() about when objects should 1255 // See comment in allocate() about when objects should
1256 // be allocated live. 1256 // be allocated live.
1257 assert(obj->is_oop(), "Will dereference klass pointer below"); 1257 assert(obj->is_oop(), "Will dereference klass pointer below");
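The dropped "oop* ref" parameter in promote() above is a direct consequence of compressed oops: a referring slot may now be either an oop* or a narrowOop*, so a single untyped slot pointer can no longer be threaded through. A hedged sketch of the slot-update shape that replaces it (encode_store_heap_oop is assumed to be among the oopDesc helpers this changeset adds; T is oop or narrowOop):

    // Overload resolution picks the plain store or the compressing store.
    template <class T>
    inline void update_slot(T* slot, oop promoted) {
      // assumed helper: stores 'promoted' into the slot, narrowing it
      // first when T is narrowOop
      oopDesc::encode_store_heap_oop(slot, promoted);
    }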
3920 // task. 3920 // task.
3921 pst->all_tasks_completed(); 3921 pst->all_tasks_completed();
3922 } 3922 }
3923 3923
3924 class Par_ConcMarkingClosure: public OopClosure { 3924 class Par_ConcMarkingClosure: public OopClosure {
3925 private:
3925 CMSCollector* _collector; 3926 CMSCollector* _collector;
3926 MemRegion _span; 3927 MemRegion _span;
3927 CMSBitMap* _bit_map; 3928 CMSBitMap* _bit_map;
3928 CMSMarkStack* _overflow_stack; 3929 CMSMarkStack* _overflow_stack;
3929 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use 3930 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
3930 OopTaskQueue* _work_queue; 3931 OopTaskQueue* _work_queue;
3931 3932 protected:
3933 DO_OOP_WORK_DEFN
3932 public: 3934 public:
3933 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue, 3935 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
3934 CMSBitMap* bit_map, CMSMarkStack* overflow_stack): 3936 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3935 _collector(collector), 3937 _collector(collector),
3936 _span(_collector->_span), 3938 _span(_collector->_span),
3937 _work_queue(work_queue), 3939 _work_queue(work_queue),
3938 _bit_map(bit_map), 3940 _bit_map(bit_map),
3939 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc. 3941 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
3940 3942 virtual void do_oop(oop* p);
3941 void do_oop(oop* p); 3943 virtual void do_oop(narrowOop* p);
3942 void trim_queue(size_t max); 3944 void trim_queue(size_t max);
3943 void handle_stack_overflow(HeapWord* lost); 3945 void handle_stack_overflow(HeapWord* lost);
3944 }; 3946 };
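The new "protected: DO_OOP_WORK_DEFN" declaration and the paired one-line do_oop(oop*) / do_oop(narrowOop*) definitions that now follow each closure are the heart of this refactoring: one marking body, instantiated for both full-width and compressed slots. A sketch of what the macro plausibly expands to (an assumption, not the verbatim macro; load_decode_heap_oop stands for the oopDesc helpers this changeset introduces):

    // One templated loader shared by the oop* and narrowOop* entry points.
    #define DO_OOP_WORK_DEFN                                          \
      void do_oop(oop obj);          /* the closure's marking logic */ \
      template <class T> inline void do_oop_work(T* p) {              \
        /* read the slot (32- or 64-bit), widen it, then mark */      \
        do_oop(oopDesc::load_decode_heap_oop(p));                     \
      }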
3945 3947
3946 // Grey object rescan during work stealing phase -- 3948 // Grey object rescan during work stealing phase --
3947 // the salient assumption here is that stolen oops must 3949 // the salient assumption here is that stolen oops must
3948 // always be initialized, so we do not need to check for 3950 // always be initialized, so we do not need to check for
3949 // uninitialized objects before scanning here. 3951 // uninitialized objects before scanning here.
3950 void Par_ConcMarkingClosure::do_oop(oop* p) { 3952 void Par_ConcMarkingClosure::do_oop(oop obj) {
3951 oop this_oop = *p; 3953 assert(obj->is_oop_or_null(), "expected an oop or NULL");
3952 assert(this_oop->is_oop_or_null(), 3954 HeapWord* addr = (HeapWord*)obj;
3953 "expected an oop or NULL");
3954 HeapWord* addr = (HeapWord*)this_oop;
3955 // Check if oop points into the CMS generation 3955 // Check if oop points into the CMS generation
3956 // and is not marked 3956 // and is not marked
3957 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 3957 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3958 // a white object ... 3958 // a white object ...
3959 // If we manage to "claim" the object, by being the 3959 // If we manage to "claim" the object, by being the
3968 // simulate a stack overflow 3968 // simulate a stack overflow
3969 simulate_overflow = true; 3969 simulate_overflow = true;
3970 } 3970 }
3971 ) 3971 )
3972 if (simulate_overflow || 3972 if (simulate_overflow ||
3973 !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) { 3973 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3974 // stack overflow 3974 // stack overflow
3975 if (PrintCMSStatistics != 0) { 3975 if (PrintCMSStatistics != 0) {
3976 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 3976 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3977 SIZE_FORMAT, _overflow_stack->capacity()); 3977 SIZE_FORMAT, _overflow_stack->capacity());
3978 } 3978 }
3984 handle_stack_overflow(addr); 3984 handle_stack_overflow(addr);
3985 } 3985 }
3986 } // Else, some other thread got there first 3986 } // Else, some other thread got there first
3987 } 3987 }
3988 } 3988 }
3989
3990 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3991 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3989 3992
3990 void Par_ConcMarkingClosure::trim_queue(size_t max) { 3993 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3991 while (_work_queue->size() > max) { 3994 while (_work_queue->size() > max) {
3992 oop new_oop; 3995 oop new_oop;
3993 if (_work_queue->pop_local(new_oop)) { 3996 if (_work_queue->pop_local(new_oop)) {
4084 // are almost identical into one for better maintainability and 4087 // are almost identical into one for better maintainability and
4085 // readability. See 6445193. 4088 // readability. See 6445193.
4086 // 4089 //
4087 // Tony 2006.06.29 4090 // Tony 2006.06.29
4088 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount && 4091 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4089 ConcurrentMarkSweepThread::should_yield() && 4092 ConcurrentMarkSweepThread::should_yield() &&
4090 !CMSCollector::foregroundGCIsActive(); ++i) { 4093 !CMSCollector::foregroundGCIsActive(); ++i) {
4091 os::sleep(Thread::current(), 1, false); 4094 os::sleep(Thread::current(), 1, false);
4092 ConcurrentMarkSweepThread::acknowledge_yield_request(); 4095 ConcurrentMarkSweepThread::acknowledge_yield_request();
4093 } 4096 }
4094 4097
4095 ConcurrentMarkSweepThread::synchronize(true); 4098 ConcurrentMarkSweepThread::synchronize(true);
6046 } 6049 }
6047 icms_wait(); 6050 icms_wait();
6048 6051
6049 // See the comment in coordinator_yield() 6052 // See the comment in coordinator_yield()
6050 for (unsigned i = 0; i < CMSYieldSleepCount && 6053 for (unsigned i = 0; i < CMSYieldSleepCount &&
6051 ConcurrentMarkSweepThread::should_yield() && 6054 ConcurrentMarkSweepThread::should_yield() &&
6052 !CMSCollector::foregroundGCIsActive(); ++i) { 6055 !CMSCollector::foregroundGCIsActive(); ++i) {
6053 os::sleep(Thread::current(), 1, false); 6056 os::sleep(Thread::current(), 1, false);
6054 ConcurrentMarkSweepThread::acknowledge_yield_request(); 6057 ConcurrentMarkSweepThread::acknowledge_yield_request();
6055 } 6058 }
6056 6059
6057 ConcurrentMarkSweepThread::synchronize(true); 6060 ConcurrentMarkSweepThread::synchronize(true);
6360 { 6363 {
6361 assert(_ref_processor == NULL, "deliberately left NULL"); 6364 assert(_ref_processor == NULL, "deliberately left NULL");
6362 assert(_bitMap->covers(_span), "_bitMap/_span mismatch"); 6365 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6363 } 6366 }
6364 6367
6365 void MarkRefsIntoClosure::do_oop(oop* p) { 6368 void MarkRefsIntoClosure::do_oop(oop obj) {
6366 // if p points into _span, then mark corresponding bit in _markBitMap 6369 // if p points into _span, then mark corresponding bit in _markBitMap
6367 oop thisOop = *p; 6370 assert(obj->is_oop(), "expected an oop");
6368 if (thisOop != NULL) { 6371 HeapWord* addr = (HeapWord*)obj;
6369 assert(thisOop->is_oop(), "expected an oop"); 6372 if (_span.contains(addr)) {
6370 HeapWord* addr = (HeapWord*)thisOop; 6373 // this should be made more efficient
6371 if (_span.contains(addr)) { 6374 _bitMap->mark(addr);
6372 // this should be made more efficient 6375 }
6373 _bitMap->mark(addr); 6376 }
6374 } 6377
6375 } 6378 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6376 } 6379 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6377 6380
6378 // A variant of the above, used for CMS marking verification. 6381 // A variant of the above, used for CMS marking verification.
6379 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure( 6382 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6380 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm, 6383 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6381 bool should_do_nmethods): 6384 bool should_do_nmethods):
6385 _should_do_nmethods(should_do_nmethods) { 6388 _should_do_nmethods(should_do_nmethods) {
6386 assert(_ref_processor == NULL, "deliberately left NULL"); 6389 assert(_ref_processor == NULL, "deliberately left NULL");
6387 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch"); 6390 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6388 } 6391 }
6389 6392
6390 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { 6393 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6391 // if p points into _span, then mark corresponding bit in _markBitMap 6394 // if p points into _span, then mark corresponding bit in _markBitMap
6392 oop this_oop = *p; 6395 assert(obj->is_oop(), "expected an oop");
6393 if (this_oop != NULL) { 6396 HeapWord* addr = (HeapWord*)obj;
6394 assert(this_oop->is_oop(), "expected an oop"); 6397 if (_span.contains(addr)) {
6395 HeapWord* addr = (HeapWord*)this_oop; 6398 _verification_bm->mark(addr);
6396 if (_span.contains(addr)) { 6399 if (!_cms_bm->isMarked(addr)) {
6397 _verification_bm->mark(addr); 6400 oop(addr)->print();
6398 if (!_cms_bm->isMarked(addr)) { 6401 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6399 oop(addr)->print(); 6402 fatal("... aborting");
6400 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); 6403 }
6401 fatal("... aborting"); 6404 }
6402 } 6405 }
6403 } 6406
6404 } 6407 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6405 } 6408 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6406 6409
6407 ////////////////////////////////////////////////// 6410 //////////////////////////////////////////////////
6408 // MarkRefsIntoAndScanClosure 6411 // MarkRefsIntoAndScanClosure
6409 ////////////////////////////////////////////////// 6412 //////////////////////////////////////////////////
6410 6413
6436 // the unmarked oops. It is also used during the concurrent precleaning 6439 // the unmarked oops. It is also used during the concurrent precleaning
6437 // phase while scanning objects on dirty cards in the CMS generation. 6440 // phase while scanning objects on dirty cards in the CMS generation.
6438 // The marks are made in the marking bit map and the marking stack is 6441 // The marks are made in the marking bit map and the marking stack is
6439 // used for keeping the (newly) grey objects during the scan. 6442 // used for keeping the (newly) grey objects during the scan.
6440 // The parallel version (Par_...) appears further below. 6443 // The parallel version (Par_...) appears further below.
6441 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { 6444 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6442 oop this_oop = *p; 6445 if (obj != NULL) {
6443 if (this_oop != NULL) { 6446 assert(obj->is_oop(), "expected an oop");
6444 assert(this_oop->is_oop(), "expected an oop"); 6447 HeapWord* addr = (HeapWord*)obj;
6445 HeapWord* addr = (HeapWord*)this_oop; 6448 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6446 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); 6449 assert(_collector->overflow_list_is_empty(),
6447 assert(_collector->overflow_list_is_empty(), "should be empty"); 6450 "overflow list should be empty");
6448 if (_span.contains(addr) && 6451 if (_span.contains(addr) &&
6449 !_bit_map->isMarked(addr)) { 6452 !_bit_map->isMarked(addr)) {
6450 // mark bit map (object is now grey) 6453 // mark bit map (object is now grey)
6451 _bit_map->mark(addr); 6454 _bit_map->mark(addr);
6452 // push on marking stack (stack should be empty), and drain the 6455 // push on marking stack (stack should be empty), and drain the
6453 // stack by applying this closure to the oops in the oops popped 6456 // stack by applying this closure to the oops in the oops popped
6454 // from the stack (i.e. blacken the grey objects) 6457 // from the stack (i.e. blacken the grey objects)
6455 bool res = _mark_stack->push(this_oop); 6458 bool res = _mark_stack->push(obj);
6456 assert(res, "Should have space to push on empty stack"); 6459 assert(res, "Should have space to push on empty stack");
6457 do { 6460 do {
6458 oop new_oop = _mark_stack->pop(); 6461 oop new_oop = _mark_stack->pop();
6459 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); 6462 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6460 assert(new_oop->is_parsable(), "Found unparsable oop"); 6463 assert(new_oop->is_parsable(), "Found unparsable oop");
6486 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(), 6489 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6487 "All preserved marks should have been restored above"); 6490 "All preserved marks should have been restored above");
6488 } 6491 }
6489 } 6492 }
6490 6493
6494 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6495 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6496
6491 void MarkRefsIntoAndScanClosure::do_yield_work() { 6497 void MarkRefsIntoAndScanClosure::do_yield_work() {
6492 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6498 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6493 "CMS thread should hold CMS token"); 6499 "CMS thread should hold CMS token");
6494 assert_lock_strong(_freelistLock); 6500 assert_lock_strong(_freelistLock);
6495 assert_lock_strong(_bit_map->lock()); 6501 assert_lock_strong(_bit_map->lock());
6504 _collector->incrementYields(); 6510 _collector->incrementYields();
6505 } 6511 }
6506 _collector->icms_wait(); 6512 _collector->icms_wait();
6507 6513
6508 // See the comment in coordinator_yield() 6514 // See the comment in coordinator_yield()
6509 for (unsigned i = 0; i < CMSYieldSleepCount && 6515 for (unsigned i = 0;
6510 ConcurrentMarkSweepThread::should_yield() && 6516 i < CMSYieldSleepCount &&
6511 !CMSCollector::foregroundGCIsActive(); ++i) { 6517 ConcurrentMarkSweepThread::should_yield() &&
6518 !CMSCollector::foregroundGCIsActive();
6519 ++i) {
6512 os::sleep(Thread::current(), 1, false); 6520 os::sleep(Thread::current(), 1, false);
6513 ConcurrentMarkSweepThread::acknowledge_yield_request(); 6521 ConcurrentMarkSweepThread::acknowledge_yield_request();
6514 } 6522 }
6515 6523
6516 ConcurrentMarkSweepThread::synchronize(true); 6524 ConcurrentMarkSweepThread::synchronize(true);
6543 // the unmarked oops. The marks are made in the marking bit map and 6551 // the unmarked oops. The marks are made in the marking bit map and
6544 // the work_queue is used for keeping the (newly) grey objects during 6552 // the work_queue is used for keeping the (newly) grey objects during
6545 // the scan phase whence they are also available for stealing by parallel 6553 // the scan phase whence they are also available for stealing by parallel
6546 // threads. Since the marking bit map is shared, updates are 6554 // threads. Since the marking bit map is shared, updates are
6547 // synchronized (via CAS). 6555 // synchronized (via CAS).
6548 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { 6556 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6549 oop this_oop = *p; 6557 if (obj != NULL) {
6550 if (this_oop != NULL) {
6551 // Ignore mark word because this could be an already marked oop 6558 // Ignore mark word because this could be an already marked oop
6552 // that may be chained at the end of the overflow list. 6559 // that may be chained at the end of the overflow list.
6553 assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop"); 6560 assert(obj->is_oop(), "expected an oop");
6554 HeapWord* addr = (HeapWord*)this_oop; 6561 HeapWord* addr = (HeapWord*)obj;
6555 if (_span.contains(addr) && 6562 if (_span.contains(addr) &&
6556 !_bit_map->isMarked(addr)) { 6563 !_bit_map->isMarked(addr)) {
6557 // mark bit map (object will become grey): 6564 // mark bit map (object will become grey):
6558 // It is possible for several threads to be 6565 // It is possible for several threads to be
6559 // trying to "claim" this object concurrently; 6566 // trying to "claim" this object concurrently;
6563 if (_bit_map->par_mark(addr)) { 6570 if (_bit_map->par_mark(addr)) {
6564 // push on work_queue (which may not be empty), and trim the 6571 // push on work_queue (which may not be empty), and trim the
6565 // queue to an appropriate length by applying this closure to 6572 // queue to an appropriate length by applying this closure to
6566 // the oops in the oops popped from the stack (i.e. blacken the 6573 // the oops in the oops popped from the stack (i.e. blacken the
6567 // grey objects) 6574 // grey objects)
6568 bool res = _work_queue->push(this_oop); 6575 bool res = _work_queue->push(obj);
6569 assert(res, "Low water mark should be less than capacity?"); 6576 assert(res, "Low water mark should be less than capacity?");
6570 trim_queue(_low_water_mark); 6577 trim_queue(_low_water_mark);
6571 } // Else, another thread claimed the object 6578 } // Else, another thread claimed the object
6572 } 6579 }
6573 } 6580 }
6574 } 6581 }
6582
6583 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6584 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
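The "synchronized (via CAS)" remark above is what par_mark provides: the first thread to set the bit in the shared bitmap claims the object, and every other thread backs off. A self-contained sketch of such a CAS bit-claim (illustrative; the real BitMap code uses HotSpot's Atomic primitives, not std::atomic):

    #include <atomic>
    #include <cstdint>

    typedef uintptr_t bm_word_t;

    // Returns true iff this thread set the bit (claimed the object).
    inline bool par_set_bit(std::atomic<bm_word_t>* word, bm_word_t mask) {
      bm_word_t old_val = word->load(std::memory_order_relaxed);
      while ((old_val & mask) == 0) {
        // On failure, old_val is refreshed with the current word value.
        if (word->compare_exchange_weak(old_val, old_val | mask)) {
          return true;    // we published the mark: object is now grey
        }
      }
      return false;       // bit already set: another thread claimed it
    }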
6575 6585
6576 // This closure is used to rescan the marked objects on the dirty cards 6586 // This closure is used to rescan the marked objects on the dirty cards
6577 // in the mod union table and the card table proper. 6587 // in the mod union table and the card table proper.
6578 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( 6588 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6579 oop p, MemRegion mr) { 6589 oop p, MemRegion mr) {
6673 } 6683 }
6674 _collector->icms_wait(); 6684 _collector->icms_wait();
6675 6685
6676 // See the comment in coordinator_yield() 6686 // See the comment in coordinator_yield()
6677 for (unsigned i = 0; i < CMSYieldSleepCount && 6687 for (unsigned i = 0; i < CMSYieldSleepCount &&
6678 ConcurrentMarkSweepThread::should_yield() && 6688 ConcurrentMarkSweepThread::should_yield() &&
6679 !CMSCollector::foregroundGCIsActive(); ++i) { 6689 !CMSCollector::foregroundGCIsActive(); ++i) {
6680 os::sleep(Thread::current(), 1, false); 6690 os::sleep(Thread::current(), 1, false);
6681 ConcurrentMarkSweepThread::acknowledge_yield_request(); 6691 ConcurrentMarkSweepThread::acknowledge_yield_request();
6682 } 6692 }
6683 6693
6684 ConcurrentMarkSweepThread::synchronize(true); 6694 ConcurrentMarkSweepThread::synchronize(true);
6926 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) { 6936 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6927 assert(_bitMap->isMarked(ptr), "expected bit to be set"); 6937 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6928 assert(_markStack->isEmpty(), 6938 assert(_markStack->isEmpty(),
6929 "should drain stack to limit stack usage"); 6939 "should drain stack to limit stack usage");
6930 // convert ptr to an oop preparatory to scanning 6940 // convert ptr to an oop preparatory to scanning
6931 oop this_oop = oop(ptr); 6941 oop obj = oop(ptr);
6932 // Ignore mark word in verification below, since we 6942 // Ignore mark word in verification below, since we
6933 // may be running concurrent with mutators. 6943 // may be running concurrent with mutators.
6934 assert(this_oop->is_oop(true), "should be an oop"); 6944 assert(obj->is_oop(true), "should be an oop");
6935 assert(_finger <= ptr, "_finger runneth ahead"); 6945 assert(_finger <= ptr, "_finger runneth ahead");
6936 // advance the finger to right end of this object 6946 // advance the finger to right end of this object
6937 _finger = ptr + this_oop->size(); 6947 _finger = ptr + obj->size();
6938 assert(_finger > ptr, "we just incremented it above"); 6948 assert(_finger > ptr, "we just incremented it above");
6939 // On large heaps, it may take us some time to get through 6949 // On large heaps, it may take us some time to get through
6940 // the marking phase (especially if running iCMS). During 6950 // the marking phase (especially if running iCMS). During
6941 // this time it's possible that a lot of mutations have 6951 // this time it's possible that a lot of mutations have
6942 // accumulated in the card table and the mod union table -- 6952 // accumulated in the card table and the mod union table --
6978 // the stack below. 6988 // the stack below.
6979 PushOrMarkClosure pushOrMarkClosure(_collector, 6989 PushOrMarkClosure pushOrMarkClosure(_collector,
6980 _span, _bitMap, _markStack, 6990 _span, _bitMap, _markStack,
6981 _revisitStack, 6991 _revisitStack,
6982 _finger, this); 6992 _finger, this);
6983 bool res = _markStack->push(this_oop); 6993 bool res = _markStack->push(obj);
6984 assert(res, "Empty non-zero size stack should have space for single push"); 6994 assert(res, "Empty non-zero size stack should have space for single push");
6985 while (!_markStack->isEmpty()) { 6995 while (!_markStack->isEmpty()) {
6986 oop new_oop = _markStack->pop(); 6996 oop new_oop = _markStack->pop();
6987 // Skip verifying header mark word below because we are 6997 // Skip verifying header mark word below because we are
6988 // running concurrent with mutators. 6998 // running concurrent with mutators.
7050 // Should we assert that our work queue is empty or 7060 // Should we assert that our work queue is empty or
7051 // below some drain limit? 7061 // below some drain limit?
7052 assert(_work_queue->size() == 0, 7062 assert(_work_queue->size() == 0,
7053 "should drain stack to limit stack usage"); 7063 "should drain stack to limit stack usage");
7054 // convert ptr to an oop preparatory to scanning 7064 // convert ptr to an oop preparatory to scanning
7055 oop this_oop = oop(ptr); 7065 oop obj = oop(ptr);
7056 // Ignore mark word in verification below, since we 7066 // Ignore mark word in verification below, since we
7057 // may be running concurrent with mutators. 7067 // may be running concurrent with mutators.
7058 assert(this_oop->is_oop(true), "should be an oop"); 7068 assert(obj->is_oop(true), "should be an oop");
7059 assert(_finger <= ptr, "_finger runneth ahead"); 7069 assert(_finger <= ptr, "_finger runneth ahead");
7060 // advance the finger to right end of this object 7070 // advance the finger to right end of this object
7061 _finger = ptr + this_oop->size(); 7071 _finger = ptr + obj->size();
7062 assert(_finger > ptr, "we just incremented it above"); 7072 assert(_finger > ptr, "we just incremented it above");
7063 // On large heaps, it may take us some time to get through 7073 // On large heaps, it may take us some time to get through
7064 // the marking phase (especially if running iCMS). During 7074 // the marking phase (especially if running iCMS). During
7065 // this time it's possible that a lot of mutations have 7075 // this time it's possible that a lot of mutations have
7066 // accumulated in the card table and the mod union table -- 7076 // accumulated in the card table and the mod union table --
7104 _work_queue, 7114 _work_queue,
7105 _overflow_stack, 7115 _overflow_stack,
7106 _revisit_stack, 7116 _revisit_stack,
7107 _finger, 7117 _finger,
7108 gfa, this); 7118 gfa, this);
7109 bool res = _work_queue->push(this_oop); // overflow could occur here 7119 bool res = _work_queue->push(obj); // overflow could occur here
7110 assert(res, "Will hold once we use workqueues"); 7120 assert(res, "Will hold once we use workqueues");
7111 while (true) { 7121 while (true) {
7112 oop new_oop; 7122 oop new_oop;
7113 if (!_work_queue->pop_local(new_oop)) { 7123 if (!_work_queue->pop_local(new_oop)) {
7114 // We emptied our work_queue; check if there's stuff that can 7124 // We emptied our work_queue; check if there's stuff that can
7174 assert(_cms_bm->isMarked(addr), "tautology"); 7184 assert(_cms_bm->isMarked(addr), "tautology");
7175 7185
7176 assert(_mark_stack->isEmpty(), 7186 assert(_mark_stack->isEmpty(),
7177 "should drain stack to limit stack usage"); 7187 "should drain stack to limit stack usage");
7178 // convert addr to an oop preparatory to scanning 7188 // convert addr to an oop preparatory to scanning
7179 oop this_oop = oop(addr); 7189 oop obj = oop(addr);
7180 assert(this_oop->is_oop(), "should be an oop"); 7190 assert(obj->is_oop(), "should be an oop");
7181 assert(_finger <= addr, "_finger runneth ahead"); 7191 assert(_finger <= addr, "_finger runneth ahead");
7182 // advance the finger to right end of this object 7192 // advance the finger to right end of this object
7183 _finger = addr + this_oop->size(); 7193 _finger = addr + obj->size();
7184 assert(_finger > addr, "we just incremented it above"); 7194 assert(_finger > addr, "we just incremented it above");
7185 // Note: the finger doesn't advance while we drain 7195 // Note: the finger doesn't advance while we drain
7186 // the stack below. 7196 // the stack below.
7187 bool res = _mark_stack->push(this_oop); 7197 bool res = _mark_stack->push(obj);
7188 assert(res, "Empty non-zero size stack should have space for single push"); 7198 assert(res, "Empty non-zero size stack should have space for single push");
7189 while (!_mark_stack->isEmpty()) { 7199 while (!_mark_stack->isEmpty()) {
7190 oop new_oop = _mark_stack->pop(); 7200 oop new_oop = _mark_stack->pop();
7191 assert(new_oop->is_oop(), "Oops! expected to pop an oop"); 7201 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7192 // now scan this oop's oops 7202 // now scan this oop's oops
7205 _verification_bm(verification_bm), 7215 _verification_bm(verification_bm),
7206 _cms_bm(cms_bm), 7216 _cms_bm(cms_bm),
7207 _mark_stack(mark_stack) 7217 _mark_stack(mark_stack)
7208 { } 7218 { }
7209 7219
7220 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7221 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7210 7222
7211 // Upon stack overflow, we discard (part of) the stack, 7223 // Upon stack overflow, we discard (part of) the stack,
7212 // remembering the least address amongst those discarded 7224 // remembering the least address amongst those discarded
7213 // in CMSCollector's _restart_address. 7225 // in CMSCollector's _restart_address.
7214 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) { 7226 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7217 _collector->lower_restart_addr(ra); 7229 _collector->lower_restart_addr(ra);
7218 _mark_stack->reset(); // discard stack contents 7230 _mark_stack->reset(); // discard stack contents
7219 _mark_stack->expand(); // expand the stack if possible 7231 _mark_stack->expand(); // expand the stack if possible
7220 } 7232 }
7221 7233
7222 void PushAndMarkVerifyClosure::do_oop(oop* p) { 7234 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7223 oop this_oop = *p; 7235 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7224 assert(this_oop->is_oop_or_null(), "expected an oop or NULL"); 7236 HeapWord* addr = (HeapWord*)obj;
7225 HeapWord* addr = (HeapWord*)this_oop;
7226 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) { 7237 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7227 // Oop lies in _span and isn't yet grey or black 7238 // Oop lies in _span and isn't yet grey or black
7228 _verification_bm->mark(addr); // now grey 7239 _verification_bm->mark(addr); // now grey
7229 if (!_cms_bm->isMarked(addr)) { 7240 if (!_cms_bm->isMarked(addr)) {
7230 oop(addr)->print(); 7241 oop(addr)->print();
7231 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); 7242 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7243 addr);
7232 fatal("... aborting"); 7244 fatal("... aborting");
7233 } 7245 }
7234 7246
7235 if (!_mark_stack->push(this_oop)) { // stack overflow 7247 if (!_mark_stack->push(obj)) { // stack overflow
7236 if (PrintCMSStatistics != 0) { 7248 if (PrintCMSStatistics != 0) {
7237 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 7249 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7238 SIZE_FORMAT, _mark_stack->capacity()); 7250 SIZE_FORMAT, _mark_stack->capacity());
7239 } 7251 }
7240 assert(_mark_stack->isFull(), "Else push should have succeeded"); 7252 assert(_mark_stack->isFull(), "Else push should have succeeded");
7283 _global_finger_addr(global_finger_addr), 7295 _global_finger_addr(global_finger_addr),
7284 _parent(parent), 7296 _parent(parent),
7285 _should_remember_klasses(collector->should_unload_classes()) 7297 _should_remember_klasses(collector->should_unload_classes())
7286 { } 7298 { }
7287 7299
7288
7289 void CMSCollector::lower_restart_addr(HeapWord* low) { 7300 void CMSCollector::lower_restart_addr(HeapWord* low) {
7290 assert(_span.contains(low), "Out of bounds addr"); 7301 assert(_span.contains(low), "Out of bounds addr");
7291 if (_restart_addr == NULL) { 7302 if (_restart_addr == NULL) {
7292 _restart_addr = low; 7303 _restart_addr = low;
7293 } else { 7304 } else {
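The comparison view truncates lower_restart_addr at its else branch. From the comment above handle_stack_overflow ("remembering the least address amongst those discarded"), the branch evidently keeps the minimum; a sketch of the plausible remainder (an assumption, not the verbatim source):

    #include <cstddef>
    #include <algorithm>

    // Plausible shape of the cut-off else branch: retain the least
    // address lost to any overflow, so marking can restart from it.
    inline char* lower_restart(char* restart_addr, char* low) {
      return restart_addr == NULL ? low : std::min(low, restart_addr);
    }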
7319 _collector->lower_restart_addr(ra); 7330 _collector->lower_restart_addr(ra);
7320 _overflow_stack->reset(); // discard stack contents 7331 _overflow_stack->reset(); // discard stack contents
7321 _overflow_stack->expand(); // expand the stack if possible 7332 _overflow_stack->expand(); // expand the stack if possible
7322 } 7333 }
7323 7334
7324 7335 void PushOrMarkClosure::do_oop(oop obj) {
7325 void PushOrMarkClosure::do_oop(oop* p) {
7326 oop thisOop = *p;
7327 // Ignore mark word because we are running concurrent with mutators. 7336 // Ignore mark word because we are running concurrent with mutators.
7328 assert(thisOop->is_oop_or_null(true), "expected an oop or NULL"); 7337 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7329 HeapWord* addr = (HeapWord*)thisOop; 7338 HeapWord* addr = (HeapWord*)obj;
7330 if (_span.contains(addr) && !_bitMap->isMarked(addr)) { 7339 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7331 // Oop lies in _span and isn't yet grey or black 7340 // Oop lies in _span and isn't yet grey or black
7332 _bitMap->mark(addr); // now grey 7341 _bitMap->mark(addr); // now grey
7333 if (addr < _finger) { 7342 if (addr < _finger) {
7334 // the bit map iteration has already either passed, or 7343 // the bit map iteration has already either passed, or
7340 _collector->simulate_overflow()) { 7349 _collector->simulate_overflow()) {
7341 // simulate a stack overflow 7350 // simulate a stack overflow
7342 simulate_overflow = true; 7351 simulate_overflow = true;
7343 } 7352 }
7344 ) 7353 )
7345 if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow 7354 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7346 if (PrintCMSStatistics != 0) { 7355 if (PrintCMSStatistics != 0) {
7347 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 7356 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7348 SIZE_FORMAT, _markStack->capacity()); 7357 SIZE_FORMAT, _markStack->capacity());
7349 } 7358 }
7350 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded"); 7359 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7356 // bit map 7365 // bit map
7357 do_yield_check(); 7366 do_yield_check();
7358 } 7367 }
7359 } 7368 }
7360 7369
7361 void Par_PushOrMarkClosure::do_oop(oop* p) { 7370 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7362 oop this_oop = *p; 7371 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7372
7373 void Par_PushOrMarkClosure::do_oop(oop obj) {
7363 // Ignore mark word because we are running concurrent with mutators. 7374 // Ignore mark word because we are running concurrent with mutators.
7364 assert(this_oop->is_oop_or_null(true), "expected an oop or NULL"); 7375 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7365 HeapWord* addr = (HeapWord*)this_oop; 7376 HeapWord* addr = (HeapWord*)obj;
7366 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) { 7377 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7367 // Oop lies in _span and isn't yet grey or black 7378 // Oop lies in _span and isn't yet grey or black
7368 // We read the global_finger (volatile read) strictly after marking oop 7379 // We read the global_finger (volatile read) strictly after marking oop
7369 bool res = _bit_map->par_mark(addr); // now grey 7380 bool res = _bit_map->par_mark(addr); // now grey
7370 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr; 7381 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7389 // simulate a stack overflow 7400 // simulate a stack overflow
7390 simulate_overflow = true; 7401 simulate_overflow = true;
7391 } 7402 }
7392 ) 7403 )
7393 if (simulate_overflow || 7404 if (simulate_overflow ||
7394 !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) { 7405 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7395 // stack overflow 7406 // stack overflow
7396 if (PrintCMSStatistics != 0) { 7407 if (PrintCMSStatistics != 0) {
7397 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 7408 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7398 SIZE_FORMAT, _overflow_stack->capacity()); 7409 SIZE_FORMAT, _overflow_stack->capacity());
7399 } 7410 }
7406 } 7417 }
7407 do_yield_check(); 7418 do_yield_check();
7408 } 7419 }
7409 } 7420 }
7410 7421
7422 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7423 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
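Both parallel push closures follow the same two-level buffering before declaring overflow: first the thread-private work queue (cheap, and stealable by idle workers), then the shared overflow stack, and only when both refuse does handle_stack_overflow run. Condensed as a sketch (the oop typedef and template parameters stand in for HotSpot's types):

    typedef void* oop;   // stand-in for HotSpot's oop, for illustration

    // Returns false when both levels are full: the caller must then run
    // the overflow recovery protocol (lower_restart_addr, reset, expand).
    template <class Queue, class Stack>
    inline bool remember_grey(Queue* work_queue, Stack* overflow_stack,
                              oop obj) {
      return work_queue->push(obj)           // private, unsynchronized end
          || overflow_stack->par_push(obj);  // shared, contended fallback
    }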
7411 7424
7412 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, 7425 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7413 MemRegion span, 7426 MemRegion span,
7414 ReferenceProcessor* rp, 7427 ReferenceProcessor* rp,
7415 CMSBitMap* bit_map, 7428 CMSBitMap* bit_map,
7430 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); 7443 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7431 } 7444 }
7432 7445
7433 // Grey object rescan during pre-cleaning and second checkpoint phases -- 7446 // Grey object rescan during pre-cleaning and second checkpoint phases --
7434 // the non-parallel version (the parallel version appears further below.) 7447 // the non-parallel version (the parallel version appears further below.)
7435 void PushAndMarkClosure::do_oop(oop* p) { 7448 void PushAndMarkClosure::do_oop(oop obj) {
7436 oop this_oop = *p; 7449 // If _concurrent_precleaning, ignore mark word verification
7437 // Ignore mark word verification. If during concurrent precleaning 7450 assert(obj->is_oop_or_null(_concurrent_precleaning),
7438 // the object monitor may be locked. If during the checkpoint
7439 // phases, the object may already have been reached by a different
7440 // path and may be at the end of the global overflow list (so
7441 // the mark word may be NULL).
7442 assert(this_oop->is_oop_or_null(true/* ignore mark word */),
7443 "expected an oop or NULL"); 7451 "expected an oop or NULL");
7444 HeapWord* addr = (HeapWord*)this_oop; 7452 HeapWord* addr = (HeapWord*)obj;
7445 // Check if oop points into the CMS generation 7453 // Check if oop points into the CMS generation
7446 // and is not marked 7454 // and is not marked
7447 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 7455 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7448 // a white object ... 7456 // a white object ...
7449 _bit_map->mark(addr); // ... now grey 7457 _bit_map->mark(addr); // ... now grey
7454 _collector->simulate_overflow()) { 7462 _collector->simulate_overflow()) {
7455 // simulate a stack overflow 7463 // simulate a stack overflow
7456 simulate_overflow = true; 7464 simulate_overflow = true;
7457 } 7465 }
7458 ) 7466 )
7459 if (simulate_overflow || !_mark_stack->push(this_oop)) { 7467 if (simulate_overflow || !_mark_stack->push(obj)) {
7460 if (_concurrent_precleaning) { 7468 if (_concurrent_precleaning) {
7461 // During precleaning we can just dirty the appropriate card 7469 // During precleaning we can just dirty the appropriate card
7462 // in the mod union table, thus ensuring that the object remains 7470 // in the mod union table, thus ensuring that the object remains
7463 // in the grey set and continue. Note that no one can be interfering 7471 // in the grey set and continue. Note that no one can be interfering
7464 // with us in this action of dirtying the mod union table, so 7472 // with us in this action of dirtying the mod union table, so
7466 _mod_union_table->mark(addr); 7474 _mod_union_table->mark(addr);
7467 _collector->_ser_pmc_preclean_ovflw++; 7475 _collector->_ser_pmc_preclean_ovflw++;
7468 } else { 7476 } else {
7469 // During the remark phase, we need to remember this oop 7477 // During the remark phase, we need to remember this oop
7470 // in the overflow list. 7478 // in the overflow list.
7471 _collector->push_on_overflow_list(this_oop); 7479 _collector->push_on_overflow_list(obj);
7472 _collector->_ser_pmc_remark_ovflw++; 7480 _collector->_ser_pmc_remark_ovflw++;
7473 } 7481 }
7474 } 7482 }
7475 } 7483 }
7476 } 7484 }
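The precleaning branch above exploits card granularity: instead of growing the stack, it re-dirties the mod union table entry covering the object, so a later preclean or remark pass rescans that card. A sketch of the card arithmetic, assuming HotSpot's customary 512-byte cards (the constant is an assumption here, not taken from this file):

    #include <stddef.h>

    const int CardShift = 9;   // log2 of an assumed 512-byte card size

    // Which card (hence which mod-union-table bit) covers addr.
    inline size_t card_index_for(const char* addr, const char* heap_start) {
      return (size_t)(addr - heap_start) >> CardShift;
    }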
7490 _should_remember_klasses(collector->should_unload_classes()) 7498 _should_remember_klasses(collector->should_unload_classes())
7491 { 7499 {
7492 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); 7500 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7493 } 7501 }
7494 7502
7503 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7504 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7505
7495 // Grey object rescan during second checkpoint phase -- 7506 // Grey object rescan during second checkpoint phase --
7496 // the parallel version. 7507 // the parallel version.
7497 void Par_PushAndMarkClosure::do_oop(oop* p) { 7508 void Par_PushAndMarkClosure::do_oop(oop obj) {
7498 oop this_oop = *p;
7499 // In the assert below, we ignore the mark word because 7509 // In the assert below, we ignore the mark word because
7500 // this oop may point to an already visited object that is 7510 // this oop may point to an already visited object that is
7501 // on the overflow stack (in which case the mark word has 7511 // on the overflow stack (in which case the mark word has
7502 // been hijacked for chaining into the overflow stack -- 7512 // been hijacked for chaining into the overflow stack --
7503 // if this is the last object in the overflow stack then 7513 // if this is the last object in the overflow stack then
7505 // have been subsequently popped off the global overflow 7515 // have been subsequently popped off the global overflow
7506 // stack, and the mark word possibly restored to the prototypical 7516 // stack, and the mark word possibly restored to the prototypical
7507 // value, by the time we get to examine this failing assert in 7517 // value, by the time we get to examine this failing assert in
7508 // the debugger, is_oop_or_null(false) may subsequently start 7518 // the debugger, is_oop_or_null(false) may subsequently start
7509 // to hold. 7519 // to hold.
7510 assert(this_oop->is_oop_or_null(true), 7520 assert(obj->is_oop_or_null(true),
7511 "expected an oop or NULL"); 7521 "expected an oop or NULL");
7512 HeapWord* addr = (HeapWord*)this_oop; 7522 HeapWord* addr = (HeapWord*)obj;
7513 // Check if oop points into the CMS generation 7523 // Check if oop points into the CMS generation
7514 // and is not marked 7524 // and is not marked
7515 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 7525 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7516 // a white object ... 7526 // a white object ...
7517 // If we manage to "claim" the object, by being the 7527 // If we manage to "claim" the object, by being the
7525 _collector->par_simulate_overflow()) { 7535 _collector->par_simulate_overflow()) {
7526 // simulate a stack overflow 7536 // simulate a stack overflow
7527 simulate_overflow = true; 7537 simulate_overflow = true;
7528 } 7538 }
7529 ) 7539 )
7530 if (simulate_overflow || !_work_queue->push(this_oop)) { 7540 if (simulate_overflow || !_work_queue->push(obj)) {
7531 _collector->par_push_on_overflow_list(this_oop); 7541 _collector->par_push_on_overflow_list(obj);
7532 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS 7542 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7533 } 7543 }
7534 } // Else, some other thread got there first 7544 } // Else, some other thread got there first
7535 } 7545 }
7536 } 7546 }
7547
7548 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7549 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7537 7550
7538 void PushAndMarkClosure::remember_klass(Klass* k) { 7551 void PushAndMarkClosure::remember_klass(Klass* k) {
7539 if (!_revisit_stack->push(oop(k))) { 7552 if (!_revisit_stack->push(oop(k))) {
7540 fatal("Revisit stack overflowed in PushAndMarkClosure"); 7553 fatal("Revisit stack overflowed in PushAndMarkClosure");
7541 } 7554 }
8226 return addr != NULL && 8239 return addr != NULL &&
8227 (!_span.contains(addr) || _bit_map->isMarked(addr)); 8240 (!_span.contains(addr) || _bit_map->isMarked(addr));
8228 } 8241 }
8229 8242
8230 // CMSKeepAliveClosure: the serial version 8243 // CMSKeepAliveClosure: the serial version
8231 void CMSKeepAliveClosure::do_oop(oop* p) { 8244 void CMSKeepAliveClosure::do_oop(oop obj) {
8232 oop this_oop = *p; 8245 HeapWord* addr = (HeapWord*)obj;
8233 HeapWord* addr = (HeapWord*)this_oop;
8234 if (_span.contains(addr) && 8246 if (_span.contains(addr) &&
8235 !_bit_map->isMarked(addr)) { 8247 !_bit_map->isMarked(addr)) {
8236 _bit_map->mark(addr); 8248 _bit_map->mark(addr);
8237 bool simulate_overflow = false; 8249 bool simulate_overflow = false;
8238 NOT_PRODUCT( 8250 NOT_PRODUCT(
8240 _collector->simulate_overflow()) { 8252 _collector->simulate_overflow()) {
8241 // simulate a stack overflow 8253 // simulate a stack overflow
8242 simulate_overflow = true; 8254 simulate_overflow = true;
8243 } 8255 }
8244 ) 8256 )
8245 if (simulate_overflow || !_mark_stack->push(this_oop)) { 8257 if (simulate_overflow || !_mark_stack->push(obj)) {
8246 _collector->push_on_overflow_list(this_oop); 8258 _collector->push_on_overflow_list(obj);
8247 _collector->_ser_kac_ovflw++; 8259 _collector->_ser_kac_ovflw++;
8248 } 8260 }
8249 } 8261 }
8250 } 8262 }
8263
8264 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8265 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
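These keep-alive closures serve the ReferenceProcessor: when a discovered referent is not strongly reachable, policy may still require keeping it (and everything it reaches) alive. A simplified sketch of that contract (not code from this file; do_object_b is the BoolObjectClosure entry point, the rest is illustrative):

    typedef void* oop;                  // illustrative stand-in
    struct RefSlot { oop referent; };   // illustrative stand-in

    template <class IsAlive, class KeepAlive>
    void process_discovered_ref(RefSlot* ref, IsAlive& is_alive,
                                KeepAlive& keep_alive) {
      if (is_alive.do_object_b(ref->referent)) {
        return;                          // already strongly reachable
      }
      // Policy may instead clear the referent; keeping it means tracing
      // must resume from it, which is what CMSKeepAliveClosure arranges.
      keep_alive.do_oop(&ref->referent);
    }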
8251 8266
8252 // CMSParKeepAliveClosure: a parallel version of the above. 8267 // CMSParKeepAliveClosure: a parallel version of the above.
8253 // The work queues are private to each closure (thread), 8268 // The work queues are private to each closure (thread),
8254 // but (may be) available for stealing by other threads. 8269 // but (may be) available for stealing by other threads.
8255 void CMSParKeepAliveClosure::do_oop(oop* p) { 8270 void CMSParKeepAliveClosure::do_oop(oop obj) {
8256 oop this_oop = *p; 8271 HeapWord* addr = (HeapWord*)obj;
8257 HeapWord* addr = (HeapWord*)this_oop;
8258 if (_span.contains(addr) && 8272 if (_span.contains(addr) &&
8259 !_bit_map->isMarked(addr)) { 8273 !_bit_map->isMarked(addr)) {
8260 // In general, during recursive tracing, several threads 8274 // In general, during recursive tracing, several threads
8261 // may be concurrently getting here; the first one to 8275 // may be concurrently getting here; the first one to
8262 // "tag" it, claims it. 8276 // "tag" it, claims it.
8263 if (_bit_map->par_mark(addr)) { 8277 if (_bit_map->par_mark(addr)) {
8264 bool res = _work_queue->push(this_oop); 8278 bool res = _work_queue->push(obj);
8265 assert(res, "Low water mark should be much less than capacity"); 8279 assert(res, "Low water mark should be much less than capacity");
8266 // Do a recursive trim in the hope that this will keep 8280 // Do a recursive trim in the hope that this will keep
8267 // stack usage lower, but leave some oops for potential stealers 8281 // stack usage lower, but leave some oops for potential stealers
8268 trim_queue(_low_water_mark); 8282 trim_queue(_low_water_mark);
8269 } // Else, another thread got there first 8283 } // Else, another thread got there first
8270 } 8284 }
8271 } 8285 }
8286
8287 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8288 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8272 8289
8273 void CMSParKeepAliveClosure::trim_queue(uint max) { 8290 void CMSParKeepAliveClosure::trim_queue(uint max) {
8274 while (_work_queue->size() > max) { 8291 while (_work_queue->size() > max) {
8275 oop new_oop; 8292 oop new_oop;
8276 if (_work_queue->pop_local(new_oop)) { 8293 if (_work_queue->pop_local(new_oop)) {
8283 new_oop->oop_iterate(&_mark_and_push); 8300 new_oop->oop_iterate(&_mark_and_push);
8284 } 8301 }
8285 } 8302 }
8286 } 8303 }
8287 8304
8288 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { 8305 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8289 oop this_oop = *p; 8306 HeapWord* addr = (HeapWord*)obj;
8290 HeapWord* addr = (HeapWord*)this_oop;
8291 if (_span.contains(addr) && 8307 if (_span.contains(addr) &&
8292 !_bit_map->isMarked(addr)) { 8308 !_bit_map->isMarked(addr)) {
8293 if (_bit_map->par_mark(addr)) { 8309 if (_bit_map->par_mark(addr)) {
8294 bool simulate_overflow = false; 8310 bool simulate_overflow = false;
8295 NOT_PRODUCT( 8311 NOT_PRODUCT(
8297 _collector->par_simulate_overflow()) { 8313 _collector->par_simulate_overflow()) {
8298 // simulate a stack overflow 8314 // simulate a stack overflow
8299 simulate_overflow = true; 8315 simulate_overflow = true;
8300 } 8316 }
8301 ) 8317 )
8302 if (simulate_overflow || !_work_queue->push(this_oop)) { 8318 if (simulate_overflow || !_work_queue->push(obj)) {
8303 _collector->par_push_on_overflow_list(this_oop); 8319 _collector->par_push_on_overflow_list(obj);
8304 _collector->_par_kac_ovflw++; 8320 _collector->_par_kac_ovflw++;
8305 } 8321 }
8306 } // Else another thread got there already 8322 } // Else another thread got there already
8307 } 8323 }
8308 } 8324 }
8325
8326 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8327 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8309 8328
8310 ////////////////////////////////////////////////////////////////// 8329 //////////////////////////////////////////////////////////////////
8311 // CMSExpansionCause ///////////////////////////// 8330 // CMSExpansionCause /////////////////////////////
8312 ////////////////////////////////////////////////////////////////// 8331 //////////////////////////////////////////////////////////////////
8313 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) { 8332 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8335 // the max number to take from overflow list at a time 8354 // the max number to take from overflow list at a time
8336 const size_t num = _mark_stack->capacity()/4; 8355 const size_t num = _mark_stack->capacity()/4;
8337 while (!_mark_stack->isEmpty() || 8356 while (!_mark_stack->isEmpty() ||
8338 // if stack is empty, check the overflow list 8357 // if stack is empty, check the overflow list
8339 _collector->take_from_overflow_list(num, _mark_stack)) { 8358 _collector->take_from_overflow_list(num, _mark_stack)) {
8340 oop this_oop = _mark_stack->pop(); 8359 oop obj = _mark_stack->pop();
8341 HeapWord* addr = (HeapWord*)this_oop; 8360 HeapWord* addr = (HeapWord*)obj;
8342 assert(_span.contains(addr), "Should be within span"); 8361 assert(_span.contains(addr), "Should be within span");
8343 assert(_bit_map->isMarked(addr), "Should be marked"); 8362 assert(_bit_map->isMarked(addr), "Should be marked");
8344 assert(this_oop->is_oop(), "Should be an oop"); 8363 assert(obj->is_oop(), "Should be an oop");
8345 this_oop->oop_iterate(_keep_alive); 8364 obj->oop_iterate(_keep_alive);
8346 } 8365 }
8347 } 8366 }
8348 8367
8349 void CMSParDrainMarkingStackClosure::do_void() { 8368 void CMSParDrainMarkingStackClosure::do_void() {
8350 // drain queue 8369 // drain queue