comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 340:ebeb6490b814

6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
Summary: Fixed CMSConcMarkingTask::reset() to store the restart address upon a marking stack overflow and to use it as the base, suitably aligned, for restarting the scan in CMSConcMarkingTask::do_scan_and_mark().
Reviewed-by: jcoomes, tonyp
author ysr
date Tue, 26 Aug 2008 14:54:48 -0700
parents 9199f248b0ee
children 5d254928c888
comparing 301:387a62b4be60 with 340:ebeb6490b814
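As a reading aid only, the sketch below models outside HotSpot the protocol the summary describes: record the least address lost to a marking stack overflow, then rerun the scan from that address aligned down to a card boundary. Driver, mark_from() and the sizes are invented for illustration and are not CMS code; only the alignment arithmetic mirrors the align_size_down() call added by this change (HotSpot's card size is 512 bytes).

#include <cstdint>
#include <cstdio>

typedef char* HeapWord;                        // stand-in for HotSpot's HeapWord*
static const uintptr_t card_size = 512;        // CardTableModRefBS::card_size (512 bytes)

// Align an address down to a card boundary, mirroring
// align_size_down(_restart_addr, CardTableModRefBS::card_size).
static HeapWord align_down_to_card(HeapWord p) {
  return (HeapWord)((uintptr_t)p & ~(card_size - 1));
}

// Hypothetical single-space driver: mark_from() stands in for the real
// per-task scanning and merely reports where it was told to start.
struct Driver {
  HeapWord bottom, end;
  HeapWord restart_addr;
  bool     overflowed;

  void reset(HeapWord ra) {                    // analogue of CMSConcMarkingTask::reset(ra)
    restart_addr = ra;
    overflowed   = false;
  }

  void mark_from(HeapWord base) {
    std::printf("scanning from offset %td\n", base - bottom);
    if (base == bottom) {                      // simulate one overflow on the first pass
      overflowed   = true;
      restart_addr = bottom + 1300;            // least grey address that was discarded
    }
  }

  void run() {
    reset(bottom);
    for (;;) {
      // Mirror the fix: if a restart address was recorded inside the space,
      // resume from it aligned down to a card boundary; otherwise use bottom().
      HeapWord base = (restart_addr > bottom && restart_addr < end)
                          ? align_down_to_card(restart_addr)
                          : bottom;
      mark_from(base);
      if (!overflowed) break;
      overflowed = false;                      // next pass resumes at restart_addr
    }
  }
};

int main() {
  char heap[4096];
  Driver d;
  d.bottom = heap;
  d.end    = heap + sizeof(heap);
  d.run();                                     // first pass from bottom(), second from the restart point
  return 0;
}

Aligning down is conservative: it can only move the rescan start earlier, so at worst a few already-scanned objects at the front of a card are revisited, never skipped.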
@@ -3648,10 +3648,11 @@
   bool _asynch;
   bool _result;
   CompactibleFreeListSpace* _cms_space;
   CompactibleFreeListSpace* _perm_space;
   HeapWord* _global_finger;
+  HeapWord* _restart_addr;

   // Exposed here for yielding support
   Mutex* const _bit_map_lock;

   // The per thread work queues, available here for stealing
3678 "Else termination won't work correctly today"); // XXX FIX ME! 3679 "Else termination won't work correctly today"); // XXX FIX ME!
3679 _requested_size = n_workers; 3680 _requested_size = n_workers;
3680 _term.set_task(this); 3681 _term.set_task(this);
3681 assert(_cms_space->bottom() < _perm_space->bottom(), 3682 assert(_cms_space->bottom() < _perm_space->bottom(),
3682 "Finger incorrectly initialized below"); 3683 "Finger incorrectly initialized below");
3683 _global_finger = _cms_space->bottom(); 3684 _restart_addr = _global_finger = _cms_space->bottom();
3684 } 3685 }
3685 3686
3686 3687
3687 OopTaskQueueSet* task_queues() { return _task_queues; } 3688 OopTaskQueueSet* task_queues() { return _task_queues; }
3688 3689
@@ -3696,10 +3697,14 @@

   virtual void coordinator_yield(); // stuff done by coordinator
   bool result() { return _result; }

   void reset(HeapWord* ra) {
+    assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
+    assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
+    assert(ra < _perm_space->end(), "ra too large");
+    _restart_addr = _global_finger = ra;
     _term.reset_for_reuse();
   }

   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
                                            OopTaskQueue* work_q);
@@ -3840,20 +3845,28 @@
 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
   int n_tasks = pst->n_tasks();
   // We allow that there may be no tasks to do here because
   // we are restarting after a stack overflow.
-  assert(pst->valid() || n_tasks == 0, "Uninitializd use?");
+  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
   int nth_task = 0;

-  HeapWord* start = sp->bottom();
+  HeapWord* aligned_start = sp->bottom();
+  if (sp->used_region().contains(_restart_addr)) {
+    // Align down to a card boundary for the start of 0th task
+    // for this space.
+    aligned_start =
+      (HeapWord*)align_size_down((uintptr_t)_restart_addr,
+                                 CardTableModRefBS::card_size);
+  }
+
   size_t chunk_size = sp->marking_task_size();
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
     // Having claimed the nth task in this space,
     // compute the chunk that it corresponds to:
-    MemRegion span = MemRegion(start + nth_task*chunk_size,
-                               start + (nth_task+1)*chunk_size);
+    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
+                               aligned_start + (nth_task+1)*chunk_size);
     // Try and bump the global finger via a CAS;
     // note that we need to do the global finger bump
     // _before_ taking the intersection below, because
     // the task corresponding to that region will be
     // deemed done even if the used_region() expands
@@ -3864,30 +3877,44 @@
     bump_global_finger(finger); // atomically
     // There are null tasks here corresponding to chunks
     // beyond the "top" address of the space.
     span = span.intersection(sp->used_region());
     if (!span.is_empty()) { // Non-null task
-      // We want to skip the first object because
-      // the protocol is to scan any object in its entirety
-      // that _starts_ in this span; a fortiori, any
-      // object starting in an earlier span is scanned
-      // as part of an earlier claimed task.
-      // Below we use the "careful" version of block_start
-      // so we do not try to navigate uninitialized objects.
-      HeapWord* prev_obj = sp->block_start_careful(span.start());
-      // Below we use a variant of block_size that uses the
-      // Printezis bits to avoid waiting for allocated
-      // objects to become initialized/parsable.
-      while (prev_obj < span.start()) {
-        size_t sz = sp->block_size_no_stall(prev_obj, _collector);
-        if (sz > 0) {
-          prev_obj += sz;
-        } else {
-          // In this case we may end up doing a bit of redundant
-          // scanning, but that appears unavoidable, short of
-          // locking the free list locks; see bug 6324141.
-          break;
+      HeapWord* prev_obj;
+      assert(!span.contains(_restart_addr) || nth_task == 0,
+             "Inconsistency");
+      if (nth_task == 0) {
+        // For the 0th task, we'll not need to compute a block_start.
+        if (span.contains(_restart_addr)) {
+          // In the case of a restart because of stack overflow,
+          // we might additionally skip a chunk prefix.
+          prev_obj = _restart_addr;
+        } else {
+          prev_obj = span.start();
+        }
+      } else {
+        // We want to skip the first object because
+        // the protocol is to scan any object in its entirety
+        // that _starts_ in this span; a fortiori, any
+        // object starting in an earlier span is scanned
+        // as part of an earlier claimed task.
+        // Below we use the "careful" version of block_start
+        // so we do not try to navigate uninitialized objects.
+        prev_obj = sp->block_start_careful(span.start());
+        // Below we use a variant of block_size that uses the
+        // Printezis bits to avoid waiting for allocated
+        // objects to become initialized/parsable.
+        while (prev_obj < span.start()) {
+          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
+          if (sz > 0) {
+            prev_obj += sz;
+          } else {
+            // In this case we may end up doing a bit of redundant
+            // scanning, but that appears unavoidable, short of
+            // locking the free list locks; see bug 6324141.
+            break;
+          }
         }
       }
       if (prev_obj < span.end()) {
         MemRegion my_span = MemRegion(prev_obj, span.end());
         // Do the marking work within a non-empty span --
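To make the chunking above concrete, here is a standalone sketch with invented sizes (not the real marking_task_size() or space bounds) of how claimed tasks map onto chunks based at the card-aligned restart address, and why only task 0 may contain _restart_addr, as the new assert insists. Later tasks in the real code additionally walk back to an object boundary with block_start_careful(); the sketch just uses the chunk base.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t card_size  = 512;     // CardTableModRefBS::card_size
  const uintptr_t bottom     = 0x10000; // used_region() = [bottom, top), made-up bounds
  const uintptr_t top        = 0x14000;
  const uintptr_t restart    = 0x11234; // least address lost to the overflow
  const uintptr_t chunk_size = 0x1000;  // stand-in for marking_task_size()

  // Align the restart address down to a card boundary, as the fix does.
  const uintptr_t aligned_start = restart & ~(card_size - 1);

  for (int nth_task = 0; ; nth_task++) {
    uintptr_t lo = aligned_start + nth_task * chunk_size;
    uintptr_t hi = lo + chunk_size;
    // Clip to the used region; chunks entirely above top are "null tasks".
    lo = std::max(lo, bottom);
    hi = std::min(hi, top);
    if (lo >= hi) break;
    // Task 0 starts exactly at the restart address (skipping the already
    // scanned prefix of its chunk); later tasks start at their chunk base.
    uintptr_t scan_from = (nth_task == 0) ? restart : lo;
    std::printf("task %d: chunk [%#lx, %#lx), scan from %#lx\n",
                nth_task, (unsigned long)lo, (unsigned long)hi,
                (unsigned long)scan_from);
  }
  return 0;
}

Clipping each chunk against the used region is what produces the "null tasks" the comments above mention for chunks beyond the "top" address of the space.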
@@ -3936,16 +3963,18 @@
   virtual void do_oop(narrowOop* p);
   void trim_queue(size_t max);
   void handle_stack_overflow(HeapWord* lost);
 };

-// Grey object rescan during work stealing phase --
-// the salient assumption here is that stolen oops must
-// always be initialized, so we do not need to check for
-// uninitialized objects before scanning here.
+// Grey object scanning during work stealing phase --
+// the salient assumption here is that any references
+// that are in these stolen objects being scanned must
+// already have been initialized (else they would not have
+// been published), so we do not need to check for
+// uninitialized objects before pushing here.
 void Par_ConcMarkingClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
     // a white object ...
@@ -3999,11 +4028,11 @@
 // Upon stack overflow, we discard (part of) the stack,
 // remembering the least address amongst those discarded
 // in CMSCollector's _restart_address.
 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
-  // workers from interfering with the expansion below.
+  // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
                    Mutex::_no_safepoint_check_flag);
   // Remember the least grey address discarded
   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
   _collector->lower_restart_addr(ra);
@@ -6552,11 +6581,11 @@
 // synchronized (via CAS).
 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
   if (obj != NULL) {
     // Ignore mark word because this could be an already marked oop
     // that may be chained at the end of the overflow list.
-    assert(obj->is_oop(), "expected an oop");
+    assert(obj->is_oop(true), "expected an oop");
     HeapWord* addr = (HeapWord*)obj;
     if (_span.contains(addr) &&
         !_bit_map->isMarked(addr)) {
       // mark bit map (object will become grey):
       // It is possible for several threads to be
@@ -7287,10 +7316,12 @@
   _global_finger_addr(global_finger_addr),
   _parent(parent),
   _should_remember_klasses(collector->should_unload_classes())
 { }

+// Assumes thread-safe access by callers, who are
+// responsible for mutual exclusion.
 void CMSCollector::lower_restart_addr(HeapWord* low) {
   assert(_span.contains(low), "Out of bounds addr");
   if (_restart_addr == NULL) {
     _restart_addr = low;
   } else {
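The hunk ends before the else branch of lower_restart_addr(), so the following sketch is an assumption about its shape -- keep the minimum of the addresses reported so far -- and not a copy of the HotSpot body; RestartTracker and its fields are invented names.

#include <algorithm>
#include <cassert>
#include <cstddef>

typedef char* HeapWord;

struct RestartTracker {                    // hypothetical stand-in for CMSCollector
  HeapWord _span_lo, _span_hi;
  HeapWord _restart_addr;                  // NULL until some worker overflows

  // Callers serialize on the overflow stack's par_lock(), as the two
  // handle_stack_overflow() sites above do, so no atomics are needed here.
  void lower_restart_addr(HeapWord low) {
    assert(_span_lo <= low && low < _span_hi);          // "Out of bounds addr"
    if (_restart_addr == NULL) {
      _restart_addr = low;
    } else {
      _restart_addr = std::min(_restart_addr, low);     // assumed: keep the minimum
    }
  }
};

int main() {
  char heap[1024];
  RestartTracker t = { heap, heap + sizeof(heap), NULL };
  t.lower_restart_addr(heap + 700);
  t.lower_restart_addr(heap + 300);        // a second overflow at a lower address
  assert(t._restart_addr == heap + 300);   // the least address wins
  return 0;
}

Both handle_stack_overflow() callers above take the overflow stack's par_lock() first, which is why the new comment says callers are responsible for mutual exclusion.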
@@ -7312,11 +7343,11 @@
 // Upon stack overflow, we discard (part of) the stack,
 // remembering the least address amongst those discarded
 // in CMSCollector's _restart_address.
 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
-  // workers from interfering with the expansion below.
+  // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
                    Mutex::_no_safepoint_check_flag);
   // Remember the least grey address discarded
   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
   _collector->lower_restart_addr(ra);