comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 5988:2a0172480595

7127697: G1: remove dead code after recent concurrent mark changes
Summary: Removed lots of dead code after some recent conc mark changes.
Reviewed-by: brutisso, johnc
author tonyp
date Thu, 05 Apr 2012 13:57:23 -0400
parents eff609af17d7
children 5c86f8211d1e
comparing 5987:748051fd24ce with 5988:2a0172480595
@@ -101 +101 @@
 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
   return (int) (diff >> _shifter);
 }
 
-void CMBitMapRO::mostly_disjoint_range_union(BitMap* from_bitmap,
-                                             size_t from_start_index,
-                                             HeapWord* to_start_word,
-                                             size_t word_num) {
-  _bm.mostly_disjoint_range_union(from_bitmap,
-                                  from_start_index,
-                                  heapWordToOffset(to_start_word),
-                                  word_num);
-}
-
 #ifndef PRODUCT
 bool CMBitMapRO::covers(ReservedSpace rs) const {
   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   assert(((size_t)_bm.size() * (size_t)(1 << _shifter)) == _bmWordSize,
          "size inconsistency");
@@ -267 +257 @@
     }
     _index = new_ind;
     *n = k;
     return true;
   }
-}
-
-CMRegionStack::CMRegionStack() : _base(NULL) {}
-
-void CMRegionStack::allocate(size_t size) {
-  _base = NEW_C_HEAP_ARRAY(MemRegion, size);
-  if (_base == NULL) {
-    vm_exit_during_initialization("Failed to allocate CM region mark stack");
-  }
-  _index = 0;
-  _capacity = (jint) size;
-}
-
-CMRegionStack::~CMRegionStack() {
-  if (_base != NULL) {
-    FREE_C_HEAP_ARRAY(MemRegion, _base);
-  }
-}
-
-void CMRegionStack::push_lock_free(MemRegion mr) {
-  guarantee(false, "push_lock_free(): don't call this any more");
-
-  assert(mr.word_size() > 0, "Precondition");
-  while (true) {
-    jint index = _index;
-
-    if (index >= _capacity) {
-      _overflow = true;
-      return;
-    }
-    // Otherwise...
-    jint next_index = index+1;
-    jint res = Atomic::cmpxchg(next_index, &_index, index);
-    if (res == index) {
-      _base[index] = mr;
-      return;
-    }
-    // Otherwise, we need to try again.
-  }
-}
-
-// Lock-free pop of the region stack. Called during the concurrent
-// marking / remark phases. Should only be called in tandem with
-// other lock-free pops.
-MemRegion CMRegionStack::pop_lock_free() {
-  guarantee(false, "pop_lock_free(): don't call this any more");
-
-  while (true) {
-    jint index = _index;
-
-    if (index == 0) {
-      return MemRegion();
-    }
-    // Otherwise...
-    jint next_index = index-1;
-    jint res = Atomic::cmpxchg(next_index, &_index, index);
-    if (res == index) {
-      MemRegion mr = _base[next_index];
-      if (mr.start() != NULL) {
-        assert(mr.end() != NULL, "invariant");
-        assert(mr.word_size() > 0, "invariant");
-        return mr;
-      } else {
-        // that entry was invalidated... let's skip it
-        assert(mr.end() == NULL, "invariant");
-      }
-    }
-    // Otherwise, we need to try again.
-  }
-}
-
-#if 0
-// The routines that manipulate the region stack with a lock are
-// not currently used. They should be retained, however, as a
-// diagnostic aid.
-
-void CMRegionStack::push_with_lock(MemRegion mr) {
-  assert(mr.word_size() > 0, "Precondition");
-  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
-
-  if (isFull()) {
-    _overflow = true;
-    return;
-  }
-
-  _base[_index] = mr;
-  _index += 1;
-}
-
-MemRegion CMRegionStack::pop_with_lock() {
-  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
-
-  while (true) {
-    if (_index == 0) {
-      return MemRegion();
-    }
-    _index -= 1;
-
-    MemRegion mr = _base[_index];
-    if (mr.start() != NULL) {
-      assert(mr.end() != NULL, "invariant");
-      assert(mr.word_size() > 0, "invariant");
-      return mr;
-    } else {
-      // that entry was invalidated... let's skip it
-      assert(mr.end() == NULL, "invariant");
-    }
-  }
-}
-#endif
-
-bool CMRegionStack::invalidate_entries_into_cset() {
-  guarantee(false, "invalidate_entries_into_cset(): don't call this any more");
-
-  bool result = false;
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  for (int i = 0; i < _oops_do_bound; ++i) {
-    MemRegion mr = _base[i];
-    if (mr.start() != NULL) {
-      assert(mr.end() != NULL, "invariant");
-      assert(mr.word_size() > 0, "invariant");
-      HeapRegion* hr = g1h->heap_region_containing(mr.start());
-      assert(hr != NULL, "invariant");
-      if (hr->in_collection_set()) {
-        // The region points into the collection set
-        _base[i] = MemRegion();
-        result = true;
-      }
-    } else {
-      // that entry was invalidated... let's skip it
-      assert(mr.end() == NULL, "invariant");
-    }
-  }
-  return result;
-}
 }
 
 template<class OopClosureClass>
 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
   assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
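
The removed push_lock_free()/pop_lock_free() are instances of the claim-a-slot CAS pattern: CAS the index to reserve a slot, then fill or read it. A standalone sketch of the same pattern, using std::atomic in place of HotSpot's Atomic::cmpxchg (all names here are illustrative):

    #include <atomic>

    // Sketch of the claim-a-slot CAS pattern from CMRegionStack; standalone,
    // with std::atomic instead of HotSpot's Atomic::cmpxchg.
    template <typename T, int Capacity>
    class BoundedLockFreeStack {
      T                 _base[Capacity];
      std::atomic<int>  _index{0};
      std::atomic<bool> _overflow{false};

    public:
      void push(const T& e) {
        while (true) {
          int index = _index.load();
          if (index >= Capacity) {
            _overflow.store(true);            // mirrors setting _overflow
            return;
          }
          // Try to claim slot 'index'; on success it is ours to fill.
          if (_index.compare_exchange_weak(index, index + 1)) {
            _base[index] = e;
            return;
          }
          // Lost the race: loop and re-read the index.
        }
      }

      bool pop(T* out) {
        while (true) {
          int index = _index.load();
          if (index == 0) {
            return false;                     // empty, like returning MemRegion()
          }
          // Claim the top slot by decrementing the index.
          if (_index.compare_exchange_weak(index, index - 1)) {
            *out = _base[index - 1];
            return true;
          }
        }
      }
    };

As in the original, the slot contents are published after the index CAS, so a consumer can transiently observe a claimed-but-not-yet-filled slot; the original code copes with empty entries in any case, skipping MemRegions whose start() is NULL.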
@@ -563 +419 @@
             CardTableModRefBS::card_shift,
             false /* in_resource_area*/),
 
   _prevMarkBitMap(&_markBitMap1),
   _nextMarkBitMap(&_markBitMap2),
-  _at_least_one_mark_complete(false),
 
   _markStack(this),
-  _regionStack(),
   // _finger set in set_non_marking_state
 
   _max_task_num(MAX2((uint)ParallelGCThreads, 1U)),
   // _active_tasks set in set_non_marking_state
   // _tasks set inside the constructor
@@ -580 +434 @@
   _has_overflown(false),
   _concurrent(false),
   _has_aborted(false),
   _restart_for_overflow(false),
   _concurrent_marking_in_progress(false),
-  _should_gray_objects(false),
 
   // _verbose_level set below
 
   _init_times(),
   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
@@ -609 +462 @@
     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                            "heap end = "PTR_FORMAT, _heap_start, _heap_end);
   }
 
   _markStack.allocate(MarkStackSize);
-  _regionStack.allocate(G1MarkRegionStackSize);
 
   // Create & start a ConcurrentMark thread.
   _cmThread = new ConcurrentMarkThread(this);
   assert(cmThread() != NULL, "CM Thread should have been created");
   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
@@ -742 +594 @@
   set_non_marking_state();
 }
 
 void ConcurrentMark::update_g1_committed(bool force) {
   // If concurrent marking is not in progress, then we do not need to
-  // update _heap_end. This has a subtle and important
-  // side-effect. Imagine that two evacuation pauses happen between
-  // marking completion and remark. The first one can grow the
-  // heap (hence now the finger is below the heap end). Then, the
-  // second one could unnecessarily push regions on the region
-  // stack. This causes the invariant that the region stack is empty
-  // at the beginning of remark to be false. By ensuring that we do
-  // not observe heap expansions after marking is complete, then we do
-  // not have this problem.
+  // update _heap_end.
   if (!concurrent_marking_in_progress() && !force) return;
 
   MemRegion committed = _g1h->g1_committed();
   assert(committed.start() == _heap_start, "start shouldn't change");
   HeapWord* new_end = committed.end();
@@ -1055 +899 @@
   } else {
     return false;
   }
 }
 #endif // !PRODUCT
-
-void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
-  guarantee(false, "grayRegionIfNecessary(): don't call this any more");
-
-  // The objects on the region have already been marked "in bulk" by
-  // the caller. We only need to decide whether to push the region on
-  // the region stack or not.
-
-  if (!concurrent_marking_in_progress() || !_should_gray_objects) {
-    // We're done with marking and waiting for remark. We do not need to
-    // push anything else on the region stack.
-    return;
-  }
-
-  HeapWord* finger = _finger;
-
-  if (verbose_low()) {
-    gclog_or_tty->print_cr("[global] attempting to push "
-                           "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at "
-                           PTR_FORMAT, mr.start(), mr.end(), finger);
-  }
-
-  if (mr.start() < finger) {
-    // The finger is always heap region aligned and it is not possible
-    // for mr to span heap regions.
-    assert(mr.end() <= finger, "invariant");
-
-    // Separated the asserts so that we know which one fires.
-    assert(mr.start() <= mr.end(),
-           "region boundaries should fall within the committed space");
-    assert(_heap_start <= mr.start(),
-           "region boundaries should fall within the committed space");
-    assert(mr.end() <= _heap_end,
-           "region boundaries should fall within the committed space");
-    if (verbose_low()) {
-      gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") "
-                             "below the finger, pushing it",
-                             mr.start(), mr.end());
-    }
-
-    if (!region_stack_push_lock_free(mr)) {
-      if (verbose_low()) {
-        gclog_or_tty->print_cr("[global] region stack has overflown.");
-      }
-    }
-  }
-}
-
-void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) {
-  guarantee(false, "markAndGrayObjectIfNecessary(): don't call this any more");
-
-  // The object is not marked by the caller. We need to at least mark
-  // it and maybe push it on the stack.
-
-  HeapWord* addr = (HeapWord*)p;
-  if (!_nextMarkBitMap->isMarked(addr)) {
-    // We definitely need to mark it, irrespective of whether we bail out
-    // because we're done with marking.
-    if (_nextMarkBitMap->parMark(addr)) {
-      if (!concurrent_marking_in_progress() || !_should_gray_objects) {
-        // If we're done with concurrent marking and we're waiting for
-        // remark, then we're not pushing anything on the stack.
-        return;
-      }
-
-      // No OrderAccess::store_load() is needed. It is implicit in the
-      // CAS done in parMark(addr) above
-      HeapWord* finger = _finger;
-
-      if (addr < finger) {
-        if (!mark_stack_push(oop(addr))) {
-          if (verbose_low()) {
-            gclog_or_tty->print_cr("[global] global stack overflow "
-                                   "during parMark");
-          }
-        }
-      }
-    }
-  }
-}
 
 class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
   ConcurrentMark*       _cm;
   ConcurrentMarkThread* _cmt;
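
Both removed methods follow the finger protocol: anything at or above the global finger will still be visited by the bitmap scan, so only objects (or regions) below the finger need an explicit push. A standalone sketch of the object case, with a single 64-bit word standing in for CMBitMap and fetch_or playing the role of the parMark() CAS (illustrative names, not HotSpot code):

    #include <atomic>
    #include <cstdint>
    #include <vector>

    // Standalone sketch of "mark, then gray only if below the finger".
    // The bitmap is one 64-bit word, so object "addresses" are indices 0..63.
    class FingerProtocol {
      std::atomic<uint64_t> _bits{0};     // one mark bit per "object"
      std::atomic<unsigned> _finger{0};   // frontier of the bitmap scan
      std::vector<unsigned> _mark_stack;  // explicitly grayed objects
                                          // (the real stack is lock-free)

      // Returns true only for the thread that set the bit first,
      // mirroring CMBitMap::parMark().
      bool par_mark(unsigned idx) {
        uint64_t bit = uint64_t(1) << idx;
        return (_bits.fetch_or(bit) & bit) == 0;
      }

    public:
      void set_finger(unsigned f) { _finger.store(f); }

      void mark_and_gray(unsigned idx) {
        if (!par_mark(idx)) return;       // already marked: nothing more to do
        // Objects at or above the finger will still be reached by the
        // bitmap scan, so only objects below it need an explicit push.
        if (idx < _finger.load()) {
          _mark_stack.push_back(idx);     // real code must handle overflow here
        }
      }
    };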
@@ -2253 +2017 @@
   size_t known_garbage_bytes =
     g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
   g1p->set_known_garbage_bytes(known_garbage_bytes);
 
   size_t start_used_bytes = g1h->used();
-  _at_least_one_mark_complete = true;
   g1h->set_marking_complete();
 
   ergo_verbose4(ErgoConcCycles,
                 "finish cleanup",
                 ergo_format_byte("occupancy")
@@ -3063 +2826 @@
     gclog_or_tty->print_cr(" done");
     gclog_or_tty->flush();
   }
 
 #endif // PRODUCT
-
-// This note is for drainAllSATBBuffers and the code in between.
-// In the future we could reuse a task to do this work during an
-// evacuation pause (since now tasks are not active and can be claimed
-// during an evacuation pause). This was a late change to the code and
-// is currently not being taken advantage of.
-
-void ConcurrentMark::deal_with_reference(oop obj) {
-  if (verbose_high()) {
-    gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT,
-                           (void*) obj);
-  }
-
-  HeapWord* objAddr = (HeapWord*) obj;
-  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
-  if (_g1h->is_in_g1_reserved(objAddr)) {
-    assert(obj != NULL, "null check is implicit");
-    if (!_nextMarkBitMap->isMarked(objAddr)) {
-      // Only get the containing region if the object is not marked on the
-      // bitmap (otherwise, it's a waste of time since we won't do
-      // anything with it).
-      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
-      if (!hr->obj_allocated_since_next_marking(obj)) {
-        if (verbose_high()) {
-          gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered "
-                                 "marked", (void*) obj);
-        }
-
-        // we need to mark it first
-        if (_nextMarkBitMap->parMark(objAddr)) {
-          // No OrderAccess::store_load() is needed. It is implicit in the
-          // CAS done in parMark(objAddr) above
-          HeapWord* finger = _finger;
-          if (objAddr < finger) {
-            if (verbose_high()) {
-              gclog_or_tty->print_cr("[global] below the global finger "
-                                     "("PTR_FORMAT"), pushing it", finger);
-            }
-            if (!mark_stack_push(obj)) {
-              if (verbose_low()) {
-                gclog_or_tty->print_cr("[global] global stack overflow during "
-                                       "deal_with_reference");
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-}
-
-class CMGlobalObjectClosure : public ObjectClosure {
-private:
-  ConcurrentMark* _cm;
-
-public:
-  void do_object(oop obj) {
-    _cm->deal_with_reference(obj);
-  }
-
-  CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { }
-};
-
-void ConcurrentMark::drainAllSATBBuffers() {
-  guarantee(false, "drainAllSATBBuffers(): don't call this any more");
-
-  CMGlobalObjectClosure oc(this);
-  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  satb_mq_set.set_closure(&oc);
-
-  while (satb_mq_set.apply_closure_to_completed_buffer()) {
-    if (verbose_medium()) {
-      gclog_or_tty->print_cr("[global] processed an SATB buffer");
-    }
-  }
-
-  // no need to check whether we should do this, as this is only
-  // called during an evacuation pause
-  satb_mq_set.iterate_closure_all_threads();
-
-  satb_mq_set.set_closure(NULL);
-  assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
-}
 
 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
   // Note we are overriding the read-only view of the prev map here, via
   // the cast.
   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
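
The removed drainAllSATBBuffers() has a simple shape: repeatedly consume completed SATB buffers, then — since this only ran at a pause — scrape the partially filled per-thread buffers. A standalone sketch of that shape with a stand-in queue set (not HotSpot's SATBMarkQueueSet):

    #include <deque>
    #include <functional>

    // Stand-in for an SATB queue set; illustrative only.
    struct SatbQueueSetModel {
      std::deque<std::deque<void*>> completed;   // completed SATB buffers
      std::deque<std::deque<void*>> per_thread;  // partially filled buffers

      // Process one completed buffer; false when none are left.
      bool apply_closure_to_completed_buffer(const std::function<void(void*)>& cl) {
        if (completed.empty()) return false;
        for (void* obj : completed.front()) cl(obj);
        completed.pop_front();
        return true;
      }

      void iterate_closure_all_threads(const std::function<void(void*)>& cl) {
        for (const auto& buf : per_thread)
          for (void* obj : buf) cl(obj);
      }
    };

    inline void drain_all_satb_buffers(SatbQueueSetModel& qs,
                                       const std::function<void(void*)>& deal) {
      while (qs.apply_closure_to_completed_buffer(deal)) {
        // one completed buffer consumed per iteration
      }
      // Safe only because this runs at a pause, so the in-progress
      // per-thread buffers are stable.
      qs.iterate_closure_all_threads(deal);
    }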
@@ -3255 +2935 @@
   }
 
   return NULL;
 }
 
-bool ConcurrentMark::invalidate_aborted_regions_in_cset() {
-  guarantee(false, "invalidate_aborted_regions_in_cset(): "
-            "don't call this any more");
-
-  bool result = false;
-  for (int i = 0; i < (int)_max_task_num; ++i) {
-    CMTask* the_task = _tasks[i];
-    MemRegion mr = the_task->aborted_region();
-    if (mr.start() != NULL) {
-      assert(mr.end() != NULL, "invariant");
-      assert(mr.word_size() > 0, "invariant");
-      HeapRegion* hr = _g1h->heap_region_containing(mr.start());
-      assert(hr != NULL, "invariant");
-      if (hr->in_collection_set()) {
-        // The region points into the collection set
-        the_task->set_aborted_region(MemRegion());
-        result = true;
-      }
-    }
-  }
-  return result;
-}
-
-bool ConcurrentMark::has_aborted_regions() {
-  for (int i = 0; i < (int)_max_task_num; ++i) {
-    CMTask* the_task = _tasks[i];
-    MemRegion mr = the_task->aborted_region();
-    if (mr.start() != NULL) {
-      assert(mr.end() != NULL, "invariant");
-      assert(mr.word_size() > 0, "invariant");
-      return true;
-    }
-  }
-  return false;
-}
-
-void ConcurrentMark::oops_do(OopClosure* cl) {
-  if (_markStack.size() > 0 && verbose_low()) {
-    gclog_or_tty->print_cr("[global] scanning the global marking stack, "
-                           "size = %d", _markStack.size());
-  }
-  // we first iterate over the contents of the mark stack...
-  _markStack.oops_do(cl);
-
-  for (int i = 0; i < (int)_max_task_num; ++i) {
-    OopTaskQueue* queue = _task_queues->queue((int)i);
-
-    if (queue->size() > 0 && verbose_low()) {
-      gclog_or_tty->print_cr("[global] scanning task queue of task %d, "
-                             "size = %d", i, queue->size());
-    }
-
-    // ...then over the contents of all the task queues.
-    queue->oops_do(cl);
-  }
-}
-
 #ifndef PRODUCT
 enum VerifyNoCSetOopsPhase {
   VerifyNoCSetOopsStack,
   VerifyNoCSetOopsQueues,
   VerifyNoCSetOopsSATBCompleted,
@@ -3443 +3066 @@
 #endif // PRODUCT
 
 void ConcurrentMark::clear_marking_state(bool clear_overflow) {
   _markStack.setEmpty();
   _markStack.clear_overflow();
-  _regionStack.setEmpty();
-  _regionStack.clear_overflow();
   if (clear_overflow) {
     clear_has_overflown();
   } else {
     assert(has_overflown(), "pre-condition");
   }
   _finger = _heap_start;
 
   for (int i = 0; i < (int)_max_task_num; ++i) {
     OopTaskQueue* queue = _task_queues->queue(i);
     queue->set_empty();
-    // Clear any partial regions from the CMTasks
-    _tasks[i]->clear_aborted_region();
   }
 }
 
 // Aggregate the counting data that was constructed concurrently
 // with marking.
@@ -3652 +3271 @@
   if (verbose_stats()) {
     gclog_or_tty->print_cr("---------------------------------------------------------------------");
     for (size_t i = 0; i < _active_tasks; ++i) {
       _tasks[i]->print_stats();
       gclog_or_tty->print_cr("---------------------------------------------------------------------");
-    }
-  }
-}
-
-// Closures used by ConcurrentMark::complete_marking_in_collection_set().
-
-class CSetMarkOopClosure: public OopClosure {
-  friend class CSetMarkBitMapClosure;
-
-  G1CollectedHeap* _g1h;
-  CMBitMap*        _bm;
-  ConcurrentMark*  _cm;
-  oop*             _ms;
-  jint*            _array_ind_stack;
-  int              _ms_size;
-  int              _ms_ind;
-  int              _array_increment;
-  uint             _worker_id;
-
-  bool push(oop obj, int arr_ind = 0) {
-    if (_ms_ind == _ms_size) {
-      gclog_or_tty->print_cr("Mark stack is full.");
-      return false;
-    }
-    _ms[_ms_ind] = obj;
-    if (obj->is_objArray()) {
-      _array_ind_stack[_ms_ind] = arr_ind;
-    }
-    _ms_ind++;
-    return true;
-  }
-
-  oop pop() {
-    if (_ms_ind == 0) {
-      return NULL;
-    } else {
-      _ms_ind--;
-      return _ms[_ms_ind];
-    }
-  }
-
-  template <class T> bool drain() {
-    while (_ms_ind > 0) {
-      oop obj = pop();
-      assert(obj != NULL, "Since index was non-zero.");
-      if (obj->is_objArray()) {
-        jint arr_ind = _array_ind_stack[_ms_ind];
-        objArrayOop aobj = objArrayOop(obj);
-        jint len = aobj->length();
-        jint next_arr_ind = arr_ind + _array_increment;
-        if (next_arr_ind < len) {
-          push(obj, next_arr_ind);
-        }
-        // Now process this portion of this one.
-        int lim = MIN2(next_arr_ind, len);
-        for (int j = arr_ind; j < lim; j++) {
-          do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j));
-        }
-      } else {
-        obj->oop_iterate(this);
-      }
-      if (abort()) return false;
-    }
-    return true;
-  }
-
-public:
-  CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, uint worker_id) :
-    _g1h(G1CollectedHeap::heap()),
-    _cm(cm),
-    _bm(cm->nextMarkBitMap()),
-    _ms_size(ms_size), _ms_ind(0),
-    _ms(NEW_C_HEAP_ARRAY(oop, ms_size)),
-    _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)),
-    _array_increment(MAX2(ms_size/8, 16)),
-    _worker_id(worker_id) { }
-
-  ~CSetMarkOopClosure() {
-    FREE_C_HEAP_ARRAY(oop, _ms);
-    FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
-  }
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-
-  template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (oopDesc::is_null(heap_oop)) return;
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (obj->is_forwarded()) {
-      // If the object has already been forwarded, we have to make sure
-      // that it's marked. So follow the forwarding pointer. Note that
-      // this does the right thing for self-forwarding pointers in the
-      // evacuation failure case.
-      obj = obj->forwardee();
-    }
-    HeapRegion* hr = _g1h->heap_region_containing(obj);
-    if (hr != NULL) {
-      if (hr->in_collection_set()) {
-        if (_g1h->is_obj_ill(obj)) {
-          if (_bm->parMark((HeapWord*)obj)) {
-            if (!push(obj)) {
-              gclog_or_tty->print_cr("Setting abort in CSetMarkOopClosure because push failed.");
-              set_abort();
-            }
-          }
-        }
-      } else {
-        // Outside the collection set; we need to gray it
-        _cm->deal_with_reference(obj);
-      }
-    }
-  }
-};
-
-class CSetMarkBitMapClosure: public BitMapClosure {
-  G1CollectedHeap*   _g1h;
-  CMBitMap*          _bitMap;
-  ConcurrentMark*    _cm;
-  CSetMarkOopClosure _oop_cl;
-  uint               _worker_id;
-
-public:
-  CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_id) :
-    _g1h(G1CollectedHeap::heap()),
-    _bitMap(cm->nextMarkBitMap()),
-    _oop_cl(cm, ms_size, worker_id),
-    _worker_id(worker_id) { }
-
-  bool do_bit(size_t offset) {
-    // convert offset into a HeapWord*
-    HeapWord* addr = _bitMap->offsetToHeapWord(offset);
-    assert(_bitMap->startWord() <= addr && addr < _bitMap->endWord(),
-           "address out of range");
-    assert(_bitMap->isMarked(addr), "tautology");
-    oop obj = oop(addr);
-    if (!obj->is_forwarded()) {
-      if (!_oop_cl.push(obj)) return false;
-      if (UseCompressedOops) {
-        if (!_oop_cl.drain<narrowOop>()) return false;
-      } else {
-        if (!_oop_cl.drain<oop>()) return false;
-      }
-    }
-    // Otherwise...
-    return true;
-  }
-};
-
-class CompleteMarkingInCSetHRClosure: public HeapRegionClosure {
-  CMBitMap*             _bm;
-  CSetMarkBitMapClosure _bit_cl;
-  uint                  _worker_id;
-
-  enum SomePrivateConstants {
-    MSSize = 1000
-  };
-
-public:
-  CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_id) :
-    _bm(cm->nextMarkBitMap()),
-    _bit_cl(cm, MSSize, worker_id),
-    _worker_id(worker_id) { }
-
-  bool doHeapRegion(HeapRegion* hr) {
-    if (hr->claimHeapRegion(HeapRegion::CompleteMarkCSetClaimValue)) {
-      // The current worker has successfully claimed the region.
-      if (!hr->evacuation_failed()) {
-        MemRegion mr = MemRegion(hr->bottom(), hr->next_top_at_mark_start());
-        if (!mr.is_empty()) {
-          bool done = false;
-          while (!done) {
-            done = _bm->iterate(&_bit_cl, mr);
-          }
-        }
-      }
-    }
-    return false;
-  }
-};
-
-class G1ParCompleteMarkInCSetTask: public AbstractGangTask {
-protected:
-  G1CollectedHeap* _g1h;
-  ConcurrentMark*  _cm;
-
-public:
-  G1ParCompleteMarkInCSetTask(G1CollectedHeap* g1h,
-                              ConcurrentMark* cm) :
-    AbstractGangTask("Complete Mark in CSet"),
-    _g1h(g1h), _cm(cm) { }
-
-  void work(uint worker_id) {
-    CompleteMarkingInCSetHRClosure cmplt(_cm, worker_id);
-    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
-    _g1h->collection_set_iterate_from(hr, &cmplt);
-  }
-};
-
-void ConcurrentMark::complete_marking_in_collection_set() {
-  guarantee(false, "complete_marking_in_collection_set(): "
-            "don't call this any more");
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  if (!g1h->mark_in_progress()) {
-    g1h->g1_policy()->record_mark_closure_time(0.0);
-    return;
-  }
-
-  double start = os::elapsedTime();
-  G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
-
-  assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
-
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    int n_workers = g1h->workers()->active_workers();
-    g1h->set_par_threads(n_workers);
-    g1h->workers()->run_task(&complete_mark_task);
-    g1h->set_par_threads(0);
-  } else {
-    complete_mark_task.work(0);
-  }
-
-  assert(g1h->check_cset_heap_region_claim_values(HeapRegion::CompleteMarkCSetClaimValue), "sanity");
-
-  // Reset the claim values in the regions in the collection set.
-  g1h->reset_cset_heap_region_claim_values();
-
-  assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
-
-  double end_time = os::elapsedTime();
-  double elapsed_time_ms = (end_time - start) * 1000.0;
-  g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
-}
-
-// The next two methods deal with the following optimisation. Some
-// objects are gray by being marked and located above the finger. If
-// they are copied, during an evacuation pause, below the finger then
-// they need to be pushed on the stack. The observation is that, if
-// there are no regions in the collection set located above the
-// finger, then the above cannot happen, hence we do not need to
-// explicitly gray any objects when copying them to below the
-// finger. The global stack will be scanned to ensure that, if it
-// points to objects being copied, it will update their
-// location. There is a tricky situation with the gray objects in
-// the region stack that are being copied, however. See the comment in
-// newCSet().
-
-void ConcurrentMark::newCSet() {
-  guarantee(false, "newCSet(): don't call this any more");
-
-  if (!concurrent_marking_in_progress()) {
-    // nothing to do if marking is not in progress
-    return;
-  }
-
-  // find what the lowest finger is among the global and local fingers
-  _min_finger = _finger;
-  for (int i = 0; i < (int)_max_task_num; ++i) {
-    CMTask* task = _tasks[i];
-    HeapWord* task_finger = task->finger();
-    if (task_finger != NULL && task_finger < _min_finger) {
-      _min_finger = task_finger;
-    }
-  }
-
-  _should_gray_objects = false;
-
-  // This fixes a very subtle and frustrating bug. It might be the case
-  // that, during an evacuation pause, heap regions that contain
-  // objects that are gray (by being in regions contained in the
-  // region stack) are included in the collection set. Since such gray
-  // objects will be moved, and because it's not easy to redirect
-  // region stack entries to point to a new location (because objects
-  // in one region might be scattered to multiple regions after they
-  // are copied), one option is to ensure that all marked objects
-  // copied during a pause are pushed on the stack. Notice, however,
-  // that this problem can only happen when the region stack is not
-  // empty during an evacuation pause. So, we make the fix a bit less
-  // conservative and ensure that regions are pushed on the stack,
-  // irrespective of whether all collection set regions are below the
-  // finger, if the region stack is not empty. This is expected to be
-  // a rare case, so I don't think it's necessary to be smarter about it.
-  if (!region_stack_empty() || has_aborted_regions()) {
-    _should_gray_objects = true;
-  }
-}
-
-void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
-  guarantee(false, "registerCSetRegion(): don't call this any more");
-
-  if (!concurrent_marking_in_progress()) return;
-
-  HeapWord* region_end = hr->end();
-  if (region_end > _min_finger) {
-    _should_gray_objects = true;
-  }
-}
-
-// Resets the region fields of active CMTasks whose values point
-// into the collection set.
-void ConcurrentMark::reset_active_task_region_fields_in_cset() {
-  guarantee(false, "reset_active_task_region_fields_in_cset(): "
-            "don't call this any more");
-
-  assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
-  assert(parallel_marking_threads() <= _max_task_num, "sanity");
-
-  for (int i = 0; i < (int)parallel_marking_threads(); i += 1) {
-    CMTask* task = _tasks[i];
-    HeapWord* task_finger = task->finger();
-    if (task_finger != NULL) {
-      assert(_g1h->is_in_g1_reserved(task_finger), "not in heap");
-      HeapRegion* finger_region = _g1h->heap_region_containing(task_finger);
-      if (finger_region->in_collection_set()) {
-        // The task's current region is in the collection set.
-        // This region will be evacuated in the current GC and
-        // the region fields in the task will be stale.
-        task->giveup_current_region();
-      }
-    }
     }
   }
 }
 
 // abandon current marking iteration due to a Full GC
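
The removed CSetMarkOopClosure::drain() bounds its private mark stack by scanning object arrays in _array_increment-sized slices: before scanning a slice it re-pushes the array with the next resume index. A standalone sketch of that chunking trick (illustrative types, not HotSpot code):

    #include <algorithm>
    #include <vector>

    // Chunked draining of large arrays, so one array cannot monopolize
    // the private mark stack.
    struct ArrayChunkDrainer {
      struct Entry {
        const std::vector<int>* arr;  // stand-in for an objArrayOop
        int resume_ind;               // next element index to scan
      };
      std::vector<Entry> _stack;
      int _increment;                 // elements scanned per slice

      void scan(int /*elem*/) { /* visit one reference */ }

      void drain() {
        while (!_stack.empty()) {
          Entry e = _stack.back();
          _stack.pop_back();
          int len  = (int)e.arr->size();
          int next = e.resume_ind + _increment;
          if (next < len) {
            // Re-push the remainder first, as push(obj, next_arr_ind) does.
            _stack.push_back(Entry{e.arr, next});
          }
          // Now scan just this slice of the array.
          int lim = std::min(next, len);
          for (int j = e.resume_ind; j < lim; j++) {
            scan((*e.arr)[j]);
          }
        }
      }
    };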
@@ -4110 +3408 @@
 private:
   // the bitmap that is being iterated over
   CMBitMap*       _nextMarkBitMap;
   ConcurrentMark* _cm;
   CMTask*         _task;
-  // true if we're scanning a heap region claimed by the task (so that
-  // we move the finger along), false if we're not, i.e. currently when
-  // scanning a heap region popped from the region stack (so that we
-  // do not move the task finger along; it'd be a mistake if we did so).
-  bool            _scanning_heap_region;
 
 public:
-  CMBitMapClosure(CMTask *task,
-                  ConcurrentMark* cm,
-                  CMBitMap* nextMarkBitMap)
-    : _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
-
-  void set_scanning_heap_region(bool scanning_heap_region) {
-    _scanning_heap_region = scanning_heap_region;
-  }
+  CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
+    _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
 
   bool do_bit(size_t offset) {
     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
     assert(_nextMarkBitMap->isMarked(addr), "invariant");
     assert( addr < _cm->finger(), "invariant");
 
-    if (_scanning_heap_region) {
-      statsOnly( _task->increase_objs_found_on_bitmap() );
-      assert(addr >= _task->finger(), "invariant");
-      // We move that task's local finger along.
-      _task->move_finger_to(addr);
-    } else {
-      // We move the task's region finger along.
-      _task->move_region_finger_to(addr);
-    }
+    statsOnly( _task->increase_objs_found_on_bitmap() );
+    assert(addr >= _task->finger(), "invariant");
+
+    // We move that task's local finger along.
+    _task->move_finger_to(addr);
 
     _task->scan_object(oop(addr));
     // we only partially drain the local queue and global stack
     _task->drain_local_queue(true);
     _task->drain_global_stack(true);
@@ -4247 +3530 @@
   // Values for these three fields that indicate that we're not
   // holding on to a region.
   _curr_region   = NULL;
   _finger        = NULL;
   _region_limit  = NULL;
-
-  _region_finger = NULL;
 }
 
 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
   if (cm_oop_closure == NULL) {
     assert(_cm_oop_closure != NULL, "invariant");
@@ -4269 +3550 @@
     gclog_or_tty->print_cr("[%d] resetting", _task_id);
   }
 
   _nextMarkBitMap = nextMarkBitMap;
   clear_region_fields();
-  assert(_aborted_region.is_empty(), "should have been cleared");
 
   _calls                     = 0;
   _elapsed_time_ms           = 0.0;
   _termination_time_ms       = 0.0;
   _termination_start_time_ms = 0.0;
@@ -4286 +3566 @@
   _global_pushes          = 0;
   _global_pops            = 0;
   _global_max_size        = 0;
   _global_transfers_to    = 0;
   _global_transfers_from  = 0;
-  _region_stack_pops      = 0;
   _regions_claimed        = 0;
   _objs_found_on_bitmap   = 0;
   _satb_buffers_processed = 0;
   _steal_attempts         = 0;
   _steals                 = 0;
@@ -4659 +3938 @@
   }
 
   // again, this was a potentially expensive operation, decrease the
   // limits to get the regular clock call early
   decrease_limits();
-}
-
-void CMTask::drain_region_stack(BitMapClosure* bc) {
-  assert(_cm->region_stack_empty(), "region stack should be empty");
-  assert(_aborted_region.is_empty(), "aborted region should be empty");
-  return;
-
-  if (has_aborted()) return;
-
-  assert(_region_finger == NULL,
-         "it should be NULL when we're not scanning a region");
-
-  if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) {
-    if (_cm->verbose_low()) {
-      gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
-                             _task_id, _cm->region_stack_size());
-    }
-
-    MemRegion mr;
-
-    if (!_aborted_region.is_empty()) {
-      mr = _aborted_region;
-      _aborted_region = MemRegion();
-
-      if (_cm->verbose_low()) {
-        gclog_or_tty->print_cr("[%d] scanning aborted region "
-                               "[ " PTR_FORMAT ", " PTR_FORMAT " )",
-                               _task_id, mr.start(), mr.end());
-      }
-    } else {
-      mr = _cm->region_stack_pop_lock_free();
-      // it returns MemRegion() if the pop fails
-      statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
-    }
-
-    while (mr.start() != NULL) {
-      if (_cm->verbose_medium()) {
-        gclog_or_tty->print_cr("[%d] we are scanning region "
-                               "["PTR_FORMAT", "PTR_FORMAT")",
-                               _task_id, mr.start(), mr.end());
-      }
-
-      assert(mr.end() <= _cm->finger(),
-             "otherwise the region shouldn't be on the stack");
-      assert(!mr.is_empty(), "Only non-empty regions live on the region stack");
-      if (_nextMarkBitMap->iterate(bc, mr)) {
-        assert(!has_aborted(),
-               "cannot abort the task without aborting the bitmap iteration");
-
-        // We finished iterating over the region without aborting.
-        regular_clock_call();
-        if (has_aborted()) {
-          mr = MemRegion();
-        } else {
-          mr = _cm->region_stack_pop_lock_free();
-          // it returns MemRegion() if the pop fails
-          statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
-        }
-      } else {
-        assert(has_aborted(), "currently the only way to do so");
-
-        // The only way to abort the bitmap iteration is to return
-        // false from the do_bit() method. However, inside the
-        // do_bit() method we move the _region_finger to point to the
-        // object currently being looked at. So, if we bail out, we
-        // have definitely set _region_finger to something non-null.
-        assert(_region_finger != NULL, "invariant");
-
-        // Make sure that any previously aborted region has been
-        // cleared.
-        assert(_aborted_region.is_empty(), "aborted region not cleared");
-
-        // The iteration was actually aborted. So now _region_finger
-        // points to the address of the object we last scanned. If we
-        // leave it there, when we restart this task, we will rescan
-        // the object. It is easy to avoid this. We move the finger by
-        // enough to point to the next possible object header (the
-        // bitmap knows by how much we need to move it as it knows its
-        // granularity).
-        MemRegion newRegion =
-          MemRegion(_nextMarkBitMap->nextWord(_region_finger), mr.end());
-
-        if (!newRegion.is_empty()) {
-          if (_cm->verbose_low()) {
-            gclog_or_tty->print_cr("[%d] recording unscanned region"
-                                   "[" PTR_FORMAT "," PTR_FORMAT ") in CMTask",
-                                   _task_id,
-                                   newRegion.start(), newRegion.end());
-          }
-          // Now record the part of the region we didn't scan to
-          // make sure this task scans it later.
-          _aborted_region = newRegion;
-        }
-        // break from while
-        mr = MemRegion();
-      }
-      _region_finger = NULL;
-    }
-
-    if (_cm->verbose_low()) {
-      gclog_or_tty->print_cr("[%d] drained region stack, size = %d",
-                             _task_id, _cm->region_stack_size());
-    }
-  }
 }
 
 void CMTask::print_stats() {
   gclog_or_tty->print_cr("Marking Stats, task = %d, calls = %d",
                          _task_id, _calls);
@@ -4793 +3968 @@
                          _local_pushes, _local_pops, _local_max_size);
   gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
                          _global_pushes, _global_pops, _global_max_size);
   gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
                          _global_transfers_to,_global_transfers_from);
-  gclog_or_tty->print_cr(" Regions: claimed = %d, Region Stack: pops = %d",
-                         _regions_claimed, _region_stack_pops);
+  gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed);
   gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
   gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
                          _steal_attempts, _steals);
   gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
   gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
@@ -4853 +4027 @@
     phase, tasks attempt to keep the global mark stack at a small
     length but not totally empty, so that entries are available for
     popping by other tasks. Only when there is no more work, tasks
     will totally drain the global mark stack.
 
-    (4) Global Region Stack. Entries on it correspond to areas of
-    the bitmap that need to be scanned since they contain gray
-    objects. Pushes on the region stack only happen during
-    evacuation pauses and typically correspond to areas covered by
-    GC LABs. If it overflows, then the marking phase should restart
-    and iterate over the bitmap to identify gray objects. Tasks will
-    try to totally drain the region stack as soon as possible.
-
-    (5) SATB Buffer Queue. This is where completed SATB buffers are
+    (4) SATB Buffer Queue. This is where completed SATB buffers are
     made available. Buffers are regularly removed from this queue
     and scanned for roots, so that the queue doesn't get too
     long. During remark, all completed buffers are processed, as
     well as the filled in parts of any uncompleted buffers.
 
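The "small length but not totally empty" policy corresponds to the partial drains (drain_global_stack(true)) seen elsewhere in this file: drain down to a watermark during normal operation, and drain completely only at the end. A standalone sketch of that policy; the watermark value is an assumption for illustration:

    #include <cstddef>
    #include <vector>

    // Partial-vs-total drain of a shared work stack; illustrative only.
    struct DrainPolicy {
      std::vector<int> global_stack;
      size_t partial_watermark = 64;  // assumed: "small but not totally empty"

      void scan(int /*entry*/) { /* process one entry */ }

      void drain(bool partially) {
        size_t target = partially ? partial_watermark : 0;
        while (global_stack.size() > target) {
          int e = global_stack.back();
          global_stack.pop_back();
          scan(e);                      // may push more work in real code
        }
      }
    };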
@@ -4873 +4039 @@
     has been reached. There are a few other cases when the
     do_marking_step() method also aborts:
 
     (1) When the marking phase has been aborted (after a Full GC).
 
-    (2) When a global overflow (either on the global stack or the
-    region stack) has been triggered. Before the task aborts, it
-    will actually sync up with the other tasks to ensure that all
-    the marking data structures (local queues, stacks, fingers etc.)
-    are re-initialised so that when do_marking_step() completes,
-    the marking phase can immediately restart.
+    (2) When a global overflow (on the global stack) has been
+    triggered. Before the task aborts, it will actually sync up with
+    the other tasks to ensure that all the marking data structures
+    (local queues, stacks, fingers etc.) are re-initialised so that
+    when do_marking_step() completes, the marking phase can
+    immediately restart.
 
     (3) When enough completed SATB buffers are available. The
     do_marking_step() method only tries to drain SATB buffers right
     at the beginning. So, if enough buffers are available, the
     marking step aborts and the SATB buffers are processed at
@@ -4921 +4087 @@
                                bool do_stealing,
                                bool do_termination) {
   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
   assert(concurrent() == _cm->concurrent(), "they should be the same");
 
-  assert(concurrent() || _cm->region_stack_empty(),
-         "the region stack should have been cleared before remark");
-  assert(concurrent() || !_cm->has_aborted_regions(),
-         "aborted regions should have been cleared before remark");
-  assert(_region_finger == NULL,
-         "this should be non-null only when a region is being scanned");
-
   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
   assert(_task_queues != NULL, "invariant");
   assert(_task_queue != NULL, "invariant");
   assert(_task_queues->queue(_task_id) == _task_queue, "invariant");
 
@@ -4976 +4135 @@
   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
   set_cm_oop_closure(&cm_oop_closure);
 
   if (_cm->has_overflown()) {
-    // This can happen if the region stack or the mark stack overflows
-    // during a GC pause and this task, after a yield point,
-    // restarts. We have to abort as we need to get into the overflow
-    // protocol which happens right at the end of this task.
+    // This can happen if the mark stack overflows during a GC pause
+    // and this task, after a yield point, restarts. We have to abort
+    // as we need to get into the overflow protocol which happens
+    // right at the end of this task.
     set_has_aborted();
   }
 
   // First drain any available SATB buffers. After this, we will not
   // look at SATB buffers before the next invocation of this method.
   // If enough completed SATB buffers are queued up, the regular clock
   // will abort this task so that it restarts.
   drain_satb_buffers();
-  // ...then partially drain the local queue and the global stack
-  drain_local_queue(true);
-  drain_global_stack(true);
-
-  // Then totally drain the region stack. We will not look at
-  // it again before the next invocation of this method. Entries on
-  // the region stack are only added during evacuation pauses, for
-  // which we have to yield. When we do, we abort the task anyway so
-  // it will look at the region stack again when it restarts.
-  bitmap_closure.set_scanning_heap_region(false);
-  drain_region_stack(&bitmap_closure);
   // ...then partially drain the local queue and the global stack
   drain_local_queue(true);
   drain_global_stack(true);
 
   do {
@@ -5032 +4180 @@
                                _task_id, _finger, _region_limit, _curr_region);
       }
 
       // Let's iterate over the bitmap of the part of the
       // region that is left.
-      bitmap_closure.set_scanning_heap_region(true);
-      if (mr.is_empty() ||
-          _nextMarkBitMap->iterate(&bitmap_closure, mr)) {
+      if (mr.is_empty() || _nextMarkBitMap->iterate(&bitmap_closure, mr)) {
         // We successfully completed iterating over the region. Now,
         // let's give up the region.
         giveup_current_region();
         regular_clock_call();
       } else {
5060 assert(_finger < _region_limit, "invariant"); 4206 assert(_finger < _region_limit, "invariant");
5061 HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger); 4207 HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
5062 // Check if bitmap iteration was aborted while scanning the last object 4208 // Check if bitmap iteration was aborted while scanning the last object
5063 if (new_finger >= _region_limit) { 4209 if (new_finger >= _region_limit) {
5064 giveup_current_region(); 4210 giveup_current_region();
5065 } else { 4211 } else {
5066 move_finger_to(new_finger); 4212 move_finger_to(new_finger);
5067 } 4213 }
5068 } 4214 }
5069 } 4215 }
5070 // At this point we have either completed iterating over the 4216 // At this point we have either completed iterating over the
5071 // region we were holding on to, or we have aborted. 4217 // region we were holding on to, or we have aborted.
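
The giveup/advance logic above is the abort-resume idiom: when bitmap iteration aborts inside do_bit(), the task finger is left on the object being scanned, so on restart the finger is first advanced by the bitmap's granularity (nextWord()) to avoid rescanning that object. A standalone sketch, with bit indices standing in for heap addresses (illustrative names):

    #include <cstddef>
    #include <vector>

    // Abortable bitmap scan with a resumable finger; illustrative only.
    struct AbortableBitmapScan {
      std::vector<bool> bits;   // one bit per possible object start
      size_t finger = 0;        // resume point, as a bit index
      size_t limit  = 0;        // one past the last bit to scan

      // Returns true if [finger, limit) was fully scanned, false if the
      // visitor aborted; 'finger' is left at the bit being visited.
      template <typename Visitor>
      bool iterate(Visitor visit) {
        for (size_t i = finger; i < limit; i++) {
          if (bits[i]) {
            finger = i;                 // mirrors move_finger_to(addr)
            if (!visit(i)) return false;
          }
        }
        return true;
      }

      // After an abort, step past the aborted object's first word,
      // mirroring _nextMarkBitMap->nextWord(_finger); give up the range
      // if nothing is left.
      bool resume_possible() {
        size_t new_finger = finger + 1;
        if (new_finger >= limit) return false;  // giveup_current_region()
        finger = new_finger;
        return true;
      }
    };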
@@ -5117 +4263 @@
     }
   } while ( _curr_region != NULL && !has_aborted());
 
   if (!has_aborted()) {
     // We cannot check whether the global stack is empty, since other
-    // tasks might be pushing objects to it concurrently. We also cannot
-    // check if the region stack is empty because if a thread is aborting
-    // it can push a partially done region back.
+    // tasks might be pushing objects to it concurrently.
     assert(_cm->out_of_regions(),
            "at this point we should be out of regions");
 
     if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] all regions claimed", _task_id);
@@ -5143 +4287 @@
   if (do_stealing && !has_aborted()) {
     // We have not aborted. This means that we have finished all that
     // we could. Let's try to do some stealing...
 
     // We cannot check whether the global stack is empty, since other
-    // tasks might be pushing objects to it concurrently. We also cannot
-    // check if the region stack is empty because if a thread is aborting
-    // it can push a partially done region back.
+    // tasks might be pushing objects to it concurrently.
     assert(_cm->out_of_regions() && _task_queue->size() == 0,
            "only way to reach here");
 
     if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] starting to steal", _task_id);
@@ -5192 +4334 @@
 
   // We still haven't aborted. Now, let's try to get into the
   // termination protocol.
   if (do_termination && !has_aborted()) {
     // We cannot check whether the global stack is empty, since other
-    // tasks might be concurrently pushing objects on it. We also cannot
-    // check if the region stack is empty because if a thread is aborting
-    // it can push a partially done region back.
+    // tasks might be concurrently pushing objects on it.
     // Separated the asserts so that we know which one fires.
     assert(_cm->out_of_regions(), "only way to reach here");
     assert(_task_queue->size() == 0, "only way to reach here");
 
     if (_cm->verbose_low()) {
@@ -5231 +4371 @@
       // We can now guarantee that the global stack is empty, since
       // all other tasks have finished. We separated the guarantees so
       // that, if a condition is false, we can immediately find out
      // which one.
       guarantee(_cm->out_of_regions(), "only way to reach here");
-      guarantee(_aborted_region.is_empty(), "only way to reach here");
-      guarantee(_cm->region_stack_empty(), "only way to reach here");
       guarantee(_cm->mark_stack_empty(), "only way to reach here");
       guarantee(_task_queue->size() == 0, "only way to reach here");
       guarantee(!_cm->has_overflown(), "only way to reach here");
       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
-      guarantee(!_cm->region_stack_overflow(), "only way to reach here");
 
       if (_cm->verbose_low()) {
         gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id);
       }
     } else {
@@ -5340 +4477 @@
     _claimed(false),
     _nextMarkBitMap(NULL), _hash_seed(17),
     _task_queue(task_queue),
     _task_queues(task_queues),
     _cm_oop_closure(NULL),
-    _aborted_region(MemRegion()),
     _marked_bytes_array(marked_bytes),
     _card_bm(card_bm) {
   guarantee(task_queue != NULL, "invariant");
   guarantee(task_queues != NULL, "invariant");
 