comparison src/share/vm/gc_implementation/g1/concurrentMark.hpp @ 1835:4805b9f4779e

6941395: G1: Use only lock-free versions of region stack push() and pop()
Summary: Re-enable use of the lock-free versions of region stack push() and
pop() by recording aborted regions in a thread-local structure, which are then
processed when scanning of the region stack restarts. The previous locking
versions of these routines are retained for diagnostic purposes.
Reviewed-by: tonyp, ysr
author johnc
date Tue, 28 Sep 2010 09:51:37 -0700
parents c18cbe5936b8
children f95d63e2154a
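
The summary above describes the change in two parts: the region stack's push and pop become lock-free again (each is only ever called concurrently with operations of its own kind), and a region whose scan is aborted is remembered in the task's own CMTask instead of being pushed back onto the shared stack. What follows is a minimal, self-contained sketch of that lock-free stack discipline only; Region, LockFreeRegionStack, and their members are invented for this illustration and are not the HotSpot CMRegionStack, which handles allocation, overflow, and memory ordering differently.

#include <atomic>
#include <cstddef>

// Stand-in for MemRegion in this illustration: a [start, end) word range.
struct Region {
  size_t start = 0;
  size_t end   = 0;
  bool is_empty() const { return start == end; }
};

// Minimal sketch of the lock-free discipline used by the region stack:
// push_lock_free() may only run concurrently with other pushes, and
// pop_lock_free() only with other pops, so a single atomic index suffices.
class LockFreeRegionStack {
  Region*          _base;
  std::atomic<int> _index;
  int              _capacity;
  bool             _overflow;
public:
  explicit LockFreeRegionStack(int capacity)
    : _base(new Region[capacity]), _index(0),
      _capacity(capacity), _overflow(false) { }
  ~LockFreeRegionStack() { delete[] _base; }

  // Each pusher claims a unique slot by atomically bumping the index,
  // then fills the slot it claimed.
  void push_lock_free(Region r) {
    int slot = _index.fetch_add(1);
    if (slot >= _capacity) {
      _overflow = true;        // out of space: flag overflow, undo the claim
      _index.fetch_sub(1);
      return;
    }
    _base[slot] = r;
  }

  // Each popper claims the current top slot with a compare-and-swap.
  Region pop_lock_free() {
    int old_index = _index.load();
    while (old_index > 0) {
      if (_index.compare_exchange_weak(old_index, old_index - 1)) {
        return _base[old_index - 1];
      }
      // CAS failure reloads old_index; retry until the stack is empty
      // or a slot is claimed.
    }
    return Region();           // empty stack
  }

  bool overflow() const { return _overflow; }
  bool is_empty() const { return _index.load() == 0; }
};
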
comparing 1834:22cace5e30b5 with 1835:4805b9f4779e
@@ -248,25 +248,27 @@
   ~CMRegionStack();
   void allocate(size_t size);

   // This is lock-free; assumes that it will only be called in parallel
   // with other "push" operations (no pops).
-  void push(MemRegion mr);
-
-#if 0
-  // This is currently not used. See the comment in the .cpp file.
+  void push_lock_free(MemRegion mr);

   // Lock-free; assumes that it will only be called in parallel
   // with other "pop" operations (no pushes).
-  MemRegion pop();
-#endif // 0
+  MemRegion pop_lock_free();
+
+#if 0
+  // The routines that manipulate the region stack with a lock are
+  // not currently used. They should be retained, however, as a
+  // diagnostic aid.

   // These two are the implementations that use a lock. They can be
   // called concurrently with each other but they should not be called
   // concurrently with the lock-free versions (push() / pop()).
   void push_with_lock(MemRegion mr);
   MemRegion pop_with_lock();
+#endif

   bool isEmpty() { return _index == 0; }
   bool isFull() { return _index == _capacity; }

   bool overflow() { return _overflow; }
@@ -396,10 +398,11 @@
   volatile bool _has_overflown;
   // true: marking is concurrent, false: we're in remark
   volatile bool _concurrent;
   // set at the end of a Full GC so that marking aborts
   volatile bool _has_aborted;
+
   // used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
   volatile bool _restart_for_overflow;

   // This is true from the very start of concurrent marking until the
@@ -546,27 +549,34 @@
   size_t mark_stack_size()                { return _markStack.size(); }
   size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
   bool mark_stack_overflow()              { return _markStack.overflow(); }
   bool mark_stack_empty()                 { return _markStack.isEmpty(); }

-  // Manipulation of the region stack
-  bool region_stack_push(MemRegion mr) {
+  // (Lock-free) Manipulation of the region stack
+  bool region_stack_push_lock_free(MemRegion mr) {
     // Currently we only call the lock-free version during evacuation
     // pauses.
     assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");

-    _regionStack.push(mr);
+    _regionStack.push_lock_free(mr);
     if (_regionStack.overflow()) {
       set_has_overflown();
       return false;
     }
     return true;
   }
+
+  // Lock-free version of region-stack pop. Should only be
+  // called in tandem with other lock-free pops.
+  MemRegion region_stack_pop_lock_free() {
+    return _regionStack.pop_lock_free();
+  }
+
 #if 0
-  // Currently this is not used. See the comment in the .cpp file.
-  MemRegion region_stack_pop() { return _regionStack.pop(); }
-#endif // 0
+  // The routines that manipulate the region stack with a lock are
+  // not currently used. They should be retained, however, as a
+  // diagnostic aid.

   bool region_stack_push_with_lock(MemRegion mr) {
     // Currently we only call the lock-based version during either
     // concurrent marking or remark.
     assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
@@ -577,22 +587,33 @@
       set_has_overflown();
       return false;
     }
     return true;
   }
+
   MemRegion region_stack_pop_with_lock() {
     // Currently we only call the lock-based version during either
     // concurrent marking or remark.
     assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
            "if we are at a safepoint it should be the remark safepoint");

     return _regionStack.pop_with_lock();
   }
+#endif

   int region_stack_size() { return _regionStack.size(); }
   bool region_stack_overflow() { return _regionStack.overflow(); }
   bool region_stack_empty() { return _regionStack.isEmpty(); }
+
+  // Iterate over any regions that were aborted while draining the
+  // region stack (any such regions are saved in the corresponding
+  // CMTask) and invalidate (i.e. assign to the empty MemRegion())
+  // any regions that point into the collection set.
+  bool invalidate_aborted_regions_in_cset();
+
+  // Returns true if there are any aborted memory regions.
+  bool has_aborted_regions();

   bool concurrent_marking_in_progress() {
     return _concurrent_marking_in_progress;
   }
   void set_concurrent_marking_in_progress() {
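
The last two declarations in this hunk tie the per-task aborted regions to evacuation: a remembered region may point into the collection set by the time marking resumes, so it must be invalidated (assigned the empty MemRegion) before anyone tries to rescan it. Below is a hedged sketch of that scan; Word, Range, TaskState, num_tasks, and the in_collection_set predicate are stand-ins invented for the example, and the return-value convention is a guess rather than the documented contract of invalidate_aborted_regions_in_cset().

#include <cstddef>

// Stand-in types for this illustration only (not the HotSpot types).
struct Word;                                    // opaque heap word
struct Range {
  Word* start = nullptr;
  Word* end   = nullptr;
  bool is_empty() const { return start == end; }
};
struct TaskState {                              // per-marking-task record
  Range aborted_region;
};

// Sketch of the idea behind invalidate_aborted_regions_in_cset(): walk each
// task's remembered aborted region and clear any that now points into the
// collection set, so marking never resumes scanning evacuated memory.
// Returns whether any region was cleared (a guessed convention).
bool invalidate_aborted_regions_in_cset(TaskState* tasks, int num_tasks,
                                        bool (*in_collection_set)(Word*)) {
  bool cleared_any = false;
  for (int i = 0; i < num_tasks; i++) {
    Range& r = tasks[i].aborted_region;
    if (!r.is_empty() && in_collection_set(r.start)) {
      r = Range();                              // assign the empty region
      cleared_any = true;
    }
  }
  return cleared_any;
}
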
@@ -854,10 +875,19 @@
   // iteration, we do not rescan the first part of the region. This
   // should be NULL when we're not scanning a region from the region
   // stack.
   HeapWord* _region_finger;

+  // If we abort while scanning a region we record the remaining
+  // unscanned portion and check this field when marking restarts.
+  // This avoids having to push on the region stack while other
+  // marking threads may still be popping regions.
+  // If we were to push the unscanned portion directly to the
+  // region stack then we would need to using locking versions
+  // of the push and pop operations.
+  MemRegion _aborted_region;
+
   // the number of words this task has scanned
   size_t _words_scanned;
   // When _words_scanned reaches this limit, the regular clock is
   // called. Notice that this might be decreased under certain
   // circumstances (i.e. when we believe that we did an expensive
@@ -1009,10 +1039,19 @@

   bool has_aborted()            { return _has_aborted; }
   void set_has_aborted()        { _has_aborted = true; }
   void clear_has_aborted()      { _has_aborted = false; }
   bool claimed() { return _claimed; }
+
+  // Support routines for the partially scanned region that may be
+  // recorded as a result of aborting while draining the CMRegionStack
+  MemRegion aborted_region() { return _aborted_region; }
+  void set_aborted_region(MemRegion mr)
+                             { _aborted_region = mr; }
+
+  // Clears any recorded partially scanned region
+  void clear_aborted_region() { set_aborted_region(MemRegion()); }

   void set_oop_closure(OopClosure* oop_closure) {
     _oop_closure = oop_closure;
   }

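
Together with the _aborted_region field added earlier, these accessors replace the old approach of pushing the unscanned remainder back onto the region stack, which would have required the locking push. A hypothetical drain loop illustrating the intended order of operations is sketched below; Task, Range, Word, pop_region_lock_free, scan_until_clock, and drain_region_stack are invented for this illustration (the helpers are trivial stubs), and only the aborted_region()/set_aborted_region()/clear_aborted_region() names mirror this patch.

#include <cstddef>

// Stand-in types for this illustration only (not the HotSpot types).
struct Word;
struct Range {
  Word* start = nullptr;
  Word* end   = nullptr;
  bool is_empty() const { return start == end; }
};

struct Task {
  Range _aborted_region;                        // mirrors CMTask::_aborted_region

  Range aborted_region() const      { return _aborted_region; }
  void  set_aborted_region(Range r) { _aborted_region = r; }
  void  clear_aborted_region()      { set_aborted_region(Range()); }

  // Invented helpers, stubbed so the sketch is self-contained.
  Range pop_region_lock_free()      { return Range(); }    // stub: empty stack
  Word* scan_until_clock(Range r, bool* aborted) {
    *aborted = false;                                      // stub: always finish
    return r.end;
  }

  // Hypothetical drain loop: finish (or re-record) any previously aborted
  // region before popping new work, so the shared region stack is only
  // ever touched through the lock-free pop.
  void drain_region_stack() {
    Range r = aborted_region();
    if (!r.is_empty()) {
      clear_aborted_region();                   // claim the leftover work
    } else {
      r = pop_region_lock_free();
    }
    while (!r.is_empty()) {
      bool aborted = false;
      Word* finger = scan_until_clock(r, &aborted);
      if (aborted) {
        // Record the unscanned tail locally instead of pushing it back,
        // which would require the locking versions of push and pop.
        Range tail;
        tail.start = finger;
        tail.end   = r.end;
        set_aborted_region(tail);
        return;
      }
      r = pop_region_lock_free();
    }
  }
};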