comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 52b4284cb496 8d27d6113625
comparing 20184:84105dcdb05b with 20804:7848fc12602b

@@ -21 +21 @@
  * questions.
  *
  */

 #include "precompiled.hpp"
+#include "classfile/metadataOnStackMark.hpp"
 #include "classfile/symbolTable.hpp"
+#include "code/codeCache.hpp"
 #include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
+#include "memory/allocation.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "services/memTracker.hpp"

 // Concurrent marking bit map wrapper

 CMBitMapRO::CMBitMapRO(int shifter) :
@@ -54 +59 @@
   _shifter(shifter) {
   _bmStartWord = 0;
   _bmWordSize = 0;
 }

-HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
-                                               HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
+                                               const HeapWord* limit) const {
   // First we must round addr *up* to a possible object boundary.
   addr = (HeapWord*)align_size_up((intptr_t)addr,
                                   HeapWordSize << _shifter);
   size_t addrOffset = heapWordToOffset(addr);
   if (limit == NULL) {
@@ -72 +77 @@
   assert(nextAddr == limit || isMarked(nextAddr),
          "get_next_one postcondition");
   return nextAddr;
 }

-HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
-                                                 HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
+                                                 const HeapWord* limit) const {
   size_t addrOffset = heapWordToOffset(addr);
   if (limit == NULL) {
     limit = _bmStartWord + _bmWordSize;
   }
   size_t limitOffset = heapWordToOffset(limit);
@@ -93 +98 @@
   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
   return (int) (diff >> _shifter);
 }

 #ifndef PRODUCT
-bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
+bool CMBitMapRO::covers(MemRegion heap_rs) const {
   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
          "size inconsistency");
-  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
-         _bmWordSize == heap_rs.size()>>LogHeapWordSize;
+  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
+         _bmWordSize == heap_rs.word_size();
 }
 #endif

 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
   _bm.print_on_error(st, prefix);
 }

-bool CMBitMap::allocate(ReservedSpace heap_rs) {
-  _bmStartWord = (HeapWord*)(heap_rs.base());
-  _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes
-  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
-                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-  if (!brs.is_reserved()) {
-    warning("ConcurrentMark marking bit map allocation failure");
+size_t CMBitMap::compute_size(size_t heap_size) {
+  return heap_size / mark_distance();
+}
+
+size_t CMBitMap::mark_distance() {
+  return MinObjAlignmentInBytes * BitsPerByte;
+}
+
+void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
+  _bmStartWord = heap.start();
+  _bmWordSize = heap.word_size();
+
+  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
+  _bm.set_size(_bmWordSize >> _shifter);
+
+  storage->set_mapping_changed_listener(&_listener);
+}
+
+void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
+  if (zero_filled) {
+    return;
+  }
+  // We need to clear the bitmap on commit, removing any existing information.
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
+  _bm->clearRange(mr);
+}
+
+// Closure used for clearing the given mark bitmap.
+class ClearBitmapHRClosure : public HeapRegionClosure {
+ private:
+  ConcurrentMark* _cm;
+  CMBitMap* _bitmap;
+  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
+ public:
+  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
+    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    size_t const chunk_size_in_words = M / HeapWordSize;
+
+    HeapWord* cur = r->bottom();
+    HeapWord* const end = r->end();
+
+    while (cur < end) {
+      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
+      _bitmap->clearRange(mr);
+
+      cur += chunk_size_in_words;
+
+      // Abort iteration if after yielding the marking has been aborted.
+      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
+        return true;
+      }
+      // Repeat the asserts from before the start of the closure. We will do them
+      // as asserts here to minimize their overhead on the product. However, we
+      // will have them as guarantees at the beginning / end of the bitmap
+      // clearing to get some checking in the product.
+      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
+      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
+    }
+
     return false;
   }
-  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
-  // For now we'll just commit all of the bit map up front.
-  // Later on we'll try to be more parsimonious with swap.
-  if (!_virtual_space.initialize(brs, brs.size())) {
-    warning("ConcurrentMark marking bit map backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of concurrent marking bit map?");
-  _bm.set_map((uintptr_t*)_virtual_space.low());
-  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
-         _bmWordSize, "inconsistency in bit map sizing");
-  _bm.set_size(_bmWordSize >> _shifter);
-  return true;
-}
+};

 void CMBitMap::clearAll() {
-  _bm.clear();
+  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
+  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }

 void CMBitMap::markRange(MemRegion mr) {
   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
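A quick aside on the sizing math introduced above: compute_size() divides the heap size by mark_distance(), so with the usual 64-bit defaults (8-byte object alignment, 8 bits per byte) one bit covers one possible object start and one bitmap byte covers 64 heap bytes. A minimal standalone sketch of that arithmetic, using hypothetical constants rather than HotSpot's real globals:

#include <cstddef>
#include <cstdio>

// Hypothetical constants mirroring common 64-bit HotSpot defaults.
const size_t kMinObjAlignmentInBytes = 8;
const size_t kBitsPerByte            = 8;

// One mark bit per possible object start: a bit covers one alignment
// granule, so a byte of bitmap covers 64 bytes of heap here.
size_t mark_distance() { return kMinObjAlignmentInBytes * kBitsPerByte; }
size_t compute_bitmap_size(size_t heap_size) { return heap_size / mark_distance(); }

int main() {
  size_t heap_size = (size_t)4 << 30; // a 4 GB heap
  printf("bitmap size: %zu bytes (heap/%zu)\n",
         compute_bitmap_size(heap_size), mark_distance());
  return 0;
}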
@@ -387 +435 @@
   for (int i = 0; i < _index; i += 1) {
     f->do_oop(&_base[i]);
   }
 }

-bool ConcurrentMark::not_yet_marked(oop obj) const {
-  return _g1h->is_obj_ill(obj);
-}
-
 CMRootRegions::CMRootRegions() :
   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
   _should_abort(false), _next_survivor(NULL) { }

 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
@@ -477 +521 @@

 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
   return MAX2((n_par_threads + 2) / 4, 1U);
 }

-ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
+ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
   _g1h(g1h),
-  _markBitMap1(log2_intptr(MinObjAlignment)),
-  _markBitMap2(log2_intptr(MinObjAlignment)),
+  _markBitMap1(),
+  _markBitMap2(),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
   _marking_task_overhead(1.0),
   _cleanup_sleep_factor(0.0),
   _cleanup_task_overhead(1.0),
   _cleanup_list("Cleanup List"),
   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
-  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
+  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

   _prevMarkBitMap(&_markBitMap1),
   _nextMarkBitMap(&_markBitMap2),
@@ -508 +552 @@
   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

   _has_overflown(false),
   _concurrent(false),
   _has_aborted(false),
+  _aborted_gc_id(GCId::undefined()),
   _restart_for_overflow(false),
   _concurrent_marking_in_progress(false),

   // _verbose_level set below

@@ -538 +583 @@
   if (verbose_low()) {
     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                            "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
   }

-  if (!_markBitMap1.allocate(heap_rs)) {
-    warning("Failed to allocate first CM bit map");
-    return;
-  }
-  if (!_markBitMap2.allocate(heap_rs)) {
-    warning("Failed to allocate second CM bit map");
-    return;
-  }
+  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
+  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

   // Create & start a ConcurrentMark thread.
   _cmThread = new ConcurrentMarkThread(this);
   assert(cmThread() != NULL, "CM Thread should have been created");
   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
   if (_cmThread->osthread() == NULL) {
     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
   }

   assert(CGC_lock != NULL, "Where's the CGC_lock?");
-  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
-  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
+  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
+  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);

   _root_regions.init(_g1h, this);
@@ -717 +756 @@

   // Clear all the liveness counting data
   clear_all_count_data();

   // so that the call below can read a sensible value
-  _heap_start = (HeapWord*) heap_rs.base();
+  _heap_start = g1h->reserved_region().start();
   set_non_marking_state();
   _completed_initialization = true;
 }

@@ -727 +766 @@
-void ConcurrentMark::update_g1_committed(bool force) {
-  // If concurrent marking is not in progress, then we do not need to
-  // update _heap_end.
-  if (!concurrent_marking_in_progress() && !force) return;
-
-  MemRegion committed = _g1h->g1_committed();
-  assert(committed.start() == _heap_start, "start shouldn't change");
-  HeapWord* new_end = committed.end();
-  if (new_end > _heap_end) {
-    // The heap has been expanded.
-
-    _heap_end = new_end;
-  }
-  // Notice that the heap can also shrink. However, this only happens
-  // during a Full GC (at least currently) and the entire marking
-  // phase will bail out and the task will not be restarted. So, let's
-  // do nothing.
-}
-
 void ConcurrentMark::reset() {
   // Starting values for these two. This should be called in a STW
-  // phase. CM will be notified of any future g1_committed expansions
-  // will be at the end of evacuation pauses, when tasks are
-  // inactive.
-  MemRegion committed = _g1h->g1_committed();
-  _heap_start = committed.start();
-  _heap_end = committed.end();
+  // phase.
+  MemRegion reserved = _g1h->g1_reserved();
+  _heap_start = reserved.start();
+  _heap_end = reserved.end();

   // Separated the asserts so that we know which one fires.
   assert(_heap_start != NULL, "heap bounds should look ok");
   assert(_heap_end != NULL, "heap bounds should look ok");
   assert(_heap_start < _heap_end, "heap bounds should look ok");
@@ -820 +838 @@
     // in a STW phase.
     assert(!concurrent_marking_in_progress(), "invariant");
     assert(out_of_regions(),
            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                    p2i(_finger), p2i(_heap_end)));
-    update_g1_committed(true);
   }
 }

 void ConcurrentMark::set_non_marking_state() {
   // We set the global marking state to some default values when we're
@@ -839 +856 @@
   ShouldNotReachHere();
 }

 void ConcurrentMark::clearNextBitmap() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();

   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
   guarantee(cmThread()->during_cycle(), "invariant");

@@ -851 +867 @@
   // marking bitmap and getting it ready for the next cycle. During
   // this time no other cycle can start. So, let's make sure that this
   // is the case.
   guarantee(!g1h->mark_in_progress(), "invariant");

-  // clear the mark bitmap (no grey objects to start with).
-  // We need to do this in chunks and offer to yield in between
-  // each chunk.
-  HeapWord* start = _nextMarkBitMap->startWord();
-  HeapWord* end = _nextMarkBitMap->endWord();
-  HeapWord* cur = start;
-  size_t chunkSize = M;
-  while (cur < end) {
-    HeapWord* next = cur + chunkSize;
-    if (next > end) {
-      next = end;
-    }
-    MemRegion mr(cur,next);
-    _nextMarkBitMap->clearRange(mr);
-    cur = next;
-    do_yield_check();
-
-    // Repeat the asserts from above. We'll do them as asserts here to
-    // minimize their overhead on the product. However, we'll have
-    // them as guarantees at the beginning / end of the bitmap
-    // clearing to get some checking in the product.
-    assert(cmThread()->during_cycle(), "invariant");
-    assert(!g1h->mark_in_progress(), "invariant");
-  }
-
-  // Clear the liveness counting data
-  clear_all_count_data();
+  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
+  g1h->heap_region_iterate(&cl);
+
+  // Clear the liveness counting data. If the marking has been aborted, the abort()
+  // call already did that.
+  if (cl.complete()) {
+    clear_all_count_data();
+  }

   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
   guarantee(!g1h->mark_in_progress(), "invariant");
+}
+
+class CheckBitmapClearHRClosure : public HeapRegionClosure {
+  CMBitMap* _bitmap;
+  bool _error;
+ public:
+  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    // This closure can be called concurrently to the mutator, so we must make sure
+    // that the result of the getNextMarkedWordAddress() call is compared to the
+    // value passed to it as limit to detect any found bits.
+    // We can use the region's orig_end() for the limit and the comparison value
+    // as it always contains the "real" end of the region that never changes and
+    // has no side effects.
+    // Due to the latter, there can also be no problem with the compiler generating
+    // reloads of the orig_end() call.
+    HeapWord* end = r->orig_end();
+    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
+  }
+};
+
+bool ConcurrentMark::nextMarkBitmapIsClear() {
+  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
+  _g1h->heap_region_iterate(&cl);
+  return cl.complete();
 }

 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
  public:
   bool doHeapRegion(HeapRegion* r) {
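Note how CheckBitmapClearHRClosure above detects stray bits: getNextMarkedWordAddress() returns the limit it was given when nothing is set, so "result != end" means a mark was found. A toy standalone illustration of that contract (all names here are made up, not HotSpot's):

#include <bitset>
#include <cstddef>

// Toy stand-in for one region's slice of the mark bitmap.
struct ToyBitmap {
  std::bitset<1024> bits;
  // Mirrors getNextMarkedWordAddress: first set index in [from, limit),
  // or limit if none -- so "result == limit" means "clear".
  size_t next_marked(size_t from, size_t limit) const {
    for (size_t i = from; i < limit; i++) {
      if (bits.test(i)) return i;
    }
    return limit;
  }
};

bool region_is_clear(const ToyBitmap& bm, size_t bottom, size_t end) {
  return bm.next_marked(bottom, end) == end;
}

int main() {
  ToyBitmap bm;
  bool clear_before = region_is_clear(bm, 0, 1024);  // true: no bits set
  bm.bits.set(17);                                   // a stray mark appears
  bool clear_after  = region_is_clear(bm, 0, 1024);  // false: bit found
  return (clear_before && !clear_after) ? 0 : 1;
}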
@@ -974 +998 @@
   if (verbose_low()) {
     gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
   }

   if (concurrent()) {
-    ConcurrentGCThread::stsLeave();
+    SuspendibleThreadSet::leave();
   }

   bool barrier_aborted = !_first_overflow_barrier_sync.enter();

   if (concurrent()) {
-    ConcurrentGCThread::stsJoin();
+    SuspendibleThreadSet::join();
   }
   // at this point everyone should have synced up and not be doing any
   // more work

   if (verbose_low()) {
@@ -1017 +1041 @@
     // marking.
     reset_marking_state(true /* clear_overflow */);
     force_overflow()->update();

     if (G1Log::fine()) {
-      gclog_or_tty->date_stamp(PrintGCDateStamps);
-      gclog_or_tty->stamp(PrintGCTimeStamps);
+      gclog_or_tty->gclog_stamp(concurrent_gc_id());
       gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
     }
   }
 }

@@ -1034 +1057 @@
   if (verbose_low()) {
     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
   }

   if (concurrent()) {
-    ConcurrentGCThread::stsLeave();
+    SuspendibleThreadSet::leave();
   }

   bool barrier_aborted = !_second_overflow_barrier_sync.enter();

   if (concurrent()) {
-    ConcurrentGCThread::stsJoin();
+    SuspendibleThreadSet::join();
   }
   // at this point everything should be re-initialized and ready to go

   if (verbose_low()) {
     if (barrier_aborted) {
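The stsLeave()/stsJoin() calls become SuspendibleThreadSet::leave()/join() above, but the protocol is unchanged: a concurrent GC thread must drop its suspendible-set membership before blocking on the overflow barrier, otherwise a safepoint could wait on it indefinitely. A minimal standalone sketch of that pattern, with stand-in implementations rather than HotSpot's:

#include <mutex>

// Hypothetical stand-in for SuspendibleThreadSet::join/leave: a set of
// threads a safepoint must wait for; a blocked thread must not be a member.
namespace SuspendibleThreadSet {
  std::mutex m;
  int members = 0;
  void join()  { std::lock_guard<std::mutex> l(m); members++; }
  void leave() { std::lock_guard<std::mutex> l(m); members--; }
}

bool barrier_enter() { return true; } // stand-in for WorkGangBarrierSync::enter()

// The shape of enter_first/second_sync_barrier: drop membership,
// block on the barrier, then re-acquire membership.
bool sync_barrier(bool concurrent) {
  if (concurrent) SuspendibleThreadSet::leave();
  bool aborted = !barrier_enter();
  if (concurrent) SuspendibleThreadSet::join();
  return aborted;
}

int main() {
  SuspendibleThreadSet::join();       // the marking thread is suspendible
  bool aborted = sync_barrier(true);  // leaves the set while blocked
  SuspendibleThreadSet::leave();
  return aborted ? 1 : 0;
}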
1092 "this should only be done by a conc GC thread"); 1115 "this should only be done by a conc GC thread");
1093 ResourceMark rm; 1116 ResourceMark rm;
1094 1117
1095 double start_vtime = os::elapsedVTime(); 1118 double start_vtime = os::elapsedVTime();
1096 1119
1097 ConcurrentGCThread::stsJoin(); 1120 SuspendibleThreadSet::join();
1098 1121
1099 assert(worker_id < _cm->active_tasks(), "invariant"); 1122 assert(worker_id < _cm->active_tasks(), "invariant");
1100 CMTask* the_task = _cm->task(worker_id); 1123 CMTask* the_task = _cm->task(worker_id);
1101 the_task->record_start_time(); 1124 the_task->record_start_time();
1102 if (!_cm->has_aborted()) { 1125 if (!_cm->has_aborted()) {
1103 do { 1126 do {
1104 double start_vtime_sec = os::elapsedVTime(); 1127 double start_vtime_sec = os::elapsedVTime();
1105 double start_time_sec = os::elapsedTime();
1106 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 1128 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1107 1129
1108 the_task->do_marking_step(mark_step_duration_ms, 1130 the_task->do_marking_step(mark_step_duration_ms,
1109 true /* do_termination */, 1131 true /* do_termination */,
1110 false /* is_serial*/); 1132 false /* is_serial*/);
1111 1133
1112 double end_time_sec = os::elapsedTime();
1113 double end_vtime_sec = os::elapsedVTime(); 1134 double end_vtime_sec = os::elapsedVTime();
1114 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; 1135 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1115 double elapsed_time_sec = end_time_sec - start_time_sec;
1116 _cm->clear_has_overflown(); 1136 _cm->clear_has_overflown();
1117 1137
1118 bool ret = _cm->do_yield_check(worker_id); 1138 _cm->do_yield_check(worker_id);
1119 1139
1120 jlong sleep_time_ms; 1140 jlong sleep_time_ms;
1121 if (!_cm->has_aborted() && the_task->has_aborted()) { 1141 if (!_cm->has_aborted() && the_task->has_aborted()) {
1122 sleep_time_ms = 1142 sleep_time_ms =
1123 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); 1143 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1124 ConcurrentGCThread::stsLeave(); 1144 SuspendibleThreadSet::leave();
1125 os::sleep(Thread::current(), sleep_time_ms, false); 1145 os::sleep(Thread::current(), sleep_time_ms, false);
1126 ConcurrentGCThread::stsJoin(); 1146 SuspendibleThreadSet::join();
1127 } 1147 }
1128 double end_time2_sec = os::elapsedTime();
1129 double elapsed_time2_sec = end_time2_sec - start_time_sec;
1130
1131 #if 0
1132 gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
1133 "overhead %1.4lf",
1134 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
1135 the_task->conc_overhead(os::elapsedTime()) * 8.0);
1136 gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
1137 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
1138 #endif
1139 } while (!_cm->has_aborted() && the_task->has_aborted()); 1148 } while (!_cm->has_aborted() && the_task->has_aborted());
1140 } 1149 }
1141 the_task->record_end_time(); 1150 the_task->record_end_time();
1142 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant"); 1151 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1143 1152
1144 ConcurrentGCThread::stsLeave(); 1153 SuspendibleThreadSet::leave();
1145 1154
1146 double end_vtime = os::elapsedVTime(); 1155 double end_vtime = os::elapsedVTime();
1147 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime); 1156 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1148 } 1157 }
1149 1158
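The sleep computation that survives the cleanup above implements marking throttling: a step that burned elapsed_vtime_sec of CPU earns a sleep of elapsed_vtime_sec * sleep_factor, capping the marking worker at roughly 1/(1 + sleep_factor) of a CPU. A standalone sketch of just that arithmetic:

#include <cstdio>

// Hedged sketch of the throttling math in CMConcurrentMarkingTask::work:
// after a marking step that consumed elapsed_vtime_sec of CPU time, sleep
// proportionally so marking stays near its target overhead.
long long sleep_ms_after_step(double elapsed_vtime_sec, double sleep_factor) {
  return (long long)(elapsed_vtime_sec * sleep_factor * 1000.0);
}

int main() {
  // e.g. a 10 ms step with sleep_factor 1.0 earns a 10 ms sleep,
  // targeting ~50% marking CPU usage on that worker.
  printf("%lld ms\n", sleep_ms_after_step(0.010, 1.0));
  return 0;
}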
@@ -1219 +1228 @@
     }
   }
 };

 void ConcurrentMark::scanRootRegions() {
+  // Start of concurrent marking.
+  ClassLoaderDataGraph::clear_claimed_marks();
+
   // scan_in_progress() will have been set to true only if there was
   // at least one root region to scan. So, if it's false, we
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
     _parallel_marking_threads = calc_parallel_marking_threads();
@@ -1267 +1279 @@
   set_concurrency_and_phase(active_workers, true /* concurrent */);

   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (use_parallel_marking_threads()) {
     _parallel_workers->set_active_workers((int)active_workers);
-    // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
+    // Don't set _n_par_threads because it affects MT in process_roots()
     // and the decisions on that MT processing is made elsewhere.
     assert(_parallel_workers->active_workers() > 0, "Should have been set");
     _parallel_workers->run_task(&markingTask);
   } else {
     markingTask.work(0);
@@ -1298 +1310 @@
     HandleMark hm; // handle scope
     Universe::heap()->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(before)");
   }
+  g1h->check_bitmaps("Remark Start");

   G1CollectorPolicy* g1p = g1h->g1_policy();
   g1p->record_concurrent_mark_remark_start();

   double start = os::elapsedTime();
@@ -1346 +1359 @@
     HandleMark hm; // handle scope
     Universe::heap()->prepare_for_verify();
     Universe::verify(VerifyOption_G1UseNextMarking,
                      " VerifyDuringGC:(after)");
   }
+  g1h->check_bitmaps("Remark End");
   assert(!restart_for_overflow(), "sanity");
   // Completely reset the marking state since marking completed
   set_non_marking_state();
 }

@@ -1387 +1401 @@
   // to 1 the bits on the region bitmap that correspond to its
   // associated "continues humongous" regions.
   void set_bit_for_region(HeapRegion* hr) {
     assert(!hr->continuesHumongous(), "should have filtered those out");

-    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
+    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
     if (!hr->startsHumongous()) {
       // Normal (non-humongous) case: just set the bit.
       _region_bm->par_at_put(index, true);
     } else {
       // Starts humongous case: calculate how many regions are part of
@@ -1575 +1589 @@
     // we have missed accounting some objects during the actual marking.
     if (exp_marked_bytes > act_marked_bytes) {
       if (_verbose) {
         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
-                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
+                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
       }
       failures += 1;
     }

     // Verify the bit, for this region, in the actual and expected
     // (which was just calculated) region bit maps.
     // We're not OK if the bit in the calculated expected region
     // bitmap is set and the bit in the actual region bitmap is not.
-    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
+    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

     bool expected = _exp_region_bm->at(index);
     bool actual = _region_bm->at(index);
     if (expected && !actual) {
       if (_verbose) {
         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                                "expected: %s, actual: %s",
-                               hr->hrs_index(),
+                               hr->hrm_index(),
                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
       }
       failures += 1;
     }

@@ -1613 +1627 @@

       if (expected && !actual) {
         if (_verbose) {
           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                  "expected: %s, actual: %s",
-                                 hr->hrs_index(), i,
+                                 hr->hrm_index(), i,
                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
         }
         failures += 1;
       }
     }
@@ -1995 +2009 @@
     HandleMark hm; // handle scope
     Universe::heap()->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(before)");
   }
+  g1h->check_bitmaps("Cleanup Start");

   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();

   double start = os::elapsedTime();
@@ -2032 +2047 @@
   if (VerifyDuringGC) {
     // Verify that the counting data accumulated during marking matches
     // that calculated by walking the marking bitmap.

     // Bitmaps to hold expected values
-    BitMap expected_region_bm(_region_bm.size(), false);
-    BitMap expected_card_bm(_card_bm.size(), false);
+    BitMap expected_region_bm(_region_bm.size(), true);
+    BitMap expected_card_bm(_card_bm.size(), true);

     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
                                                  &_region_bm,
                                                  &_card_bm,
                                                  &expected_region_bm,
@@ -2135 +2150 @@

   // Clean up will have freed any regions completely full of garbage.
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();

-  // We need to make this be a "collection" so any collection pause that
-  // races with it goes around and waits for completeCleanup to finish.
-  g1h->increment_total_collections();
-
-  // We reclaimed old regions so we should calculate the sizes to make
-  // sure we update the old gen/space data.
-  g1h->g1mm()->update_sizes();
-
   if (VerifyDuringGC) {
     HandleMark hm; // handle scope
     Universe::heap()->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(after)");
   }
+  g1h->check_bitmaps("Cleanup End");

   g1h->verify_region_sets_optional();
+
+  // We need to make this be a "collection" so any collection pause that
+  // races with it goes around and waits for completeCleanup to finish.
+  g1h->increment_total_collections();
+
+  // Clean out dead classes and update Metaspace sizes.
+  if (ClassUnloadingWithConcurrentMark) {
+    ClassLoaderDataGraph::purge();
+  }
+  MetaspaceGC::compute_new_size();
+
+  // We reclaimed old regions so we should calculate the sizes to make
+  // sure we update the old gen/space data.
+  g1h->g1mm()->update_sizes();
+  g1h->allocation_context_stats().update_after_mark();
+
   g1h->trace_heap_after_concurrent_cycle();
 }

 void ConcurrentMark::completeCleanup() {
   if (has_aborted()) return;
@@ -2168 +2192 @@
     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                            "cleanup list has %u entries",
                            _cleanup_list.length());
   }

-  // Noone else should be accessing the _cleanup_list at this point,
-  // so it's not necessary to take any locks
+  // No one else should be accessing the _cleanup_list at this point,
+  // so it is not necessary to take any locks
   while (!_cleanup_list.is_empty()) {
-    HeapRegion* hr = _cleanup_list.remove_head();
+    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
     assert(hr != NULL, "Got NULL from a non-empty list");
     hr->par_clear();
     tmp_free_list.add_ordered(hr);

     // Instead of adding one region at a time to the secondary_free_list,
@@ -2380 +2404 @@
     ReferenceProcessor* rp = _g1h->ref_processor_cm();
     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
   }

   virtual void work(uint worker_id) {
+    ResourceMark rm;
+    HandleMark hm;
     CMTask* task = _cm->task(worker_id);
     G1CMIsAliveClosure g1_is_alive(_g1h);
     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

@@ -2437 +2463 @@
   _g1h->set_par_threads(_active_workers);
   _workers->run_task(&enq_task_proxy);
   _g1h->set_par_threads(0);
 }

+void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
+  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
+}
+
+// Helper class to get rid of some boilerplate code.
+class G1RemarkGCTraceTime : public GCTraceTime {
+  static bool doit_and_prepend(bool doit) {
+    if (doit) {
+      gclog_or_tty->put(' ');
+    }
+    return doit;
+  }
+
+ public:
+  G1RemarkGCTraceTime(const char* title, bool doit)
+    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
+                  G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+  }
+};
+
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   if (has_overflown()) {
     // Skip processing the discovered references if we have
     // overflown the global marking stack. Reference objects
     // only get discovered once so it is OK to not
@@ -2462 +2508 @@
   // tables from the displayed time.
   {
     if (G1Log::finer()) {
       gclog_or_tty->put(' ');
     }
-    GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
+    GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());

     ReferenceProcessor* rp = g1h->ref_processor_cm();

     // See the comment in G1CollectedHeap::ref_processing_init()
     // about how reference processing currently works in G1.
@@ -2519 +2565 @@
     const ReferenceProcessorStats& stats =
         rp->process_discovered_references(&g1_is_alive,
                                           &g1_keep_alive,
                                           &g1_drain_mark_stack,
                                           executor,
-                                          g1h->gc_timer_cm());
+                                          g1h->gc_timer_cm(),
+                                          concurrent_gc_id());
     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

     // The do_oop work routines of the keep_alive and drain_marking_stack
     // oop closures will set the has_overflown flag if we overflow the
     // global marking stack.
@@ -2548 +2595 @@
   if (has_overflown()) {
     // We can not trust g1_is_alive if the marking stack overflowed
     return;
   }

-  g1h->unlink_string_and_symbol_table(&g1_is_alive,
-                                      /* process_strings */ false, // currently strings are always roots
-                                      /* process_symbols */ true);
+  assert(_markStack.isEmpty(), "Marking should have completed");
+
+  // Unload Klasses, String, Symbols, Code Cache, etc.
+  {
+    G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
+
+    if (ClassUnloadingWithConcurrentMark) {
+      // Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack
+      // part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase.
+      // Defer the cleaning until we have complete on_stack data.
+      MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
+
+      bool purged_classes;
+
+      {
+        G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
+        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
+      }
+
+      {
+        G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
+        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
+      }
+
+      {
+        G1RemarkGCTraceTime trace("Deallocate Metadata", G1Log::finest());
+        ClassLoaderDataGraph::free_deallocate_lists();
+      }
+    }
+
+    if (G1StringDedup::is_enabled()) {
+      G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
+      G1StringDedup::unlink(&g1_is_alive);
+    }
+  }
 }

 void ConcurrentMark::swapMarkBitMaps() {
   CMBitMapRO* temp = _prevMarkBitMap;
   _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
   _nextMarkBitMap = (CMBitMap*) temp;
 }
+
+class CMObjectClosure;
+
+// Closure for iterating over objects, currently only used for
+// processing SATB buffers.
+class CMObjectClosure : public ObjectClosure {
+ private:
+  CMTask* _task;
+
+ public:
+  void do_object(oop obj) {
+    _task->deal_with_reference(obj);
+  }
+
+  CMObjectClosure(CMTask* task) : _task(task) { }
+};
+
+class G1RemarkThreadsClosure : public ThreadClosure {
+  CMObjectClosure _cm_obj;
+  G1CMOopClosure _cm_cl;
+  MarkingCodeBlobClosure _code_cl;
+  int _thread_parity;
+  bool _is_par;
+
+ public:
+  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
+    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
+    _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
+
+  void do_thread(Thread* thread) {
+    if (thread->is_Java_thread()) {
+      if (thread->claim_oops_do(_is_par, _thread_parity)) {
+        JavaThread* jt = (JavaThread*)thread;
+
+        // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
+        // however the liveness of oops reachable from nmethods have very complex lifecycles:
+        // * Alive if on the stack of an executing method
+        // * Weakly reachable otherwise
+        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
+        // live by the SATB invariant but other oops recorded in nmethods may behave differently.
+        jt->nmethods_do(&_code_cl);
+
+        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
+      }
+    } else if (thread->is_VM_thread()) {
+      if (thread->claim_oops_do(_is_par, _thread_parity)) {
+        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
+      }
+    }
+  }
+};

 class CMRemarkTask: public AbstractGangTask {
  private:
   ConcurrentMark* _cm;
   bool _is_serial;
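G1RemarkGCTraceTime, added above, is essentially an RAII scope that prints a title and the elapsed time of each unloading sub-phase when the corresponding log level is on. A standalone sketch of the same idea using std::chrono (illustrative only; the real class delegates to GCTraceTime):

#include <chrono>
#include <cstdio>

// Hedged sketch of an RAII phase timer; names and output format are
// invented here, not HotSpot's.
class ScopedPhaseTimer {
  const char* _title;
  bool _enabled;
  std::chrono::steady_clock::time_point _start;
 public:
  ScopedPhaseTimer(const char* title, bool enabled)
      : _title(title), _enabled(enabled), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    if (!_enabled) return;
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - _start).count();
    printf("[%s, %lld us]\n", _title, (long long)us);
  }
};

int main() {
  { ScopedPhaseTimer t("Unloading", true);
    { ScopedPhaseTimer t2("System Dictionary Unloading", true); /* phase 1 */ }
    { ScopedPhaseTimer t2("Parallel Unloading", true);          /* phase 2 */ }
  } // destructors fire here, innermost scopes first
  return 0;
}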
@@ -2570 +2700 @@
     // Since all available tasks are actually started, we should
     // only proceed if we're supposed to be actived.
     if (worker_id < _cm->active_tasks()) {
       CMTask* task = _cm->task(worker_id);
       task->record_start_time();
+      {
+        ResourceMark rm;
+        HandleMark hm;
+
+        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
+        Threads::threads_do(&threads_f);
+      }
+
       do {
         task->do_marking_step(1000000000.0 /* something very large */,
                               true /* do_termination */,
                               _is_serial);
       } while (task->has_aborted() && !_cm->has_overflown());
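The claim_oops_do() calls in G1RemarkThreadsClosure rely on a parity scheme so that, with many remark workers, each thread's SATB buffer and nmethods are processed exactly once per pass. A toy standalone sketch of such claiming (types and fields here are invented):

#include <atomic>

// Each remark pass bumps a global parity; a worker claims a thread by
// CAS-ing the thread's recorded parity from a stale value to the current
// one, so exactly one worker processes each thread per pass.
std::atomic<int> g_strong_roots_parity{1};

struct ToyThread {
  std::atomic<int> claimed_parity{0};
  bool claim_oops_do(int parity) {
    int expected = claimed_parity.load();
    return expected != parity &&
           claimed_parity.compare_exchange_strong(expected, parity);
  }
};

int main() {
  ToyThread t;
  int parity = g_strong_roots_parity.load();
  bool first  = t.claim_oops_do(parity);  // true: this worker processes t
  bool second = t.claim_oops_do(parity);  // false: already claimed this pass
  return (first && !second) ? 0 : 1;
}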
@@ -2591 +2729 @@

 void ConcurrentMark::checkpointRootsFinalWork() {
   ResourceMark rm;
   HandleMark hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());

   g1h->ensure_parsability(false);

   if (G1CollectedHeap::use_parallel_gc_threads()) {
     G1CollectedHeap::StrongRootsScope srs(g1h);
@@ -2671 +2811 @@
       str = "";
     } else if (!_g1h->is_in_g1_reserved(obj)) {
       str = " O";
     } else {
       HeapRegion* hr = _g1h->heap_region_containing(obj);
-      guarantee(hr != NULL, "invariant");
       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
       bool marked = _g1h->is_marked(obj, _vo);

       if (over_tams) {
         str = " >";
@@ -2810 +2949 @@
   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
 }

 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
   _nextMarkBitMap->clearRange(mr);
-}
-
-void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
-  clearRangePrevBitmap(mr);
-  clearRangeNextBitmap(mr);
 }

 HeapRegion*
 ConcurrentMark::claim_region(uint worker_id) {
   // "checkpoint" the finger
@@ -2850 +2984 @@
     // If it comes across a region that suddenly becomes CH, the
     // scenario will be similar to b). So, the race between
     // claim_region() and a humongous object allocation might force us
     // to do a bit of unnecessary work (due to some unnecessary bitmap
     // iterations) but it should not introduce and correctness issues.
     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
-    HeapWord* bottom = curr_region->bottom();
-    HeapWord* end = curr_region->end();
-    HeapWord* limit = curr_region->next_top_at_mark_start();
-
-    if (verbose_low()) {
-      gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
-                             "["PTR_FORMAT", "PTR_FORMAT"), "
-                             "limit = "PTR_FORMAT,
-                             worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
-    }
+
+    // Above heap_region_containing_raw may return NULL as we always scan claim
+    // until the end of the heap. In this case, just jump to the next region.
+    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

     // Is the gap between reading the finger and doing the CAS too long?
     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
-    if (res == finger) {
+    if (res == finger && curr_region != NULL) {
       // we succeeded
+      HeapWord* bottom = curr_region->bottom();
+      HeapWord* limit = curr_region->next_top_at_mark_start();
+
+      if (verbose_low()) {
+        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
+                               "["PTR_FORMAT", "PTR_FORMAT"), "
+                               "limit = "PTR_FORMAT,
+                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
+      }

       // notice that _finger == end cannot be guaranteed here since,
       // someone else might have moved the finger even further
       assert(_finger >= end, "the finger should have moved forward");

@@ -2896 +3033 @@
       return NULL;
     }
   } else {
     assert(_finger > finger, "the finger should have moved forward");
     if (verbose_low()) {
-      gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
-                             "global finger = "PTR_FORMAT", "
-                             "our finger = "PTR_FORMAT,
-                             worker_id, p2i(_finger), p2i(finger));
+      if (curr_region == NULL) {
+        gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
+                               "global finger = "PTR_FORMAT", "
+                               "our finger = "PTR_FORMAT,
+                               worker_id, p2i(_finger), p2i(finger));
+      } else {
+        gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
+                               "global finger = "PTR_FORMAT", "
+                               "our finger = "PTR_FORMAT,
+                               worker_id, p2i(_finger), p2i(finger));
+      }
     }

     // read it again
     finger = _finger;
   }
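claim_region() above is a lock-free hand-off: every worker reads the shared finger, computes the end of the region it points into, and tries to CAS the finger to that end; only the winner owns the region, and with the new NULL-region case the finger can now also be pushed past uncommitted space. A standalone sketch of the CAS protocol (region size and addresses are made up):

#include <atomic>
#include <cstdint>

// Hypothetical region size; the real code uses HeapRegion::GrainWords.
const uintptr_t kRegionBytes = 1 << 20;

const uintptr_t g_heap_start = kRegionBytes;          // toy heap bounds
const uintptr_t g_heap_end   = 17 * kRegionBytes;
std::atomic<uintptr_t> g_finger{g_heap_start};        // shared claim finger

// Returns the claimed region start, or 0 when out of regions. A losing
// CAS just means another worker moved the finger; re-read and retry.
uintptr_t claim_region() {
  while (true) {
    uintptr_t finger = g_finger.load();
    if (finger >= g_heap_end) return 0;         // out of regions
    uintptr_t end = finger + kRegionBytes;      // end of the region at finger
    uintptr_t expected = finger;
    if (g_finger.compare_exchange_strong(expected, end)) {
      return finger;                            // we own [finger, end)
    }
    // CAS failed: somebody else moved the finger; loop and re-read it.
  }
}

int main() {
  uintptr_t r0 = claim_region();
  uintptr_t r1 = claim_region();
  return (r0 == g_heap_start && r1 == g_heap_start + kRegionBytes) ? 0 : 1;
}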
3014 // given that the global finger could be pointing to a free region 3158 // given that the global finger could be pointing to a free region
3015 // which subsequently becomes continues humongous. If that 3159 // which subsequently becomes continues humongous. If that
3016 // happens, heap_region_containing() will return the bottom of the 3160 // happens, heap_region_containing() will return the bottom of the
3017 // corresponding starts humongous region and the check below will 3161 // corresponding starts humongous region and the check below will
3018 // not hold any more. 3162 // not hold any more.
3163 // Since we always iterate over all regions, we might get a NULL HeapRegion
3164 // here.
3019 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 3165 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3020 guarantee(global_finger == global_hr->bottom(), 3166 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
3021 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 3167 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3022 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 3168 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
3023 } 3169 }
3024 3170
3025 // Verify the task fingers 3171 // Verify the task fingers
3028 CMTask* task = _tasks[i]; 3174 CMTask* task = _tasks[i];
3029 HeapWord* task_finger = task->finger(); 3175 HeapWord* task_finger = task->finger();
3030 if (task_finger != NULL && task_finger < _heap_end) { 3176 if (task_finger != NULL && task_finger < _heap_end) {
3031 // See above note on the global finger verification. 3177 // See above note on the global finger verification.
3032 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 3178 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3033 guarantee(task_finger == task_hr->bottom() || 3179 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
3034 !task_hr->in_collection_set(), 3180 !task_hr->in_collection_set(),
3035 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 3181 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3036 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 3182 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
3037 } 3183 }
3038 } 3184 }
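Both guarantees above were relaxed to tolerate a NULL region, since heap_region_containing_raw() can now legitimately return NULL for an uncommitted part of the heap. A tiny sketch of the null-tolerant check, with illustrative types rather than HotSpot's:

#include <cassert>

// Once regions can be uncommitted, a lookup may legitimately return NULL,
// so the invariant must be guarded.
struct Region { char* bottom; };

void verify_finger(char* finger, const Region* r /* may be NULL */) {
  // Either the finger points into an uncommitted gap (r == NULL), or it
  // must sit exactly at the bottom of the region containing it.
  assert(r == nullptr || finger == r->bottom);
  (void)finger; (void)r;            // keep release builds warning-free
}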
3107 } 3253 }
3108 3254
3109 assert(limit_idx <= end_idx, "or else use atomics"); 3255 assert(limit_idx <= end_idx, "or else use atomics");
3110 3256
3111 // Aggregate the "stripe" in the count data associated with hr. 3257 // Aggregate the "stripe" in the count data associated with hr.
3112 uint hrs_index = hr->hrs_index(); 3258 uint hrm_index = hr->hrm_index();
3113 size_t marked_bytes = 0; 3259 size_t marked_bytes = 0;
3114 3260
3115 for (uint i = 0; i < _max_worker_id; i += 1) { 3261 for (uint i = 0; i < _max_worker_id; i += 1) {
3116 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i); 3262 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3117 BitMap* task_card_bm = _cm->count_card_bitmap_for(i); 3263 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3118 3264
3119 // Fetch the marked_bytes in this region for task i and 3265 // Fetch the marked_bytes in this region for task i and
3120 // add it to the running total for this region. 3266 // add it to the running total for this region.
3121 marked_bytes += marked_bytes_array[hrs_index]; 3267 marked_bytes += marked_bytes_array[hrm_index];
3122 3268
3123 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) 3269 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3124 // into the global card bitmap. 3270 // into the global card bitmap.
3125 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3271 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3126 3272
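The aggregation pass above merges per-worker counting data: each worker keeps a private marked-bytes total per region (indexed by the renamed hrm_index) and a private card bitmap, and the pass sums the totals while OR-ing each task's card bits into the global bitmap. A compact sketch of the same shape, with illustrative names:

#include <cstddef>
#include <vector>

// Illustrative stand-ins for the per-worker counting structures.
struct WorkerCounts {
  std::vector<size_t> marked_bytes_per_region;  // indexed like hrm_index
  std::vector<bool>   card_bitmap;              // one bit per card
};

// Sum every worker's live bytes for one region and union the workers'
// card marks for that region's card range into the global card bitmap.
size_t aggregate_region(const std::vector<WorkerCounts>& workers,
                        size_t region_index,
                        size_t card_begin, size_t card_end,
                        std::vector<bool>& global_card_bitmap) {
  size_t marked_bytes = 0;
  for (const WorkerCounts& w : workers) {
    marked_bytes += w.marked_bytes_per_region[region_index];
    for (size_t c = card_begin; c < card_end; ++c) {
      if (w.card_bitmap[c]) {
        global_card_bitmap[c] = true;           // union into the global map
      }
    }
  }
  return marked_bytes;
}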
3239 } 3385 }
3240 } 3386 }
3241 3387
3242 // abandon current marking iteration due to a Full GC 3388 // abandon current marking iteration due to a Full GC
3243 void ConcurrentMark::abort() { 3389 void ConcurrentMark::abort() {
3244 // Clear all marks to force marking thread to do nothing 3390 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
3391 // concurrent bitmap clearing.
3245 _nextMarkBitMap->clearAll(); 3392 _nextMarkBitMap->clearAll();
3393
3394 // Note we cannot clear the previous marking bitmap here
3395 // since VerifyDuringGC verifies the objects marked during
3396 // a full GC against the previous bitmap.
3397
3246 // Clear the liveness counting data 3398 // Clear the liveness counting data
3247 clear_all_count_data(); 3399 clear_all_count_data();
3248 // Empty mark stack 3400 // Empty mark stack
3249 reset_marking_state(); 3401 reset_marking_state();
3250 for (uint i = 0; i < _max_worker_id; ++i) { 3402 for (uint i = 0; i < _max_worker_id; ++i) {
3251 _tasks[i]->clear_region_fields(); 3403 _tasks[i]->clear_region_fields();
3252 } 3404 }
3253 _first_overflow_barrier_sync.abort(); 3405 _first_overflow_barrier_sync.abort();
3254 _second_overflow_barrier_sync.abort(); 3406 _second_overflow_barrier_sync.abort();
3407 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3408 if (!gc_id.is_undefined()) {
3409 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3410 // to detect that it was aborted. Only keep track of the id of the first GC that aborted.
3411 _aborted_gc_id = gc_id;
3412 }
3255 _has_aborted = true; 3413 _has_aborted = true;
3256 3414
3257 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3415 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3258 satb_mq_set.abandon_partial_marking(); 3416 satb_mq_set.abandon_partial_marking();
3259 // This can be called either during or outside marking, we'll read 3417 // This can be called either during or outside marking, we'll read
3262 false, /* new active value */ 3420 false, /* new active value */
3263 satb_mq_set.is_active() /* expected_active */); 3421 satb_mq_set.is_active() /* expected_active */);
3264 3422
3265 _g1h->trace_heap_after_concurrent_cycle(); 3423 _g1h->trace_heap_after_concurrent_cycle();
3266 _g1h->register_concurrent_cycle_end(); 3424 _g1h->register_concurrent_cycle_end();
3425 }
3426
3427 const GCId& ConcurrentMark::concurrent_gc_id() {
3428 if (has_aborted()) {
3429 return _aborted_gc_id;
3430 }
3431 return _g1h->gc_tracer_cm()->gc_id();
3267 } 3432 }
3268 3433
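abort() now distinguishes the two bitmaps: the next bitmap is cleared eagerly (so the following cycle can skip its concurrent clearing phase) while the previous bitmap is kept for VerifyDuringGC, and the id of the GC that caused the abort is latched exactly once. A sketch of that bookkeeping; GCId and the surrounding names are stand-ins for HotSpot's, and the "first abort wins" behavior relies on the tracer's id becoming undefined once the cycle has been ended:

#include <cstdint>

struct GCId {
  static const uint32_t kUndefined = UINT32_MAX;
  uint32_t value = kUndefined;
  bool is_undefined() const { return value == kUndefined; }
};

struct MarkingState {
  GCId aborted_gc_id;                 // undefined until the first abort
  bool has_aborted = false;

  void abort(const GCId& current_gc_id) {
    if (!current_gc_id.is_undefined()) {
      aborted_gc_id = current_gc_id;  // later aborts see an undefined id
    }                                 // and leave the first one in place
    has_aborted = true;
  }

  // Report the cycle's id: the aborting GC's id if we aborted, else the
  // live tracer's id (mirrors concurrent_gc_id() above).
  const GCId& concurrent_gc_id(const GCId& live_id) const {
    return has_aborted ? aborted_gc_id : live_id;
  }
};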
3269 static void print_ms_time_info(const char* prefix, const char* name, 3434 static void print_ms_time_info(const char* prefix, const char* name,
3270 NumberSeq& ns) { 3435 NumberSeq& ns) {
3271 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3436 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3320 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3485 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3321 } 3486 }
3322 3487
3323 // We take a break if someone is trying to stop the world. 3488 // We take a break if someone is trying to stop the world.
3324 bool ConcurrentMark::do_yield_check(uint worker_id) { 3489 bool ConcurrentMark::do_yield_check(uint worker_id) {
3325 if (should_yield()) { 3490 if (SuspendibleThreadSet::should_yield()) {
3326 if (worker_id == 0) { 3491 if (worker_id == 0) {
3327 _g1h->g1_policy()->record_concurrent_pause(); 3492 _g1h->g1_policy()->record_concurrent_pause();
3328 } 3493 }
3329 cmThread()->yield(); 3494 SuspendibleThreadSet::yield();
3330 return true; 3495 return true;
3331 } else { 3496 } else {
3332 return false; 3497 return false;
3333 } 3498 }
3334 }
3335
3336 bool ConcurrentMark::should_yield() {
3337 return cmThread()->should_yield();
3338 }
3339
3340 bool ConcurrentMark::containing_card_is_marked(void* p) {
3341 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3342 return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3343 }
3344
3345 bool ConcurrentMark::containing_cards_are_marked(void* start,
3346 void* last) {
3347 return containing_card_is_marked(start) &&
3348 containing_card_is_marked(last);
3349 } 3499 }
3350 3500
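do_yield_check() now delegates to SuspendibleThreadSet, replacing the per-thread cmThread()->yield() and making ConcurrentMark::should_yield() redundant (hence its removal above, along with the unused card-mark queries). A simplified stand-in for that protocol, assuming only standard C++ primitives: workers poll a shared flag and park until the VM thread resumes the set.

#include <atomic>
#include <condition_variable>
#include <mutex>

// Simplified stand-in for HotSpot's SuspendibleThreadSet.
class SuspendibleSet {
  std::atomic<bool> _should_yield{false};
  std::mutex _m;
  std::condition_variable _cv;
public:
  bool should_yield() const { return _should_yield.load(); }

  // Called by a worker at a safe point when should_yield() is true;
  // blocks until the VM thread resumes the set.
  void yield() {
    std::unique_lock<std::mutex> lk(_m);
    _cv.wait(lk, [this] { return !_should_yield.load(); });
  }

  void suspend_all() { _should_yield.store(true); }  // VM thread side
  void resume_all() {
    _should_yield.store(false);
    _cv.notify_all();
  }
};

A worker's check then reduces to "if (sts.should_yield()) sts.yield();", mirroring the rewritten do_yield_check().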
3351 #ifndef PRODUCT 3501 #ifndef PRODUCT
3352 // for debugging purposes 3502 // for debugging purposes
3353 void ConcurrentMark::print_finger() { 3503 void ConcurrentMark::print_finger() {
3408 // the iteration 3558 // the iteration
3409 return !_task->has_aborted(); 3559 return !_task->has_aborted();
3410 } 3560 }
3411 }; 3561 };
3412 3562
3413 // Closure for iterating over objects, currently only used for
3414 // processing SATB buffers.
3415 class CMObjectClosure : public ObjectClosure {
3416 private:
3417 CMTask* _task;
3418
3419 public:
3420 void do_object(oop obj) {
3421 _task->deal_with_reference(obj);
3422 }
3423
3424 CMObjectClosure(CMTask* task) : _task(task) { }
3425 };
3426
3427 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3563 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3428 ConcurrentMark* cm, 3564 ConcurrentMark* cm,
3429 CMTask* task) 3565 CMTask* task)
3430 : _g1h(g1h), _cm(cm), _task(task) { 3566 : _g1h(g1h), _cm(cm), _task(task) {
3431 assert(_ref_processor == NULL, "should be initialized to NULL"); 3567 assert(_ref_processor == NULL, "should be initialized to NULL");
3435 assert(_ref_processor != NULL, "should not be NULL"); 3571 assert(_ref_processor != NULL, "should not be NULL");
3436 } 3572 }
3437 } 3573 }
3438 3574
3439 void CMTask::setup_for_region(HeapRegion* hr) { 3575 void CMTask::setup_for_region(HeapRegion* hr) {
3440 // Separated the asserts so that we know which one fires.
3441 assert(hr != NULL, 3576 assert(hr != NULL,
3442 "claim_region() should have filtered out continues humongous regions"); 3577 "claim_region() should have filtered out NULL regions");
3443 assert(!hr->continuesHumongous(), 3578 assert(!hr->continuesHumongous(),
3444 "claim_region() should have filtered out continues humongous regions"); 3579 "claim_region() should have filtered out continues humongous regions");
3445 3580
3446 if (_cm->verbose_low()) { 3581 if (_cm->verbose_low()) {
3447 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3582 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3613 _interval_start_time_ms = curr_time_ms; 3748 _interval_start_time_ms = curr_time_ms;
3614 _all_clock_intervals_ms.add(last_interval_ms); 3749 _all_clock_intervals_ms.add(last_interval_ms);
3615 3750
3616 if (_cm->verbose_medium()) { 3751 if (_cm->verbose_medium()) {
3617 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " 3752 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3618 "scanned = %d%s, refs reached = %d%s", 3753 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3619 _worker_id, last_interval_ms, 3754 _worker_id, last_interval_ms,
3620 _words_scanned, 3755 _words_scanned,
3621 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", 3756 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3622 _refs_reached, 3757 _refs_reached,
3623 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); 3758 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3624 } 3759 }
3625 #endif // _MARKING_STATS_ 3760 #endif // _MARKING_STATS_
3626 3761
3627 // (4) We check whether we should yield. If we have to, then we abort. 3762 // (4) We check whether we should yield. If we have to, then we abort.
3628 if (_cm->should_yield()) { 3763 if (SuspendibleThreadSet::should_yield()) {
3629 // We should yield. To do this we abort the task. The caller is 3764 // We should yield. To do this we abort the task. The caller is
3630 // responsible for yielding. 3765 // responsible for yielding.
3631 set_has_aborted(); 3766 set_has_aborted();
3632 statsOnly( ++_aborted_yield ); 3767 statsOnly( ++_aborted_yield );
3633 return; 3768 return;
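Step (4) consults the same SuspendibleThreadSet flag but never blocks inside the marking step itself; it records an abort and unwinds, leaving the actual yield to the caller. A sketch of that discipline, with g_should_yield standing in for SuspendibleThreadSet::should_yield():

#include <atomic>

std::atomic<bool> g_should_yield{false};   // stand-in for the STS flag

bool do_marking_step(bool& has_aborted) {
  if (g_should_yield.load()) {
    has_aborted = true;    // statsOnly bookkeeping would go here
    return false;          // abort early; the caller yields and re-enters
  }
  // ... scan the bitmap, drain local queues, process SATB buffers ...
  return true;
}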
3884 if (_cm->verbose_medium()) { 4019 if (_cm->verbose_medium()) {
3885 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 4020 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3886 } 4021 }
3887 statsOnly( ++_satb_buffers_processed ); 4022 statsOnly( ++_satb_buffers_processed );
3888 regular_clock_call(); 4023 regular_clock_call();
3889 }
3890 }
3891
3892 if (!concurrent() && !has_aborted()) {
3893 // We should only do this during remark.
3894 if (G1CollectedHeap::use_parallel_gc_threads()) {
3895 satb_mq_set.par_iterate_closure_all_threads(_worker_id);
3896 } else {
3897 satb_mq_set.iterate_closure_all_threads();
3898 } 4024 }
3899 } 4025 }
3900 4026
3901 _draining_satb_buffers = false; 4027 _draining_satb_buffers = false;
3902 4028
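With the remark-time par_iterate_closure_all_threads()/iterate_closure_all_threads() calls gone, draining reduces to the loop above over completed SATB buffers. A sketch of that loop's shape, with illustrative names rather than HotSpot's:

#include <functional>
#include <queue>
#include <vector>

struct SATBQueueSet {
  std::queue<std::vector<void*>> completed;     // globally enqueued buffers

  // Process one completed buffer, feeding every recorded reference to the
  // closure; returns false once no completed buffer remains.
  bool apply_closure_to_completed_buffer(const std::function<void(void*)>& f) {
    if (completed.empty()) return false;
    for (void* ref : completed.front()) f(ref); // process each recorded ref
    completed.pop();
    return true;
  }
};

void drain_satb_buffers(SATBQueueSet& set,
                        const std::function<void(void*)>& deal_with_reference) {
  while (set.apply_closure_to_completed_buffer(deal_with_reference)) {
    // In the real code each iteration also calls regular_clock_call() so
    // the worker notices yield and abort requests promptly.
  }
}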
4553 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4679 _total_prev_live_bytes(0), _total_next_live_bytes(0),
4554 _hum_used_bytes(0), _hum_capacity_bytes(0), 4680 _hum_used_bytes(0), _hum_capacity_bytes(0),
4555 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 4681 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4556 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 4682 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4557 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4683 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4558 MemRegion g1_committed = g1h->g1_committed();
4559 MemRegion g1_reserved = g1h->g1_reserved(); 4684 MemRegion g1_reserved = g1h->g1_reserved();
4560 double now = os::elapsedTime(); 4685 double now = os::elapsedTime();
4561 4686
4562 // Print the header of the output. 4687 // Print the header of the output.
4563 _out->cr(); 4688 _out->cr();
4564 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 4689 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4565 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" 4690 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4566 G1PPRL_SUM_ADDR_FORMAT("committed")
4567 G1PPRL_SUM_ADDR_FORMAT("reserved") 4691 G1PPRL_SUM_ADDR_FORMAT("reserved")
4568 G1PPRL_SUM_BYTE_FORMAT("region-size"), 4692 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4569 p2i(g1_committed.start()), p2i(g1_committed.end()),
4570 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 4693 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4571 HeapRegion::GrainBytes); 4694 HeapRegion::GrainBytes);
4572 _out->print_cr(G1PPRL_LINE_PREFIX); 4695 _out->print_cr(G1PPRL_LINE_PREFIX);
4573 _out->print_cr(G1PPRL_LINE_PREFIX 4696 _out->print_cr(G1PPRL_LINE_PREFIX
4574 G1PPRL_TYPE_H_FORMAT 4697 G1PPRL_TYPE_H_FORMAT
4625 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4748 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4626 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4749 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4627 } 4750 }
4628 4751
4629 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4752 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4630 const char* type = ""; 4753 const char* type = r->get_type_str();
4631 HeapWord* bottom = r->bottom(); 4754 HeapWord* bottom = r->bottom();
4632 HeapWord* end = r->end(); 4755 HeapWord* end = r->end();
4633 size_t capacity_bytes = r->capacity(); 4756 size_t capacity_bytes = r->capacity();
4634 size_t used_bytes = r->used(); 4757 size_t used_bytes = r->used();
4635 size_t prev_live_bytes = r->live_bytes(); 4758 size_t prev_live_bytes = r->live_bytes();
4636 size_t next_live_bytes = r->next_live_bytes(); 4759 size_t next_live_bytes = r->next_live_bytes();
4637 double gc_eff = r->gc_efficiency(); 4760 double gc_eff = r->gc_efficiency();
4638 size_t remset_bytes = r->rem_set()->mem_size(); 4761 size_t remset_bytes = r->rem_set()->mem_size();
4639 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4762 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4640 4763
4641 if (r->used() == 0) { 4764 if (r->startsHumongous()) {
4642 type = "FREE";
4643 } else if (r->is_survivor()) {
4644 type = "SURV";
4645 } else if (r->is_young()) {
4646 type = "EDEN";
4647 } else if (r->startsHumongous()) {
4648 type = "HUMS";
4649
4650 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4765 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4651 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4766 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4652 "they should have been zeroed after the last time we used them"); 4767 "they should have been zeroed after the last time we used them");
4653 // Set up the _hum_* fields. 4768 // Set up the _hum_* fields.
4654 _hum_capacity_bytes = capacity_bytes; 4769 _hum_capacity_bytes = capacity_bytes;
4657 _hum_next_live_bytes = next_live_bytes; 4772 _hum_next_live_bytes = next_live_bytes;
4658 get_hum_bytes(&used_bytes, &capacity_bytes, 4773 get_hum_bytes(&used_bytes, &capacity_bytes,
4659 &prev_live_bytes, &next_live_bytes); 4774 &prev_live_bytes, &next_live_bytes);
4660 end = bottom + HeapRegion::GrainWords; 4775 end = bottom + HeapRegion::GrainWords;
4661 } else if (r->continuesHumongous()) { 4776 } else if (r->continuesHumongous()) {
4662 type = "HUMC";
4663 get_hum_bytes(&used_bytes, &capacity_bytes, 4777 get_hum_bytes(&used_bytes, &capacity_bytes,
4664 &prev_live_bytes, &next_live_bytes); 4778 &prev_live_bytes, &next_live_bytes);
4665 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4779 assert(end == bottom + HeapRegion::GrainWords, "invariant");
4666 } else {
4667 type = "OLD";
4668 } 4780 }
4669 4781
4670 _total_used_bytes += used_bytes; 4782 _total_used_bytes += used_bytes;
4671 _total_capacity_bytes += capacity_bytes; 4783 _total_capacity_bytes += capacity_bytes;
4672 _total_prev_live_bytes += prev_live_bytes; 4784 _total_prev_live_bytes += prev_live_bytes;
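The removed if/else chain classified regions inside the printing closure; the new code asks the region itself via r->get_type_str(), so a region type added later is labelled in one place. A sketch of the centralized mapping the closure now relies on (RegionKind and region_type_str() are illustrative stand-ins, not HotSpot's API):

enum class RegionKind { Free, Eden, Survivor, StartsHumongous,
                        ContinuesHumongous, Old };

// One switch replaces the per-caller classification chain; the labels
// match those the old closure printed.
const char* region_type_str(RegionKind k) {
  switch (k) {
    case RegionKind::Free:               return "FREE";
    case RegionKind::Eden:               return "EDEN";
    case RegionKind::Survivor:           return "SURV";
    case RegionKind::StartsHumongous:    return "HUMS";
    case RegionKind::ContinuesHumongous: return "HUMC";
    case RegionKind::Old:                return "OLD";
  }
  return "????";                         // unreachable; silences warnings
}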