Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 3772:6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
Summary: Perform a heap verification after the first phase of G1's full GC using objects' mark words to determine liveness. The third parameter of the heap verification routines, which was used in G1 to determine which marking bitmap to use in liveness calculations, has been changed from a boolean to an enum with values defined for using the mark word, and the 'prev' and 'next' bitmaps.
Reviewed-by: tonyp, ysr
author | johnc |
---|---|
date | Tue, 14 Jun 2011 11:01:10 -0700 |
parents | 842b840e67db |
children | 23d434c6290d |
rev | line source |
---|---|
342 | 1 /* |
2149 | 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "classfile/symbolTable.hpp" | |
3771 | 27 #include "gc_implementation/g1/concurrentMark.inline.hpp" |
1972 | 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" |
29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | |
30 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
3771 | 31 #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
1972 | 32 #include "gc_implementation/g1/g1RemSet.hpp" |
33 #include "gc_implementation/g1/heapRegionRemSet.hpp" | |
34 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2037
diff
changeset
|
35 #include "gc_implementation/shared/vmGCOperations.hpp" |
1972 | 36 #include "memory/genOopClosures.inline.hpp" |
37 #include "memory/referencePolicy.hpp" | |
38 #include "memory/resourceArea.hpp" | |
39 #include "oops/oop.inline.hpp" | |
40 #include "runtime/handles.inline.hpp" | |
41 #include "runtime/java.hpp" | |
342 | 42 |
43 // | |
44 // CMS Bit Map Wrapper | |
45 | |
46 CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter): | |
47 _bm((uintptr_t*)NULL,0), | |
48 _shifter(shifter) { | |
49 _bmStartWord = (HeapWord*)(rs.base()); | |
50 _bmWordSize = rs.size()/HeapWordSize; // rs.size() is in bytes | |
51 ReservedSpace brs(ReservedSpace::allocation_align_size_up( | |
52 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); | |
53 | |
54 guarantee(brs.is_reserved(), "couldn't allocate CMS bit map"); | |
55 // For now we'll just commit all of the bit map up fromt. | |
56 // Later on we'll try to be more parsimonious with swap. | |
57 guarantee(_virtual_space.initialize(brs, brs.size()), | |
58 "couldn't reseve backing store for CMS bit map"); | |
59 assert(_virtual_space.committed_size() == brs.size(), | |
60 "didn't reserve backing store for all of CMS bit map?"); | |
61 _bm.set_map((uintptr_t*)_virtual_space.low()); | |
62 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= | |
63 _bmWordSize, "inconsistency in bit map sizing"); | |
64 _bm.set_size(_bmWordSize >> _shifter); | |
65 } | |
66 | |
67 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr, | |
68 HeapWord* limit) const { | |
69 // First we must round addr *up* to a possible object boundary. | |
70 addr = (HeapWord*)align_size_up((intptr_t)addr, | |
71 HeapWordSize << _shifter); | |
72 size_t addrOffset = heapWordToOffset(addr); | |
73 if (limit == NULL) limit = _bmStartWord + _bmWordSize; | |
74 size_t limitOffset = heapWordToOffset(limit); | |
75 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); | |
76 HeapWord* nextAddr = offsetToHeapWord(nextOffset); | |
77 assert(nextAddr >= addr, "get_next_one postcondition"); | |
78 assert(nextAddr == limit || isMarked(nextAddr), | |
79 "get_next_one postcondition"); | |
80 return nextAddr; | |
81 } | |
82 | |
83 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr, | |
84 HeapWord* limit) const { | |
85 size_t addrOffset = heapWordToOffset(addr); | |
86 if (limit == NULL) limit = _bmStartWord + _bmWordSize; | |
87 size_t limitOffset = heapWordToOffset(limit); | |
88 size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset); | |
89 HeapWord* nextAddr = offsetToHeapWord(nextOffset); | |
90 assert(nextAddr >= addr, "get_next_one postcondition"); | |
91 assert(nextAddr == limit || !isMarked(nextAddr), | |
92 "get_next_one postcondition"); | |
93 return nextAddr; | |
94 } | |
95 | |
96 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const { | |
97 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check"); | |
98 return (int) (diff >> _shifter); | |
99 } | |
100 | |
101 bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) { | |
102 HeapWord* left = MAX2(_bmStartWord, mr.start()); | |
103 HeapWord* right = MIN2(_bmStartWord + _bmWordSize, mr.end()); | |
104 if (right > left) { | |
105 // Right-open interval [leftOffset, rightOffset). | |
106 return _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right)); | |
107 } else { | |
108 return true; | |
109 } | |
110 } | |
111 | |
112 void CMBitMapRO::mostly_disjoint_range_union(BitMap* from_bitmap, | |
113 size_t from_start_index, | |
114 HeapWord* to_start_word, | |
115 size_t word_num) { | |
116 _bm.mostly_disjoint_range_union(from_bitmap, | |
117 from_start_index, | |
118 heapWordToOffset(to_start_word), | |
119 word_num); | |
120 } | |
121 | |
122 #ifndef PRODUCT | |
123 bool CMBitMapRO::covers(ReservedSpace rs) const { | |
124 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); | |
645
c3a720eefe82
6816308: Changes to allow builds with latest Windows SDK 6.1 on 64bit Windows 2003
kvn
parents:
619
diff
changeset
|
125 assert(((size_t)_bm.size() * (size_t)(1 << _shifter)) == _bmWordSize, |
342 | 126 "size inconsistency"); |
127 return _bmStartWord == (HeapWord*)(rs.base()) && | |
128 _bmWordSize == rs.size()>>LogHeapWordSize; | |
129 } | |
130 #endif | |
131 | |
132 void CMBitMap::clearAll() { | |
133 _bm.clear(); | |
134 return; | |
135 } | |
136 | |
137 void CMBitMap::markRange(MemRegion mr) { | |
138 mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); | |
139 assert(!mr.is_empty(), "unexpected empty region"); | |
140 assert((offsetToHeapWord(heapWordToOffset(mr.end())) == | |
141 ((HeapWord *) mr.end())), | |
142 "markRange memory region end is not card aligned"); | |
143 // convert address range into offset range | |
144 _bm.at_put_range(heapWordToOffset(mr.start()), | |
145 heapWordToOffset(mr.end()), true); | |
146 } | |
147 | |
148 void CMBitMap::clearRange(MemRegion mr) { | |
149 mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); | |
150 assert(!mr.is_empty(), "unexpected empty region"); | |
151 // convert address range into offset range | |
152 _bm.at_put_range(heapWordToOffset(mr.start()), | |
153 heapWordToOffset(mr.end()), false); | |
154 } | |
155 | |
156 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr, | |
157 HeapWord* end_addr) { | |
158 HeapWord* start = getNextMarkedWordAddress(addr); | |
159 start = MIN2(start, end_addr); | |
160 HeapWord* end = getNextUnmarkedWordAddress(start); | |
161 end = MIN2(end, end_addr); | |
162 assert(start <= end, "Consistency check"); | |
163 MemRegion mr(start, end); | |
164 if (!mr.is_empty()) { | |
165 clearRange(mr); | |
166 } | |
167 return mr; | |
168 } | |
169 | |
170 CMMarkStack::CMMarkStack(ConcurrentMark* cm) : | |
171 _base(NULL), _cm(cm) | |
172 #ifdef ASSERT | |
173 , _drain_in_progress(false) | |
174 , _drain_in_progress_yields(false) | |
175 #endif | |
176 {} | |
177 | |
178 void CMMarkStack::allocate(size_t size) { | |
179 _base = NEW_C_HEAP_ARRAY(oop, size); | |
180 if (_base == NULL) | |
181 vm_exit_during_initialization("Failed to allocate " | |
182 "CM region mark stack"); | |
183 _index = 0; | |
184 // QQQQ cast ... | |
185 _capacity = (jint) size; | |
186 _oops_do_bound = -1; | |
187 NOT_PRODUCT(_max_depth = 0); | |
188 } | |
189 | |
190 CMMarkStack::~CMMarkStack() { | |
191 if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base); | |
192 } | |
193 | |
194 void CMMarkStack::par_push(oop ptr) { | |
195 while (true) { | |
196 if (isFull()) { | |
197 _overflow = true; | |
198 return; | |
199 } | |
200 // Otherwise... | |
201 jint index = _index; | |
202 jint next_index = index+1; | |
203 jint res = Atomic::cmpxchg(next_index, &_index, index); | |
204 if (res == index) { | |
205 _base[index] = ptr; | |
206 // Note that we don't maintain this atomically. We could, but it | |
207 // doesn't seem necessary. | |
208 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); | |
209 return; | |
210 } | |
211 // Otherwise, we need to try again. | |
212 } | |
213 } | |
214 | |
215 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) { | |
216 while (true) { | |
217 if (isFull()) { | |
218 _overflow = true; | |
219 return; | |
220 } | |
221 // Otherwise... | |
222 jint index = _index; | |
223 jint next_index = index + n; | |
224 if (next_index > _capacity) { | |
225 _overflow = true; | |
226 return; | |
227 } | |
228 jint res = Atomic::cmpxchg(next_index, &_index, index); | |
229 if (res == index) { | |
230 for (int i = 0; i < n; i++) { | |
231 int ind = index + i; | |
232 assert(ind < _capacity, "By overflow test above."); | |
233 _base[ind] = ptr_arr[i]; | |
234 } | |
235 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); | |
236 return; | |
237 } | |
238 // Otherwise, we need to try again. | |
239 } | |
240 } | |
241 | |
242 | |
243 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) { | |
244 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
245 jint start = _index; | |
246 jint next_index = start + n; | |
247 if (next_index > _capacity) { | |
248 _overflow = true; | |
249 return; | |
250 } | |
251 // Otherwise. | |
252 _index = next_index; | |
253 for (int i = 0; i < n; i++) { | |
254 int ind = start + i; | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
255 assert(ind < _capacity, "By overflow test above."); |
342 | 256 _base[ind] = ptr_arr[i]; |
257 } | |
258 } | |
259 | |
260 | |
261 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { | |
262 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
263 jint index = _index; | |
264 if (index == 0) { | |
265 *n = 0; | |
266 return false; | |
267 } else { | |
268 int k = MIN2(max, index); | |
269 jint new_ind = index - k; | |
270 for (int j = 0; j < k; j++) { | |
271 ptr_arr[j] = _base[new_ind + j]; | |
272 } | |
273 _index = new_ind; | |
274 *n = k; | |
275 return true; | |
276 } | |
277 } | |
278 | |
279 | |
280 CMRegionStack::CMRegionStack() : _base(NULL) {} | |
281 | |
282 void CMRegionStack::allocate(size_t size) { | |
283 _base = NEW_C_HEAP_ARRAY(MemRegion, size); | |
284 if (_base == NULL) | |
285 vm_exit_during_initialization("Failed to allocate " | |
286 "CM region mark stack"); | |
287 _index = 0; | |
288 // QQQQ cast ... | |
289 _capacity = (jint) size; | |
290 } | |
291 | |
292 CMRegionStack::~CMRegionStack() { | |
293 if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base); | |
294 } | |
295 | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
296 void CMRegionStack::push_lock_free(MemRegion mr) { |
342 | 297 assert(mr.word_size() > 0, "Precondition"); |
298 while (true) { | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
299 jint index = _index; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
300 |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
301 if (index >= _capacity) { |
342 | 302 _overflow = true; |
303 return; | |
304 } | |
305 // Otherwise... | |
306 jint next_index = index+1; | |
307 jint res = Atomic::cmpxchg(next_index, &_index, index); | |
308 if (res == index) { | |
309 _base[index] = mr; | |
310 return; | |
311 } | |
312 // Otherwise, we need to try again. | |
313 } | |
314 } | |
315 | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
316 // Lock-free pop of the region stack. Called during the concurrent |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
317 // marking / remark phases. Should only be called in tandem with |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
318 // other lock-free pops. |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
319 MemRegion CMRegionStack::pop_lock_free() { |
342 | 320 while (true) { |
321 jint index = _index; | |
322 | |
323 if (index == 0) { | |
324 return MemRegion(); | |
325 } | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
326 // Otherwise... |
342 | 327 jint next_index = index-1; |
328 jint res = Atomic::cmpxchg(next_index, &_index, index); | |
329 if (res == index) { | |
330 MemRegion mr = _base[next_index]; | |
331 if (mr.start() != NULL) { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
332 assert(mr.end() != NULL, "invariant"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
333 assert(mr.word_size() > 0, "invariant"); |
342 | 334 return mr; |
335 } else { | |
336 // that entry was invalidated... let's skip it | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
337 assert(mr.end() == NULL, "invariant"); |
342 | 338 } |
339 } | |
340 // Otherwise, we need to try again. | |
341 } | |
342 } | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
343 |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
344 #if 0 |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
345 // The routines that manipulate the region stack with a lock are |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
346 // not currently used. They should be retained, however, as a |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
347 // diagnostic aid. |
1358
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
348 |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
349 void CMRegionStack::push_with_lock(MemRegion mr) { |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
350 assert(mr.word_size() > 0, "Precondition"); |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
351 MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag); |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
352 |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
353 if (isFull()) { |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
354 _overflow = true; |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
355 return; |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
356 } |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
357 |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
358 _base[_index] = mr; |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
359 _index += 1; |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
360 } |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
361 |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
362 MemRegion CMRegionStack::pop_with_lock() { |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
363 MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag); |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
364 |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
365 while (true) { |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
366 if (_index == 0) { |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
367 return MemRegion(); |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
368 } |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
369 _index -= 1; |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
370 |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
371 MemRegion mr = _base[_index]; |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
372 if (mr.start() != NULL) { |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
373 assert(mr.end() != NULL, "invariant"); |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
374 assert(mr.word_size() > 0, "invariant"); |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
375 return mr; |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
376 } else { |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
377 // that entry was invalidated... let's skip it |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
378 assert(mr.end() == NULL, "invariant"); |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
379 } |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
380 } |
72f725c5a7be
6940310: G1: MT-unsafe calls to CM::region_stack_push() / CM::region_stack_pop()
tonyp
parents:
1317
diff
changeset
|
381 } |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
382 #endif |
342 | 383 |
384 bool CMRegionStack::invalidate_entries_into_cset() { | |
385 bool result = false; | |
386 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
387 for (int i = 0; i < _oops_do_bound; ++i) { | |
388 MemRegion mr = _base[i]; | |
389 if (mr.start() != NULL) { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
390 assert(mr.end() != NULL, "invariant"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
391 assert(mr.word_size() > 0, "invariant"); |
342 | 392 HeapRegion* hr = g1h->heap_region_containing(mr.start()); |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
393 assert(hr != NULL, "invariant"); |
342 | 394 if (hr->in_collection_set()) { |
395 // The region points into the collection set | |
396 _base[i] = MemRegion(); | |
397 result = true; | |
398 } | |
399 } else { | |
400 // that entry was invalidated... let's skip it | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
401 assert(mr.end() == NULL, "invariant"); |
342 | 402 } |
403 } | |
404 return result; | |
405 } | |
406 | |
407 template<class OopClosureClass> | |
408 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) { | |
409 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after | |
410 || SafepointSynchronize::is_at_safepoint(), | |
411 "Drain recursion must be yield-safe."); | |
412 bool res = true; | |
413 debug_only(_drain_in_progress = true); | |
414 debug_only(_drain_in_progress_yields = yield_after); | |
415 while (!isEmpty()) { | |
416 oop newOop = pop(); | |
417 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop"); | |
418 assert(newOop->is_oop(), "Expected an oop"); | |
419 assert(bm == NULL || bm->isMarked((HeapWord*)newOop), | |
420 "only grey objects on this stack"); | |
421 // iterate over the oops in this oop, marking and pushing | |
422 // the ones in CMS generation. | |
423 newOop->oop_iterate(cl); | |
424 if (yield_after && _cm->do_yield_check()) { | |
425 res = false; break; | |
426 } | |
427 } | |
428 debug_only(_drain_in_progress = false); | |
429 return res; | |
430 } | |
431 | |
432 void CMMarkStack::oops_do(OopClosure* f) { | |
433 if (_index == 0) return; | |
434 assert(_oops_do_bound != -1 && _oops_do_bound <= _index, | |
435 "Bound must be set."); | |
436 for (int i = 0; i < _oops_do_bound; i++) { | |
437 f->do_oop(&_base[i]); | |
438 } | |
439 _oops_do_bound = -1; | |
440 } | |
441 | |
442 bool ConcurrentMark::not_yet_marked(oop obj) const { | |
443 return (_g1h->is_obj_ill(obj) | |
444 || (_g1h->is_in_permanent(obj) | |
445 && !nextMarkBitMap()->isMarked((HeapWord*)obj))); | |
446 } | |
447 | |
448 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
449 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
450 #endif // _MSC_VER | |
451 | |
452 ConcurrentMark::ConcurrentMark(ReservedSpace rs, | |
453 int max_regions) : | |
454 _markBitMap1(rs, MinObjAlignment - 1), | |
455 _markBitMap2(rs, MinObjAlignment - 1), | |
456 | |
457 _parallel_marking_threads(0), | |
458 _sleep_factor(0.0), | |
459 _marking_task_overhead(1.0), | |
460 _cleanup_sleep_factor(0.0), | |
461 _cleanup_task_overhead(1.0), | |
2152 | 462 _cleanup_list("Cleanup List"), |
342 | 463 _region_bm(max_regions, false /* in_resource_area*/), |
464 _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >> | |
465 CardTableModRefBS::card_shift, | |
466 false /* in_resource_area*/), | |
467 _prevMarkBitMap(&_markBitMap1), | |
468 _nextMarkBitMap(&_markBitMap2), | |
469 _at_least_one_mark_complete(false), | |
470 | |
471 _markStack(this), | |
472 _regionStack(), | |
473 // _finger set in set_non_marking_state | |
474 | |
475 _max_task_num(MAX2(ParallelGCThreads, (size_t)1)), | |
476 // _active_tasks set in set_non_marking_state | |
477 // _tasks set inside the constructor | |
478 _task_queues(new CMTaskQueueSet((int) _max_task_num)), | |
479 _terminator(ParallelTaskTerminator((int) _max_task_num, _task_queues)), | |
480 | |
481 _has_overflown(false), | |
482 _concurrent(false), | |
619
7ea5ca260b28
6814467: G1: small fixes related to concurrent marking verboseness
tonyp
parents:
470
diff
changeset
|
483 _has_aborted(false), |
7ea5ca260b28
6814467: G1: small fixes related to concurrent marking verboseness
tonyp
parents:
470
diff
changeset
|
484 _restart_for_overflow(false), |
7ea5ca260b28
6814467: G1: small fixes related to concurrent marking verboseness
tonyp
parents:
470
diff
changeset
|
485 _concurrent_marking_in_progress(false), |
7ea5ca260b28
6814467: G1: small fixes related to concurrent marking verboseness
tonyp
parents:
470
diff
changeset
|
486 _should_gray_objects(false), |
342 | 487 |
488 // _verbose_level set below | |
489 | |
490 _init_times(), | |
491 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(), | |
492 _cleanup_times(), | |
493 _total_counting_time(0.0), | |
494 _total_rs_scrub_time(0.0), | |
495 | |
936 | 496 _parallel_workers(NULL) |
342 | 497 { |
498 CMVerboseLevel verbose_level = | |
499 (CMVerboseLevel) G1MarkingVerboseLevel; | |
500 if (verbose_level < no_verbose) | |
501 verbose_level = no_verbose; | |
502 if (verbose_level > high_verbose) | |
503 verbose_level = high_verbose; | |
504 _verbose_level = verbose_level; | |
505 | |
506 if (verbose_low()) | |
507 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", " | |
508 "heap end = "PTR_FORMAT, _heap_start, _heap_end); | |
509 | |
1284 | 510 _markStack.allocate(MarkStackSize); |
751 | 511 _regionStack.allocate(G1MarkRegionStackSize); |
342 | 512 |
513 // Create & start a ConcurrentMark thread. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
514 _cmThread = new ConcurrentMarkThread(this); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
515 assert(cmThread() != NULL, "CM Thread should have been created"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
516 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
517 |
342 | 518 _g1h = G1CollectedHeap::heap(); |
519 assert(CGC_lock != NULL, "Where's the CGC_lock?"); | |
520 assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency"); | |
521 assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency"); | |
522 | |
523 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); | |
1282 | 524 satb_qs.set_buffer_size(G1SATBBufferSize); |
342 | 525 |
526 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num); | |
527 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num); | |
528 | |
529 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail | |
530 _active_tasks = _max_task_num; | |
531 for (int i = 0; i < (int) _max_task_num; ++i) { | |
532 CMTaskQueue* task_queue = new CMTaskQueue(); | |
533 task_queue->initialize(); | |
534 _task_queues->register_queue(i, task_queue); | |
535 | |
536 _tasks[i] = new CMTask(i, this, task_queue, _task_queues); | |
537 _accum_task_vtime[i] = 0.0; | |
538 } | |
539 | |
1284 | 540 if (ConcGCThreads > ParallelGCThreads) { |
541 vm_exit_during_initialization("Can't have more ConcGCThreads " | |
342 | 542 "than ParallelGCThreads."); |
543 } | |
544 if (ParallelGCThreads == 0) { | |
545 // if we are not running with any parallel GC threads we will not | |
546 // spawn any marking threads either | |
547 _parallel_marking_threads = 0; | |
548 _sleep_factor = 0.0; | |
549 _marking_task_overhead = 1.0; | |
550 } else { | |
1284 | 551 if (ConcGCThreads > 0) { |
552 // notice that ConcGCThreads overwrites G1MarkingOverheadPercent | |
342 | 553 // if both are set |
554 | |
1284 | 555 _parallel_marking_threads = ConcGCThreads; |
342 | 556 _sleep_factor = 0.0; |
557 _marking_task_overhead = 1.0; | |
751 | 558 } else if (G1MarkingOverheadPercent > 0) { |
342 | 559 // we will calculate the number of parallel marking threads |
560 // based on a target overhead with respect to the soft real-time | |
561 // goal | |
562 | |
751 | 563 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0; |
342 | 564 double overall_cm_overhead = |
751 | 565 (double) MaxGCPauseMillis * marking_overhead / |
566 (double) GCPauseIntervalMillis; | |
342 | 567 double cpu_ratio = 1.0 / (double) os::processor_count(); |
568 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio); | |
569 double marking_task_overhead = | |
570 overall_cm_overhead / marking_thread_num * | |
571 (double) os::processor_count(); | |
572 double sleep_factor = | |
573 (1.0 - marking_task_overhead) / marking_task_overhead; | |
574 | |
575 _parallel_marking_threads = (size_t) marking_thread_num; | |
576 _sleep_factor = sleep_factor; | |
577 _marking_task_overhead = marking_task_overhead; | |
578 } else { | |
579 _parallel_marking_threads = MAX2((ParallelGCThreads + 2) / 4, (size_t)1); | |
580 _sleep_factor = 0.0; | |
581 _marking_task_overhead = 1.0; | |
582 } | |
583 | |
584 if (parallel_marking_threads() > 1) | |
585 _cleanup_task_overhead = 1.0; | |
586 else | |
587 _cleanup_task_overhead = marking_task_overhead(); | |
588 _cleanup_sleep_factor = | |
589 (1.0 - cleanup_task_overhead()) / cleanup_task_overhead(); | |
590 | |
591 #if 0 | |
592 gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads()); | |
593 gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead()); | |
594 gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor()); | |
595 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead()); | |
596 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor()); | |
597 #endif | |
598 | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
599 guarantee(parallel_marking_threads() > 0, "peace of mind"); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
600 _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads", |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
601 (int) _parallel_marking_threads, false, true); |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
602 if (_parallel_workers == NULL) { |
342 | 603 vm_exit_during_initialization("Failed necessary allocation."); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
604 } else { |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
605 _parallel_workers->initialize_workers(); |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
606 } |
342 | 607 } |
608 | |
609 // so that the call below can read a sensible value | |
610 _heap_start = (HeapWord*) rs.base(); | |
611 set_non_marking_state(); | |
612 } | |
613 | |
614 void ConcurrentMark::update_g1_committed(bool force) { | |
615 // If concurrent marking is not in progress, then we do not need to | |
616 // update _heap_end. This has a subtle and important | |
617 // side-effect. Imagine that two evacuation pauses happen between | |
618 // marking completion and remark. The first one can grow the | |
619 // heap (hence now the finger is below the heap end). Then, the | |
620 // second one could unnecessarily push regions on the region | |
621 // stack. This causes the invariant that the region stack is empty | |
622 // at the beginning of remark to be false. By ensuring that we do | |
623 // not observe heap expansions after marking is complete, then we do | |
624 // not have this problem. | |
625 if (!concurrent_marking_in_progress() && !force) | |
626 return; | |
627 | |
628 MemRegion committed = _g1h->g1_committed(); | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
629 assert(committed.start() == _heap_start, "start shouldn't change"); |
342 | 630 HeapWord* new_end = committed.end(); |
631 if (new_end > _heap_end) { | |
632 // The heap has been expanded. | |
633 | |
634 _heap_end = new_end; | |
635 } | |
636 // Notice that the heap can also shrink. However, this only happens | |
637 // during a Full GC (at least currently) and the entire marking | |
638 // phase will bail out and the task will not be restarted. So, let's | |
639 // do nothing. | |
640 } | |
641 | |
642 void ConcurrentMark::reset() { | |
643 // Starting values for these two. This should be called in a STW | |
644 // phase. CM will be notified of any future g1_committed expansions | |
645 // will be at the end of evacuation pauses, when tasks are | |
646 // inactive. | |
647 MemRegion committed = _g1h->g1_committed(); | |
648 _heap_start = committed.start(); | |
649 _heap_end = committed.end(); | |
650 | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
651 // Separated the asserts so that we know which one fires. |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
652 assert(_heap_start != NULL, "heap bounds should look ok"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
653 assert(_heap_end != NULL, "heap bounds should look ok"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
654 assert(_heap_start < _heap_end, "heap bounds should look ok"); |
342 | 655 |
656 // reset all the marking data structures and any necessary flags | |
657 clear_marking_state(); | |
658 | |
659 if (verbose_low()) | |
660 gclog_or_tty->print_cr("[global] resetting"); | |
661 | |
662 // We do reset all of them, since different phases will use | |
663 // different number of active threads. So, it's easiest to have all | |
664 // of them ready. | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
665 for (int i = 0; i < (int) _max_task_num; ++i) { |
342 | 666 _tasks[i]->reset(_nextMarkBitMap); |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
667 } |
342 | 668 |
669 // we need this to make sure that the flag is on during the evac | |
670 // pause with initial mark piggy-backed | |
671 set_concurrent_marking_in_progress(); | |
672 } | |
673 | |
674 void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
675 assert(active_tasks <= _max_task_num, "we should not have more"); |
342 | 676 |
677 _active_tasks = active_tasks; | |
678 // Need to update the three data structures below according to the | |
679 // number of active threads for this phase. | |
680 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues); | |
681 _first_overflow_barrier_sync.set_n_workers((int) active_tasks); | |
682 _second_overflow_barrier_sync.set_n_workers((int) active_tasks); | |
683 | |
684 _concurrent = concurrent; | |
685 // We propagate this to all tasks, not just the active ones. | |
686 for (int i = 0; i < (int) _max_task_num; ++i) | |
687 _tasks[i]->set_concurrent(concurrent); | |
688 | |
689 if (concurrent) { | |
690 set_concurrent_marking_in_progress(); | |
691 } else { | |
692 // We currently assume that the concurrent flag has been set to | |
693 // false before we start remark. At this point we should also be | |
694 // in a STW phase. | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
695 assert(!concurrent_marking_in_progress(), "invariant"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
696 assert(_finger == _heap_end, "only way to get here"); |
342 | 697 update_g1_committed(true); |
698 } | |
699 } | |
700 | |
701 void ConcurrentMark::set_non_marking_state() { | |
702 // We set the global marking state to some default values when we're | |
703 // not doing marking. | |
704 clear_marking_state(); | |
705 _active_tasks = 0; | |
706 clear_concurrent_marking_in_progress(); | |
707 } | |
708 | |
709 ConcurrentMark::~ConcurrentMark() { | |
710 for (int i = 0; i < (int) _max_task_num; ++i) { | |
711 delete _task_queues->queue(i); | |
712 delete _tasks[i]; | |
713 } | |
714 delete _task_queues; | |
715 FREE_C_HEAP_ARRAY(CMTask*, _max_task_num); | |
716 } | |
717 | |
718 // This closure is used to mark refs into the g1 generation | |
719 // from external roots in the CMS bit map. | |
720 // Called at the first checkpoint. | |
721 // | |
722 | |
723 void ConcurrentMark::clearNextBitmap() { | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
724 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
725 G1CollectorPolicy* g1p = g1h->g1_policy(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
726 |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
727 // Make sure that the concurrent mark thread looks to still be in |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
728 // the current cycle. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
729 guarantee(cmThread()->during_cycle(), "invariant"); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
730 |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
731 // We are finishing up the current cycle by clearing the next |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
732 // marking bitmap and getting it ready for the next cycle. During |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
733 // this time no other cycle can start. So, let's make sure that this |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
734 // is the case. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
735 guarantee(!g1h->mark_in_progress(), "invariant"); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
736 |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
737 // clear the mark bitmap (no grey objects to start with). |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
738 // We need to do this in chunks and offer to yield in between |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
739 // each chunk. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
740 HeapWord* start = _nextMarkBitMap->startWord(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
741 HeapWord* end = _nextMarkBitMap->endWord(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
742 HeapWord* cur = start; |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
743 size_t chunkSize = M; |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
744 while (cur < end) { |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
745 HeapWord* next = cur + chunkSize; |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
746 if (next > end) |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
747 next = end; |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
748 MemRegion mr(cur,next); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
749 _nextMarkBitMap->clearRange(mr); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
750 cur = next; |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
751 do_yield_check(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
752 |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
753 // Repeat the asserts from above. We'll do them as asserts here to |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
754 // minimize their overhead on the product. However, we'll have |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
755 // them as guarantees at the beginning / end of the bitmap |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
756 // clearing to get some checking in the product. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
757 assert(cmThread()->during_cycle(), "invariant"); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
758 assert(!g1h->mark_in_progress(), "invariant"); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
759 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
760 |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
761 // Repeat the asserts from above. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
762 guarantee(cmThread()->during_cycle(), "invariant"); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1358
diff
changeset
|
763 guarantee(!g1h->mark_in_progress(), "invariant"); |
342 | 764 } |
765 | |
766 class NoteStartOfMarkHRClosure: public HeapRegionClosure { | |
767 public: | |
768 bool doHeapRegion(HeapRegion* r) { | |
769 if (!r->continuesHumongous()) { | |
770 r->note_start_of_marking(true); | |
771 } | |
772 return false; | |
773 } | |
774 }; | |
775 | |
776 void ConcurrentMark::checkpointRootsInitialPre() { | |
777 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
778 G1CollectorPolicy* g1p = g1h->g1_policy(); | |
779 | |
780 _has_aborted = false; | |
781 | |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1388
diff
changeset
|
782 #ifndef PRODUCT |
1044 | 783 if (G1PrintReachableAtInitialMark) { |
1388 | 784 print_reachable("at-cycle-start", |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
785 VerifyOption_G1UsePrevMarking, true /* all */); |
1044 | 786 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1388
diff
changeset
|
787 #endif |
342 | 788 |
789 // Initialise marking structures. This has to be done in a STW phase. | |
790 reset(); | |
791 } | |
792 | |
793 class CMMarkRootsClosure: public OopsInGenClosure { | |
794 private: | |
795 ConcurrentMark* _cm; | |
796 G1CollectedHeap* _g1h; | |
797 bool _do_barrier; | |
798 | |
799 public: | |
800 CMMarkRootsClosure(ConcurrentMark* cm, | |
801 G1CollectedHeap* g1h, | |
802 bool do_barrier) : _cm(cm), _g1h(g1h), | |
803 _do_barrier(do_barrier) { } | |
804 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
805 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
806 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
807 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
808 template <class T> void do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
809 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
810 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
811 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
812 assert(obj->is_oop() || obj->mark() == NULL, |
342 | 813 "expected an oop, possibly with mark word displaced"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
814 HeapWord* addr = (HeapWord*)obj; |
342 | 815 if (_g1h->is_in_g1_reserved(addr)) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
816 _cm->grayRoot(obj); |
342 | 817 } |
818 } | |
819 if (_do_barrier) { | |
820 assert(!_g1h->is_in_g1_reserved(p), | |
821 "Should be called on external roots"); | |
822 do_barrier(p); | |
823 } | |
824 } | |
825 }; | |
826 | |
827 void ConcurrentMark::checkpointRootsInitialPost() { | |
828 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
829 | |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
830 // If we force an overflow during remark, the remark operation will |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
831 // actually abort and we'll restart concurrent marking. If we always |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
832 // force an oveflow during remark we'll never actually complete the |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
833 // marking phase. So, we initilize this here, at the start of the |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
834 // cycle, so that at the remaining overflow number will decrease at |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
835 // every remark and we'll eventually not need to cause one. |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
836 force_overflow_stw()->init(); |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
837 |
342 | 838 // For each region note start of marking. |
839 NoteStartOfMarkHRClosure startcl; | |
840 g1h->heap_region_iterate(&startcl); | |
841 | |
842 // Start weak-reference discovery. | |
843 ReferenceProcessor* rp = g1h->ref_processor(); | |
844 rp->verify_no_references_recorded(); | |
845 rp->enable_discovery(); // enable ("weak") refs discovery | |
457
27a80744a83b
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
ysr
parents:
453
diff
changeset
|
846 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle |
342 | 847 |
848 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
1317
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
849 // This is the start of the marking cycle, we're expected all |
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
850 // threads to have SATB queues with active set to false. |
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
851 satb_mq_set.set_active_all_threads(true, /* new active value */ |
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
852 false /* expected_active */); |
342 | 853 |
854 // update_g1_committed() will be called at the end of an evac pause | |
855 // when marking is on. So, it's also called at the end of the | |
856 // initial-mark pause to update the heap end, if the heap expands | |
857 // during it. No need to call it here. | |
858 } | |
859 | |
860 // Checkpoint the roots into this generation from outside | |
861 // this generation. [Note this initial checkpoint need only | |
862 // be approximate -- we'll do a catch up phase subsequently.] | |
863 void ConcurrentMark::checkpointRootsInitial() { | |
864 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); | |
865 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
866 | |
867 double start = os::elapsedTime(); | |
868 | |
869 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); | |
870 g1p->record_concurrent_mark_init_start(); | |
871 checkpointRootsInitialPre(); | |
872 | |
873 // YSR: when concurrent precleaning is in place, we'll | |
874 // need to clear the cached card table here | |
875 | |
876 ResourceMark rm; | |
877 HandleMark hm; | |
878 | |
879 g1h->ensure_parsability(false); | |
880 g1h->perm_gen()->save_marks(); | |
881 | |
882 CMMarkRootsClosure notOlder(this, g1h, false); | |
883 CMMarkRootsClosure older(this, g1h, true); | |
884 | |
885 g1h->set_marking_started(); | |
886 g1h->rem_set()->prepare_for_younger_refs_iterate(false); | |
887 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
912
diff
changeset
|
888 g1h->process_strong_roots(true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
912
diff
changeset
|
889 false, // fake perm gen collection |
342 | 890 SharedHeap::SO_AllClasses, |
891 ¬Older, // Regular roots | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
912
diff
changeset
|
892 NULL, // do not visit active blobs |
342 | 893 &older // Perm Gen Roots |
894 ); | |
895 checkpointRootsInitialPost(); | |
896 | |
897 // Statistics. | |
898 double end = os::elapsedTime(); | |
899 _init_times.add((end - start) * 1000.0); | |
900 | |
901 g1p->record_concurrent_mark_init_end(); | |
902 } | |
903 | |
904 /* | |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
905 * Notice that in the next two methods, we actually leave the STS |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
906 * during the barrier sync and join it immediately afterwards. If we |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
907 * do not do this, the following deadlock can occur: one thread could |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
908 * be in the barrier sync code, waiting for the other thread to also |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
909 * sync up, whereas another one could be trying to yield, while also |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
910 * waiting for the other threads to sync up too. |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
911 * |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
912 * Note, however, that this code is also used during remark and in |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
913 * this case we should not attempt to leave / enter the STS, otherwise |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
914 * we'll either hit an assert (debug / fastdebug) or deadlock |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
915 * (product). So we should only leave / enter the STS if we are |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
916 * operating concurrently. |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
917 * |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
918 * Because the thread that does the sync barrier has left the STS, it |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
919 * is possible to be suspended for a Full GC or an evacuation pause |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
920 * could occur. This is actually safe, since the entering the sync |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
921 * barrier is one of the last things do_marking_step() does, and it |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
922 * doesn't manipulate any data structures afterwards. |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
923 */ |
342 | 924 |
925 void ConcurrentMark::enter_first_sync_barrier(int task_num) { | |
926 if (verbose_low()) | |
927 gclog_or_tty->print_cr("[%d] entering first barrier", task_num); | |
928 | |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
929 if (concurrent()) { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
930 ConcurrentGCThread::stsLeave(); |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
931 } |
342 | 932 _first_overflow_barrier_sync.enter(); |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
933 if (concurrent()) { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
934 ConcurrentGCThread::stsJoin(); |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
935 } |
342 | 936 // at this point everyone should have synced up and not be doing any |
937 // more work | |
938 | |
939 if (verbose_low()) | |
940 gclog_or_tty->print_cr("[%d] leaving first barrier", task_num); | |
941 | |
942 // let task 0 do this | |
943 if (task_num == 0) { | |
944 // task 0 is responsible for clearing the global data structures | |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
945 // We should be here because of an overflow. During STW we should |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
946 // not clear the overflow flag since we rely on it being true when |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
947 // we exit this method to abort the pause and restart concurent |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
948 // marking. |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
949 clear_marking_state(concurrent() /* clear_overflow */); |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
950 force_overflow()->update(); |
342 | 951 |
952 if (PrintGC) { | |
953 gclog_or_tty->date_stamp(PrintGCDateStamps); | |
954 gclog_or_tty->stamp(PrintGCTimeStamps); | |
955 gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]"); | |
956 } | |
957 } | |
958 | |
959 // after this, each task should reset its own data structures then | |
960 // then go into the second barrier | |
961 } | |
962 | |
963 void ConcurrentMark::enter_second_sync_barrier(int task_num) { | |
964 if (verbose_low()) | |
965 gclog_or_tty->print_cr("[%d] entering second barrier", task_num); | |
966 | |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
967 if (concurrent()) { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
968 ConcurrentGCThread::stsLeave(); |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
969 } |
342 | 970 _second_overflow_barrier_sync.enter(); |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
971 if (concurrent()) { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
972 ConcurrentGCThread::stsJoin(); |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
973 } |
342 | 974 // at this point everything should be re-initialised and ready to go |
975 | |
976 if (verbose_low()) | |
977 gclog_or_tty->print_cr("[%d] leaving second barrier", task_num); | |
978 } | |
979 | |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
980 #ifndef PRODUCT |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
981 void ForceOverflowSettings::init() { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
982 _num_remaining = G1ConcMarkForceOverflow; |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
983 _force = false; |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
984 update(); |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
985 } |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
986 |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
987 void ForceOverflowSettings::update() { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
988 if (_num_remaining > 0) { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
989 _num_remaining -= 1; |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
990 _force = true; |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
991 } else { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
992 _force = false; |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
993 } |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
994 } |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
995 |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
996 bool ForceOverflowSettings::should_force() { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
997 if (_force) { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
998 _force = false; |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
999 return true; |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
1000 } else { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
1001 return false; |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
1002 } |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
1003 } |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
1004 #endif // !PRODUCT |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
1005 |
342 | 1006 void ConcurrentMark::grayRoot(oop p) { |
1007 HeapWord* addr = (HeapWord*) p; | |
1008 // We can't really check against _heap_start and _heap_end, since it | |
1009 // is possible during an evacuation pause with piggy-backed | |
1010 // initial-mark that the committed space is expanded during the | |
1011 // pause without CM observing this change. So the assertions below | |
1012 // is a bit conservative; but better than nothing. | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1013 assert(_g1h->g1_committed().contains(addr), |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1014 "address should be within the heap bounds"); |
342 | 1015 |
1016 if (!_nextMarkBitMap->isMarked(addr)) | |
1017 _nextMarkBitMap->parMark(addr); | |
1018 } | |
1019 | |
1020 void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) { | |
1021 // The objects on the region have already been marked "in bulk" by | |
1022 // the caller. We only need to decide whether to push the region on | |
1023 // the region stack or not. | |
1024 | |
1025 if (!concurrent_marking_in_progress() || !_should_gray_objects) | |
1026 // We're done with marking and waiting for remark. We do not need to | |
1027 // push anything else on the region stack. | |
1028 return; | |
1029 | |
1030 HeapWord* finger = _finger; | |
1031 | |
1032 if (verbose_low()) | |
1033 gclog_or_tty->print_cr("[global] attempting to push " | |
1034 "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at " | |
1035 PTR_FORMAT, mr.start(), mr.end(), finger); | |
1036 | |
1037 if (mr.start() < finger) { | |
1038 // The finger is always heap region aligned and it is not possible | |
1039 // for mr to span heap regions. | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1040 assert(mr.end() <= finger, "invariant"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1041 |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1042 // Separated the asserts so that we know which one fires. |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1043 assert(mr.start() <= mr.end(), |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1044 "region boundaries should fall within the committed space"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1045 assert(_heap_start <= mr.start(), |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1046 "region boundaries should fall within the committed space"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1047 assert(mr.end() <= _heap_end, |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1048 "region boundaries should fall within the committed space"); |
342 | 1049 if (verbose_low()) |
1050 gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") " | |
1051 "below the finger, pushing it", | |
1052 mr.start(), mr.end()); | |
1053 | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
1054 if (!region_stack_push_lock_free(mr)) { |
342 | 1055 if (verbose_low()) |
1056 gclog_or_tty->print_cr("[global] region stack has overflown."); | |
1057 } | |
1058 } | |
1059 } | |
1060 | |
1061 void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) { | |
1062 // The object is not marked by the caller. We need to at least mark | |
1063 // it and maybe push in on the stack. | |
1064 | |
1065 HeapWord* addr = (HeapWord*)p; | |
1066 if (!_nextMarkBitMap->isMarked(addr)) { | |
1067 // We definitely need to mark it, irrespective whether we bail out | |
1068 // because we're done with marking. | |
1069 if (_nextMarkBitMap->parMark(addr)) { | |
1070 if (!concurrent_marking_in_progress() || !_should_gray_objects) | |
1071 // If we're done with concurrent marking and we're waiting for | |
1072 // remark, then we're not pushing anything on the stack. | |
1073 return; | |
1074 | |
1075 // No OrderAccess:store_load() is needed. It is implicit in the | |
1076 // CAS done in parMark(addr) above | |
1077 HeapWord* finger = _finger; | |
1078 | |
1079 if (addr < finger) { | |
1080 if (!mark_stack_push(oop(addr))) { | |
1081 if (verbose_low()) | |
1082 gclog_or_tty->print_cr("[global] global stack overflow " | |
1083 "during parMark"); | |
1084 } | |
1085 } | |
1086 } | |
1087 } | |
1088 } | |
1089 | |
1090 class CMConcurrentMarkingTask: public AbstractGangTask { | |
1091 private: | |
1092 ConcurrentMark* _cm; | |
1093 ConcurrentMarkThread* _cmt; | |
1094 | |
1095 public: | |
1096 void work(int worker_i) { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1097 assert(Thread::current()->is_ConcurrentGC_thread(), |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1098 "this should only be done by a conc GC thread"); |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1972
diff
changeset
|
1099 ResourceMark rm; |
342 | 1100 |
1101 double start_vtime = os::elapsedVTime(); | |
1102 | |
1103 ConcurrentGCThread::stsJoin(); | |
1104 | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1105 assert((size_t) worker_i < _cm->active_tasks(), "invariant"); |
342 | 1106 CMTask* the_task = _cm->task(worker_i); |
1107 the_task->record_start_time(); | |
1108 if (!_cm->has_aborted()) { | |
1109 do { | |
1110 double start_vtime_sec = os::elapsedVTime(); | |
1111 double start_time_sec = os::elapsedTime(); | |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1112 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1113 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1114 the_task->do_marking_step(mark_step_duration_ms, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1115 true /* do_stealing */, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1116 true /* do_termination */); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1117 |
342 | 1118 double end_time_sec = os::elapsedTime(); |
1119 double end_vtime_sec = os::elapsedVTime(); | |
1120 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; | |
1121 double elapsed_time_sec = end_time_sec - start_time_sec; | |
1122 _cm->clear_has_overflown(); | |
1123 | |
1124 bool ret = _cm->do_yield_check(worker_i); | |
1125 | |
1126 jlong sleep_time_ms; | |
1127 if (!_cm->has_aborted() && the_task->has_aborted()) { | |
1128 sleep_time_ms = | |
1129 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); | |
1130 ConcurrentGCThread::stsLeave(); | |
1131 os::sleep(Thread::current(), sleep_time_ms, false); | |
1132 ConcurrentGCThread::stsJoin(); | |
1133 } | |
1134 double end_time2_sec = os::elapsedTime(); | |
1135 double elapsed_time2_sec = end_time2_sec - start_time_sec; | |
1136 | |
1137 #if 0 | |
1138 gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, " | |
1139 "overhead %1.4lf", | |
1140 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms, | |
1141 the_task->conc_overhead(os::elapsedTime()) * 8.0); | |
1142 gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms", | |
1143 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0); | |
1144 #endif | |
1145 } while (!_cm->has_aborted() && the_task->has_aborted()); | |
1146 } | |
1147 the_task->record_end_time(); | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1148 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant"); |
342 | 1149 |
1150 ConcurrentGCThread::stsLeave(); | |
1151 | |
1152 double end_vtime = os::elapsedVTime(); | |
1153 _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime); | |
1154 } | |
1155 | |
1156 CMConcurrentMarkingTask(ConcurrentMark* cm, | |
1157 ConcurrentMarkThread* cmt) : | |
1158 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { } | |
1159 | |
1160 ~CMConcurrentMarkingTask() { } | |
1161 }; | |
1162 | |
1163 void ConcurrentMark::markFromRoots() { | |
1164 // we might be tempted to assert that: | |
1165 // assert(asynch == !SafepointSynchronize::is_at_safepoint(), | |
1166 // "inconsistent argument?"); | |
1167 // However that wouldn't be right, because it's possible that | |
1168 // a safepoint is indeed in progress as a younger generation | |
1169 // stop-the-world GC happens even as we mark in this generation. | |
1170 | |
1171 _restart_for_overflow = false; | |
1172 | |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1173 size_t active_workers = MAX2((size_t) 1, parallel_marking_threads()); |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
1174 force_overflow_conc()->init(); |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1175 set_phase(active_workers, true /* concurrent */); |
342 | 1176 |
1177 CMConcurrentMarkingTask markingTask(this, cmThread()); | |
1178 if (parallel_marking_threads() > 0) | |
1179 _parallel_workers->run_task(&markingTask); | |
1180 else | |
1181 markingTask.work(0); | |
1182 print_stats(); | |
1183 } | |
1184 | |
1185 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { | |
1186 // world is stopped at this checkpoint | |
1187 assert(SafepointSynchronize::is_at_safepoint(), | |
1188 "world should be stopped"); | |
1189 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
1190 | |
1191 // If a full collection has happened, we shouldn't do this. | |
1192 if (has_aborted()) { | |
1193 g1h->set_marking_complete(); // So bitmap clearing isn't confused | |
1194 return; | |
1195 } | |
1196 | |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2037
diff
changeset
|
1197 SvcGCMarker sgcm(SvcGCMarker::OTHER); |
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2037
diff
changeset
|
1198 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1199 if (VerifyDuringGC) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1200 HandleMark hm; // handle scope |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1201 gclog_or_tty->print(" VerifyDuringGC:(before)"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1202 Universe::heap()->prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1203 Universe::verify(/* allow dirty */ true, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1204 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1205 /* option */ VerifyOption_G1UsePrevMarking); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1206 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1207 |
342 | 1208 G1CollectorPolicy* g1p = g1h->g1_policy(); |
1209 g1p->record_concurrent_mark_remark_start(); | |
1210 | |
1211 double start = os::elapsedTime(); | |
1212 | |
1213 checkpointRootsFinalWork(); | |
1214 | |
1215 double mark_work_end = os::elapsedTime(); | |
1216 | |
1217 weakRefsWork(clear_all_soft_refs); | |
1218 | |
1219 if (has_overflown()) { | |
1220 // Oops. We overflowed. Restart concurrent marking. | |
1221 _restart_for_overflow = true; | |
1222 // Clear the flag. We do not need it any more. | |
1223 clear_has_overflown(); | |
1224 if (G1TraceMarkStackOverflow) | |
1225 gclog_or_tty->print_cr("\nRemark led to restart for overflow."); | |
1226 } else { | |
2149 | 1227 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
342 | 1228 // We're done with marking. |
1317
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
1229 // This is the end of the marking cycle, we're expected all |
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
1230 // threads to have SATB queues with active set to true. |
2149 | 1231 satb_mq_set.set_active_all_threads(false, /* new active value */ |
1232 true /* expected_active */); | |
811 | 1233 |
1234 if (VerifyDuringGC) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1235 HandleMark hm; // handle scope |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1236 gclog_or_tty->print(" VerifyDuringGC:(after)"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1237 Universe::heap()->prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1238 Universe::verify(/* allow dirty */ true, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1239 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1240 /* option */ VerifyOption_G1UseNextMarking); |
811 | 1241 } |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1242 assert(!restart_for_overflow(), "sanity"); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1243 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1244 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1245 // Reset the marking state if marking completed |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1246 if (!restart_for_overflow()) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1247 set_non_marking_state(); |
342 | 1248 } |
1249 | |
1250 #if VERIFY_OBJS_PROCESSED | |
1251 _scan_obj_cl.objs_processed = 0; | |
1252 ThreadLocalObjQueue::objs_enqueued = 0; | |
1253 #endif | |
1254 | |
1255 // Statistics | |
1256 double now = os::elapsedTime(); | |
1257 _remark_mark_times.add((mark_work_end - start) * 1000.0); | |
1258 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0); | |
1259 _remark_times.add((now - start) * 1000.0); | |
1260 | |
1261 g1p->record_concurrent_mark_remark_end(); | |
1262 } | |
1263 | |
1264 #define CARD_BM_TEST_MODE 0 | |
1265 | |
1266 class CalcLiveObjectsClosure: public HeapRegionClosure { | |
1267 | |
1268 CMBitMapRO* _bm; | |
1269 ConcurrentMark* _cm; | |
1270 bool _changed; | |
1271 bool _yield; | |
1272 size_t _words_done; | |
1273 size_t _tot_live; | |
1274 size_t _tot_used; | |
1275 size_t _regions_done; | |
1276 double _start_vtime_sec; | |
1277 | |
1278 BitMap* _region_bm; | |
1279 BitMap* _card_bm; | |
1280 intptr_t _bottom_card_num; | |
1281 bool _final; | |
1282 | |
1283 void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) { | |
1284 for (intptr_t i = start_card_num; i <= last_card_num; i++) { | |
1285 #if CARD_BM_TEST_MODE | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1286 guarantee(_card_bm->at(i - _bottom_card_num), "Should already be set."); |
342 | 1287 #else |
1288 _card_bm->par_at_put(i - _bottom_card_num, 1); | |
1289 #endif | |
1290 } | |
1291 } | |
1292 | |
1293 public: | |
1294 CalcLiveObjectsClosure(bool final, | |
1295 CMBitMapRO *bm, ConcurrentMark *cm, | |
936 | 1296 BitMap* region_bm, BitMap* card_bm) : |
342 | 1297 _bm(bm), _cm(cm), _changed(false), _yield(true), |
1298 _words_done(0), _tot_live(0), _tot_used(0), | |
936 | 1299 _region_bm(region_bm), _card_bm(card_bm),_final(final), |
342 | 1300 _regions_done(0), _start_vtime_sec(0.0) |
1301 { | |
1302 _bottom_card_num = | |
1303 intptr_t(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >> | |
1304 CardTableModRefBS::card_shift); | |
1305 } | |
1306 | |
829 | 1307 // It takes a region that's not empty (i.e., it has at least one |
1308 // live object in it and sets its corresponding bit on the region | |
1309 // bitmap to 1. If the region is "starts humongous" it will also set | |
1310 // to 1 the bits on the region bitmap that correspond to its | |
1311 // associated "continues humongous" regions. | |
1312 void set_bit_for_region(HeapRegion* hr) { | |
1313 assert(!hr->continuesHumongous(), "should have filtered those out"); | |
1314 | |
1315 size_t index = hr->hrs_index(); | |
1316 if (!hr->startsHumongous()) { | |
1317 // Normal (non-humongous) case: just set the bit. | |
1318 _region_bm->par_at_put((BitMap::idx_t) index, true); | |
1319 } else { | |
1320 // Starts humongous case: calculate how many regions are part of | |
1321 // this humongous region and then set the bit range. It might | |
1322 // have been a bit more efficient to look at the object that | |
1323 // spans these humongous regions to calculate their number from | |
1324 // the object's size. However, it's a good idea to calculate | |
1325 // this based on the metadata itself, and not the region | |
1326 // contents, so that this code is not aware of what goes into | |
1327 // the humongous regions (in case this changes in the future). | |
1328 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
1329 size_t end_index = index + 1; | |
831 | 1330 while (end_index < g1h->n_regions()) { |
1331 HeapRegion* chr = g1h->region_at(end_index); | |
829 | 1332 if (!chr->continuesHumongous()) { |
1333 break; | |
1334 } | |
1335 end_index += 1; | |
1336 } | |
1337 _region_bm->par_at_put_range((BitMap::idx_t) index, | |
1338 (BitMap::idx_t) end_index, true); | |
1339 } | |
1340 } | |
1341 | |
342 | 1342 bool doHeapRegion(HeapRegion* hr) { |
1343 if (!_final && _regions_done == 0) | |
1344 _start_vtime_sec = os::elapsedVTime(); | |
1345 | |
639 | 1346 if (hr->continuesHumongous()) { |
829 | 1347 // We will ignore these here and process them when their |
1348 // associated "starts humongous" region is processed (see | |
1349 // set_bit_for_heap_region()). Note that we cannot rely on their | |
1350 // associated "starts humongous" region to have their bit set to | |
1351 // 1 since, due to the region chunking in the parallel region | |
1352 // iteration, a "continues humongous" region might be visited | |
1353 // before its associated "starts humongous". | |
639 | 1354 return false; |
1355 } | |
342 | 1356 |
1357 HeapWord* nextTop = hr->next_top_at_mark_start(); | |
1358 HeapWord* start = hr->top_at_conc_mark_count(); | |
1359 assert(hr->bottom() <= start && start <= hr->end() && | |
1360 hr->bottom() <= nextTop && nextTop <= hr->end() && | |
1361 start <= nextTop, | |
1362 "Preconditions."); | |
1363 // Otherwise, record the number of word's we'll examine. | |
1364 size_t words_done = (nextTop - start); | |
1365 // Find the first marked object at or after "start". | |
1366 start = _bm->getNextMarkedWordAddress(start, nextTop); | |
1367 size_t marked_bytes = 0; | |
1368 | |
1369 // Below, the term "card num" means the result of shifting an address | |
1370 // by the card shift -- address 0 corresponds to card number 0. One | |
1371 // must subtract the card num of the bottom of the heap to obtain a | |
1372 // card table index. | |
1373 // The first card num of the sequence of live cards currently being | |
1374 // constructed. -1 ==> no sequence. | |
1375 intptr_t start_card_num = -1; | |
1376 // The last card num of the sequence of live cards currently being | |
1377 // constructed. -1 ==> no sequence. | |
1378 intptr_t last_card_num = -1; | |
1379 | |
1380 while (start < nextTop) { | |
1381 if (_yield && _cm->do_yield_check()) { | |
1382 // We yielded. It might be for a full collection, in which case | |
1383 // all bets are off; terminate the traversal. | |
1384 if (_cm->has_aborted()) { | |
1385 _changed = false; | |
1386 return true; | |
1387 } else { | |
1388 // Otherwise, it might be a collection pause, and the region | |
1389 // we're looking at might be in the collection set. We'll | |
1390 // abandon this region. | |
1391 return false; | |
1392 } | |
1393 } | |
1394 oop obj = oop(start); | |
1395 int obj_sz = obj->size(); | |
1396 // The card num of the start of the current object. | |
1397 intptr_t obj_card_num = | |
1398 intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift); | |
1399 | |
1400 HeapWord* obj_last = start + obj_sz - 1; | |
1401 intptr_t obj_last_card_num = | |
1402 intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift); | |
1403 | |
1404 if (obj_card_num != last_card_num) { | |
1405 if (start_card_num == -1) { | |
1406 assert(last_card_num == -1, "Both or neither."); | |
1407 start_card_num = obj_card_num; | |
1408 } else { | |
1409 assert(last_card_num != -1, "Both or neither."); | |
1410 assert(obj_card_num >= last_card_num, "Inv"); | |
1411 if ((obj_card_num - last_card_num) > 1) { | |
1412 // Mark the last run, and start a new one. | |
1413 mark_card_num_range(start_card_num, last_card_num); | |
1414 start_card_num = obj_card_num; | |
1415 } | |
1416 } | |
1417 #if CARD_BM_TEST_MODE | |
1418 /* | |
1419 gclog_or_tty->print_cr("Setting bits from %d/%d.", | |
1420 obj_card_num - _bottom_card_num, | |
1421 obj_last_card_num - _bottom_card_num); | |
1422 */ | |
1423 for (intptr_t j = obj_card_num; j <= obj_last_card_num; j++) { | |
1424 _card_bm->par_at_put(j - _bottom_card_num, 1); | |
1425 } | |
1426 #endif | |
1427 } | |
1428 // In any case, we set the last card num. | |
1429 last_card_num = obj_last_card_num; | |
1430 | |
1030
dfdaf65c3423
6858886: G1: guarantee(_next_marked_bytes <= used(),"invariant") at heapRegion.hpp:359
apetrusenko
parents:
1023
diff
changeset
|
1431 marked_bytes += (size_t)obj_sz * HeapWordSize; |
342 | 1432 // Find the next marked object after this one. |
1433 start = _bm->getNextMarkedWordAddress(start + 1, nextTop); | |
1434 _changed = true; | |
1435 } | |
1436 // Handle the last range, if any. | |
1437 if (start_card_num != -1) | |
1438 mark_card_num_range(start_card_num, last_card_num); | |
1439 if (_final) { | |
1440 // Mark the allocated-since-marking portion... | |
1441 HeapWord* tp = hr->top(); | |
1442 if (nextTop < tp) { | |
1443 start_card_num = | |
1444 intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift); | |
1445 last_card_num = | |
1446 intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift); | |
1447 mark_card_num_range(start_card_num, last_card_num); | |
1448 // This definitely means the region has live objects. | |
829 | 1449 set_bit_for_region(hr); |
342 | 1450 } |
1451 } | |
1452 | |
1453 hr->add_to_marked_bytes(marked_bytes); | |
1454 // Update the live region bitmap. | |
1455 if (marked_bytes > 0) { | |
829 | 1456 set_bit_for_region(hr); |
342 | 1457 } |
1458 hr->set_top_at_conc_mark_count(nextTop); | |
1459 _tot_live += hr->next_live_bytes(); | |
1460 _tot_used += hr->used(); | |
1461 _words_done = words_done; | |
1462 | |
1463 if (!_final) { | |
1464 ++_regions_done; | |
1465 if (_regions_done % 10 == 0) { | |
1466 double end_vtime_sec = os::elapsedVTime(); | |
1467 double elapsed_vtime_sec = end_vtime_sec - _start_vtime_sec; | |
1468 if (elapsed_vtime_sec > (10.0 / 1000.0)) { | |
1469 jlong sleep_time_ms = | |
1470 (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0); | |
1471 os::sleep(Thread::current(), sleep_time_ms, false); | |
1472 _start_vtime_sec = end_vtime_sec; | |
1473 } | |
1474 } | |
1475 } | |
1476 | |
1477 return false; | |
1478 } | |
1479 | |
1480 bool changed() { return _changed; } | |
1481 void reset() { _changed = false; _words_done = 0; } | |
1482 void no_yield() { _yield = false; } | |
1483 size_t words_done() { return _words_done; } | |
1484 size_t tot_live() { return _tot_live; } | |
1485 size_t tot_used() { return _tot_used; } | |
1486 }; | |
1487 | |
1488 | |
1489 void ConcurrentMark::calcDesiredRegions() { | |
1490 _region_bm.clear(); | |
1491 _card_bm.clear(); | |
1492 CalcLiveObjectsClosure calccl(false /*final*/, | |
1493 nextMarkBitMap(), this, | |
936 | 1494 &_region_bm, &_card_bm); |
342 | 1495 G1CollectedHeap *g1h = G1CollectedHeap::heap(); |
1496 g1h->heap_region_iterate(&calccl); | |
1497 | |
1498 do { | |
1499 calccl.reset(); | |
1500 g1h->heap_region_iterate(&calccl); | |
1501 } while (calccl.changed()); | |
1502 } | |
1503 | |
1504 class G1ParFinalCountTask: public AbstractGangTask { | |
1505 protected: | |
1506 G1CollectedHeap* _g1h; | |
1507 CMBitMap* _bm; | |
1508 size_t _n_workers; | |
1509 size_t *_live_bytes; | |
1510 size_t *_used_bytes; | |
1511 BitMap* _region_bm; | |
1512 BitMap* _card_bm; | |
1513 public: | |
1514 G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm, | |
1515 BitMap* region_bm, BitMap* card_bm) : | |
1516 AbstractGangTask("G1 final counting"), _g1h(g1h), | |
1517 _bm(bm), _region_bm(region_bm), _card_bm(card_bm) | |
1518 { | |
1519 if (ParallelGCThreads > 0) | |
1520 _n_workers = _g1h->workers()->total_workers(); | |
1521 else | |
1522 _n_workers = 1; | |
1523 _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); | |
1524 _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); | |
1525 } | |
1526 | |
1527 ~G1ParFinalCountTask() { | |
1528 FREE_C_HEAP_ARRAY(size_t, _live_bytes); | |
1529 FREE_C_HEAP_ARRAY(size_t, _used_bytes); | |
1530 } | |
1531 | |
1532 void work(int i) { | |
1533 CalcLiveObjectsClosure calccl(true /*final*/, | |
1534 _bm, _g1h->concurrent_mark(), | |
936 | 1535 _region_bm, _card_bm); |
342 | 1536 calccl.no_yield(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
1537 if (G1CollectedHeap::use_parallel_gc_threads()) { |
355 | 1538 _g1h->heap_region_par_iterate_chunked(&calccl, i, |
1539 HeapRegion::FinalCountClaimValue); | |
342 | 1540 } else { |
1541 _g1h->heap_region_iterate(&calccl); | |
1542 } | |
1543 assert(calccl.complete(), "Shouldn't have yielded!"); | |
1544 | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
1545 assert((size_t) i < _n_workers, "invariant"); |
342 | 1546 _live_bytes[i] = calccl.tot_live(); |
1547 _used_bytes[i] = calccl.tot_used(); | |
1548 } | |
1549 size_t live_bytes() { | |
1550 size_t live_bytes = 0; | |
1551 for (size_t i = 0; i < _n_workers; ++i) | |
1552 live_bytes += _live_bytes[i]; | |
1553 return live_bytes; | |
1554 } | |
1555 size_t used_bytes() { | |
1556 size_t used_bytes = 0; | |
1557 for (size_t i = 0; i < _n_workers; ++i) | |
1558 used_bytes += _used_bytes[i]; | |
1559 return used_bytes; | |
1560 } | |
1561 }; | |
1562 | |
1563 class G1ParNoteEndTask; | |
1564 | |
1565 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { | |
1566 G1CollectedHeap* _g1; | |
1567 int _worker_num; | |
1568 size_t _max_live_bytes; | |
1569 size_t _regions_claimed; | |
1570 size_t _freed_bytes; | |
2173 | 1571 FreeRegionList* _local_cleanup_list; |
1572 HumongousRegionSet* _humongous_proxy_set; | |
1573 HRRSCleanupTask* _hrrs_cleanup_task; | |
342 | 1574 double _claimed_region_time; |
1575 double _max_region_time; | |
1576 | |
1577 public: | |
1578 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, | |
2173 | 1579 int worker_num, |
1580 FreeRegionList* local_cleanup_list, | |
1581 HumongousRegionSet* humongous_proxy_set, | |
1582 HRRSCleanupTask* hrrs_cleanup_task); | |
342 | 1583 size_t freed_bytes() { return _freed_bytes; } |
1584 | |
1585 bool doHeapRegion(HeapRegion *r); | |
1586 | |
1587 size_t max_live_bytes() { return _max_live_bytes; } | |
1588 size_t regions_claimed() { return _regions_claimed; } | |
1589 double claimed_region_time_sec() { return _claimed_region_time; } | |
1590 double max_region_time_sec() { return _max_region_time; } | |
1591 }; | |
1592 | |
1593 class G1ParNoteEndTask: public AbstractGangTask { | |
1594 friend class G1NoteEndOfConcMarkClosure; | |
2152 | 1595 |
342 | 1596 protected: |
1597 G1CollectedHeap* _g1h; | |
1598 size_t _max_live_bytes; | |
1599 size_t _freed_bytes; | |
2152 | 1600 FreeRegionList* _cleanup_list; |
1601 | |
342 | 1602 public: |
1603 G1ParNoteEndTask(G1CollectedHeap* g1h, | |
2152 | 1604 FreeRegionList* cleanup_list) : |
342 | 1605 AbstractGangTask("G1 note end"), _g1h(g1h), |
2152 | 1606 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { } |
342 | 1607 |
1608 void work(int i) { | |
1609 double start = os::elapsedTime(); | |
2173 | 1610 FreeRegionList local_cleanup_list("Local Cleanup List"); |
1611 HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set"); | |
1612 HRRSCleanupTask hrrs_cleanup_task; | |
1613 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list, | |
1614 &humongous_proxy_set, | |
1615 &hrrs_cleanup_task); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
1616 if (G1CollectedHeap::use_parallel_gc_threads()) { |
355 | 1617 _g1h->heap_region_par_iterate_chunked(&g1_note_end, i, |
1618 HeapRegion::NoteEndClaimValue); | |
342 | 1619 } else { |
1620 _g1h->heap_region_iterate(&g1_note_end); | |
1621 } | |
1622 assert(g1_note_end.complete(), "Shouldn't have yielded!"); | |
1623 | |
2152 | 1624 // Now update the lists |
1625 _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(), | |
1626 NULL /* free_list */, | |
2173 | 1627 &humongous_proxy_set, |
2152 | 1628 true /* par */); |
342 | 1629 { |
1630 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
1631 _max_live_bytes += g1_note_end.max_live_bytes(); | |
1632 _freed_bytes += g1_note_end.freed_bytes(); | |
2152 | 1633 |
2173 | 1634 _cleanup_list->add_as_tail(&local_cleanup_list); |
1635 assert(local_cleanup_list.is_empty(), "post-condition"); | |
1636 | |
1637 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); | |
342 | 1638 } |
1639 double end = os::elapsedTime(); | |
1640 if (G1PrintParCleanupStats) { | |
1641 gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] " | |
1642 "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n", | |
1643 i, start, end, (end-start)*1000.0, | |
1644 g1_note_end.regions_claimed(), | |
1645 g1_note_end.claimed_region_time_sec()*1000.0, | |
1646 g1_note_end.max_region_time_sec()*1000.0); | |
1647 } | |
1648 } | |
1649 size_t max_live_bytes() { return _max_live_bytes; } | |
1650 size_t freed_bytes() { return _freed_bytes; } | |
1651 }; | |
1652 | |
1653 class G1ParScrubRemSetTask: public AbstractGangTask { | |
1654 protected: | |
1655 G1RemSet* _g1rs; | |
1656 BitMap* _region_bm; | |
1657 BitMap* _card_bm; | |
1658 public: | |
1659 G1ParScrubRemSetTask(G1CollectedHeap* g1h, | |
1660 BitMap* region_bm, BitMap* card_bm) : | |
1661 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), | |
1662 _region_bm(region_bm), _card_bm(card_bm) | |
1663 {} | |
1664 | |
1665 void work(int i) { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
1666 if (G1CollectedHeap::use_parallel_gc_threads()) { |
355 | 1667 _g1rs->scrub_par(_region_bm, _card_bm, i, |
1668 HeapRegion::ScrubRemSetClaimValue); | |
342 | 1669 } else { |
1670 _g1rs->scrub(_region_bm, _card_bm); | |
1671 } | |
1672 } | |
1673 | |
1674 }; | |
1675 | |
1676 G1NoteEndOfConcMarkClosure:: | |
1677 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, | |
2173 | 1678 int worker_num, |
1679 FreeRegionList* local_cleanup_list, | |
1680 HumongousRegionSet* humongous_proxy_set, | |
1681 HRRSCleanupTask* hrrs_cleanup_task) | |
342 | 1682 : _g1(g1), _worker_num(worker_num), |
1683 _max_live_bytes(0), _regions_claimed(0), | |
2152 | 1684 _freed_bytes(0), |
342 | 1685 _claimed_region_time(0.0), _max_region_time(0.0), |
2173 | 1686 _local_cleanup_list(local_cleanup_list), |
1687 _humongous_proxy_set(humongous_proxy_set), | |
1688 _hrrs_cleanup_task(hrrs_cleanup_task) { } | |
2152 | 1689 |
1690 bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) { | |
342 | 1691 // We use a claim value of zero here because all regions |
1692 // were claimed with value 1 in the FinalCount task. | |
2152 | 1693 hr->reset_gc_time_stamp(); |
1694 if (!hr->continuesHumongous()) { | |
342 | 1695 double start = os::elapsedTime(); |
1696 _regions_claimed++; | |
2152 | 1697 hr->note_end_of_marking(); |
1698 _max_live_bytes += hr->max_live_bytes(); | |
2173 | 1699 _g1->free_region_if_empty(hr, |
1700 &_freed_bytes, | |
1701 _local_cleanup_list, | |
1702 _humongous_proxy_set, | |
1703 _hrrs_cleanup_task, | |
1704 true /* par */); | |
342 | 1705 double region_time = (os::elapsedTime() - start); |
1706 _claimed_region_time += region_time; | |
1707 if (region_time > _max_region_time) _max_region_time = region_time; | |
1708 } | |
1709 return false; | |
1710 } | |
1711 | |
1712 void ConcurrentMark::cleanup() { | |
1713 // world is stopped at this checkpoint | |
1714 assert(SafepointSynchronize::is_at_safepoint(), | |
1715 "world should be stopped"); | |
1716 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
1717 | |
1718 // If a full collection has happened, we shouldn't do this. | |
1719 if (has_aborted()) { | |
1720 g1h->set_marking_complete(); // So bitmap clearing isn't confused | |
1721 return; | |
1722 } | |
1723 | |
2152 | 1724 g1h->verify_region_sets_optional(); |
1725 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1726 if (VerifyDuringGC) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1727 HandleMark hm; // handle scope |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1728 gclog_or_tty->print(" VerifyDuringGC:(before)"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1729 Universe::heap()->prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1730 Universe::verify(/* allow dirty */ true, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1731 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1732 /* option */ VerifyOption_G1UsePrevMarking); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1733 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1734 |
342 | 1735 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); |
1736 g1p->record_concurrent_mark_cleanup_start(); | |
1737 | |
1738 double start = os::elapsedTime(); | |
1739 | |
2173 | 1740 HeapRegionRemSet::reset_for_cleanup_tasks(); |
1741 | |
342 | 1742 // Do counting once more with the world stopped for good measure. |
1743 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), | |
1744 &_region_bm, &_card_bm); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
1745 if (G1CollectedHeap::use_parallel_gc_threads()) { |
355 | 1746 assert(g1h->check_heap_region_claim_values( |
1747 HeapRegion::InitialClaimValue), | |
1748 "sanity check"); | |
1749 | |
342 | 1750 int n_workers = g1h->workers()->total_workers(); |
1751 g1h->set_par_threads(n_workers); | |
1752 g1h->workers()->run_task(&g1_par_count_task); | |
1753 g1h->set_par_threads(0); | |
355 | 1754 |
1755 assert(g1h->check_heap_region_claim_values( | |
1756 HeapRegion::FinalCountClaimValue), | |
1757 "sanity check"); | |
342 | 1758 } else { |
1759 g1_par_count_task.work(0); | |
1760 } | |
1761 | |
1762 size_t known_garbage_bytes = | |
1763 g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes(); | |
1764 #if 0 | |
1765 gclog_or_tty->print_cr("used %1.2lf, live %1.2lf, garbage %1.2lf", | |
1766 (double) g1_par_count_task.used_bytes() / (double) (1024 * 1024), | |
1767 (double) g1_par_count_task.live_bytes() / (double) (1024 * 1024), | |
1768 (double) known_garbage_bytes / (double) (1024 * 1024)); | |
1769 #endif // 0 | |
1770 g1p->set_known_garbage_bytes(known_garbage_bytes); | |
1771 | |
1772 size_t start_used_bytes = g1h->used(); | |
1773 _at_least_one_mark_complete = true; | |
1774 g1h->set_marking_complete(); | |
1775 | |
1776 double count_end = os::elapsedTime(); | |
1777 double this_final_counting_time = (count_end - start); | |
1778 if (G1PrintParCleanupStats) { | |
1779 gclog_or_tty->print_cr("Cleanup:"); | |
1780 gclog_or_tty->print_cr(" Finalize counting: %8.3f ms", | |
1781 this_final_counting_time*1000.0); | |
1782 } | |
1783 _total_counting_time += this_final_counting_time; | |
1784 | |
2435
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
1785 if (G1PrintRegionLivenessInfo) { |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
1786 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
1787 _g1h->heap_region_iterate(&cl); |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
1788 } |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
1789 |
342 | 1790 // Install newly created mark bitMap as "prev". |
1791 swapMarkBitMaps(); | |
1792 | |
1793 g1h->reset_gc_time_stamp(); | |
1794 | |
1795 // Note end of marking in all heap regions. | |
1796 double note_end_start = os::elapsedTime(); | |
2152 | 1797 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
1798 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 1799 int n_workers = g1h->workers()->total_workers(); |
1800 g1h->set_par_threads(n_workers); | |
1801 g1h->workers()->run_task(&g1_par_note_end_task); | |
1802 g1h->set_par_threads(0); | |
355 | 1803 |
1804 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), | |
1805 "sanity check"); | |
342 | 1806 } else { |
1807 g1_par_note_end_task.work(0); | |
1808 } | |
2152 | 1809 |
1810 if (!cleanup_list_is_empty()) { | |
1811 // The cleanup list is not empty, so we'll have to process it | |
1812 // concurrently. Notify anyone else that might be wanting free | |
1813 // regions that there will be more free regions coming soon. | |
1814 g1h->set_free_regions_coming(); | |
1815 } | |
342 | 1816 double note_end_end = os::elapsedTime(); |
1817 if (G1PrintParCleanupStats) { | |
1818 gclog_or_tty->print_cr(" note end of marking: %8.3f ms.", | |
1819 (note_end_end - note_end_start)*1000.0); | |
1820 } | |
1821 | |
355 | 1822 |
342 | 1823 // call below, since it affects the metric by which we sort the heap |
1824 // regions. | |
1825 if (G1ScrubRemSets) { | |
1826 double rs_scrub_start = os::elapsedTime(); | |
1827 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
1828 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 1829 int n_workers = g1h->workers()->total_workers(); |
1830 g1h->set_par_threads(n_workers); | |
1831 g1h->workers()->run_task(&g1_par_scrub_rs_task); | |
1832 g1h->set_par_threads(0); | |
355 | 1833 |
1834 assert(g1h->check_heap_region_claim_values( | |
1835 HeapRegion::ScrubRemSetClaimValue), | |
1836 "sanity check"); | |
342 | 1837 } else { |
1838 g1_par_scrub_rs_task.work(0); | |
1839 } | |
1840 | |
1841 double rs_scrub_end = os::elapsedTime(); | |
1842 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); | |
1843 _total_rs_scrub_time += this_rs_scrub_time; | |
1844 } | |
1845 | |
1846 // this will also free any regions totally full of garbage objects, | |
1847 // and sort the regions. | |
1848 g1h->g1_policy()->record_concurrent_mark_cleanup_end( | |
1849 g1_par_note_end_task.freed_bytes(), | |
1850 g1_par_note_end_task.max_live_bytes()); | |
1851 | |
1852 // Statistics. | |
1853 double end = os::elapsedTime(); | |
1854 _cleanup_times.add((end - start) * 1000.0); | |
1855 | |
1856 // G1CollectedHeap::heap()->print(); | |
1857 // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d", | |
1858 // G1CollectedHeap::heap()->get_gc_time_stamp()); | |
1859 | |
1860 if (PrintGC || PrintGCDetails) { | |
1861 g1h->print_size_transition(gclog_or_tty, | |
1862 start_used_bytes, | |
1863 g1h->used(), | |
1864 g1h->capacity()); | |
1865 } | |
1866 | |
1867 size_t cleaned_up_bytes = start_used_bytes - g1h->used(); | |
1868 g1p->decrease_known_garbage_bytes(cleaned_up_bytes); | |
1869 | |
1870 // We need to make this be a "collection" so any collection pause that | |
1871 // races with it goes around and waits for completeCleanup to finish. | |
1872 g1h->increment_total_collections(); | |
1873 | |
751 | 1874 if (VerifyDuringGC) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1875 HandleMark hm; // handle scope |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1876 gclog_or_tty->print(" VerifyDuringGC:(after)"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1877 Universe::heap()->prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1878 Universe::verify(/* allow dirty */ true, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1879 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
1880 /* option */ VerifyOption_G1UsePrevMarking); |
342 | 1881 } |
2152 | 1882 |
1883 g1h->verify_region_sets_optional(); | |
342 | 1884 } |
1885 | |
1886 void ConcurrentMark::completeCleanup() { | |
1887 if (has_aborted()) return; | |
1888 | |
2152 | 1889 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
1890 | |
1891 _cleanup_list.verify_optional(); | |
2361 | 1892 FreeRegionList tmp_free_list("Tmp Free List"); |
2152 | 1893 |
1894 if (G1ConcRegionFreeingVerbose) { | |
1895 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " | |
1896 "cleanup list has "SIZE_FORMAT" entries", | |
1897 _cleanup_list.length()); | |
1898 } | |
1899 | |
1900 // Noone else should be accessing the _cleanup_list at this point, | |
1901 // so it's not necessary to take any locks | |
1902 while (!_cleanup_list.is_empty()) { | |
1903 HeapRegion* hr = _cleanup_list.remove_head(); | |
1904 assert(hr != NULL, "the list was not empty"); | |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3316
diff
changeset
|
1905 hr->par_clear(); |
2361 | 1906 tmp_free_list.add_as_tail(hr); |
2152 | 1907 |
1908 // Instead of adding one region at a time to the secondary_free_list, | |
1909 // we accumulate them in the local list and move them a few at a | |
1910 // time. This also cuts down on the number of notify_all() calls | |
1911 // we do during this process. We'll also append the local list when | |
1912 // _cleanup_list is empty (which means we just removed the last | |
1913 // region from the _cleanup_list). | |
2361 | 1914 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || |
2152 | 1915 _cleanup_list.is_empty()) { |
1916 if (G1ConcRegionFreeingVerbose) { | |
1917 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " | |
1918 "appending "SIZE_FORMAT" entries to the " | |
1919 "secondary_free_list, clean list still has " | |
1920 SIZE_FORMAT" entries", | |
2361 | 1921 tmp_free_list.length(), |
2152 | 1922 _cleanup_list.length()); |
342 | 1923 } |
2152 | 1924 |
1925 { | |
1926 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
2361 | 1927 g1h->secondary_free_list_add_as_tail(&tmp_free_list); |
2152 | 1928 SecondaryFreeList_lock->notify_all(); |
1929 } | |
1930 | |
1931 if (G1StressConcRegionFreeing) { | |
1932 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { | |
1933 os::sleep(Thread::current(), (jlong) 1, false); | |
1934 } | |
1935 } | |
342 | 1936 } |
1937 } | |
2361 | 1938 assert(tmp_free_list.is_empty(), "post-condition"); |
342 | 1939 } |
1940 | |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1941 // Support closures for reference procssing in G1 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1942 |
2037
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
1974
diff
changeset
|
1943 bool G1CMIsAliveClosure::do_object_b(oop obj) { |
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
1974
diff
changeset
|
1944 HeapWord* addr = (HeapWord*)obj; |
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
1974
diff
changeset
|
1945 return addr != NULL && |
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
1974
diff
changeset
|
1946 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); |
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
1974
diff
changeset
|
1947 } |
342 | 1948 |
1949 class G1CMKeepAliveClosure: public OopClosure { | |
1950 G1CollectedHeap* _g1; | |
1951 ConcurrentMark* _cm; | |
1952 CMBitMap* _bitMap; | |
1953 public: | |
1954 G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm, | |
1955 CMBitMap* bitMap) : | |
1956 _g1(g1), _cm(cm), | |
1957 _bitMap(bitMap) {} | |
1958 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1959 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1960 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1961 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
1962 template <class T> void do_oop_work(T* p) { |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1963 oop obj = oopDesc::load_decode_heap_oop(p); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1964 HeapWord* addr = (HeapWord*)obj; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1965 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1966 if (_cm->verbose_high()) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1967 gclog_or_tty->print_cr("\t[0] we're looking at location " |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1968 "*"PTR_FORMAT" = "PTR_FORMAT, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1969 p, (void*) obj); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1970 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1971 if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) { |
342 | 1972 _bitMap->mark(addr); |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1973 _cm->mark_stack_push(obj); |
342 | 1974 } |
1975 } | |
1976 }; | |
1977 | |
1978 class G1CMDrainMarkingStackClosure: public VoidClosure { | |
1979 CMMarkStack* _markStack; | |
1980 CMBitMap* _bitMap; | |
1981 G1CMKeepAliveClosure* _oopClosure; | |
1982 public: | |
1983 G1CMDrainMarkingStackClosure(CMBitMap* bitMap, CMMarkStack* markStack, | |
1984 G1CMKeepAliveClosure* oopClosure) : | |
1985 _bitMap(bitMap), | |
1986 _markStack(markStack), | |
1987 _oopClosure(oopClosure) | |
1988 {} | |
1989 | |
1990 void do_void() { | |
1991 _markStack->drain((OopClosure*)_oopClosure, _bitMap, false); | |
1992 } | |
1993 }; | |
1994 | |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1995 // 'Keep Alive' closure used by parallel reference processing. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1996 // An instance of this closure is used in the parallel reference processing |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1997 // code rather than an instance of G1CMKeepAliveClosure. We could have used |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1998 // the G1CMKeepAliveClosure as it is MT-safe. Also reference objects are |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
1999 // placed on to discovered ref lists once so we can mark and push with no |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2000 // need to check whether the object has already been marked. Using the |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2001 // G1CMKeepAliveClosure would mean, however, having all the worker threads |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2002 // operating on the global mark stack. This means that an individual |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2003 // worker would be doing lock-free pushes while it processes its own |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2004 // discovered ref list followed by drain call. If the discovered ref lists |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2005 // are unbalanced then this could cause interference with the other |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2006 // workers. Using a CMTask (and its embedded local data structures) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2007 // avoids that potential interference. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2008 class G1CMParKeepAliveAndDrainClosure: public OopClosure { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2009 ConcurrentMark* _cm; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2010 CMTask* _task; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2011 CMBitMap* _bitMap; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2012 int _ref_counter_limit; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2013 int _ref_counter; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2014 public: |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2015 G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2016 CMTask* task, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2017 CMBitMap* bitMap) : |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2018 _cm(cm), _task(task), _bitMap(bitMap), |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2019 _ref_counter_limit(G1RefProcDrainInterval) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2020 { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2021 assert(_ref_counter_limit > 0, "sanity"); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2022 _ref_counter = _ref_counter_limit; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2023 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2024 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2025 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2026 virtual void do_oop( oop* p) { do_oop_work(p); } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2027 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2028 template <class T> void do_oop_work(T* p) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2029 if (!_cm->has_overflown()) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2030 oop obj = oopDesc::load_decode_heap_oop(p); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2031 if (_cm->verbose_high()) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2032 gclog_or_tty->print_cr("\t[%d] we're looking at location " |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2033 "*"PTR_FORMAT" = "PTR_FORMAT, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2034 _task->task_id(), p, (void*) obj); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2035 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2036 _task->deal_with_reference(obj); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2037 _ref_counter--; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2038 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2039 if (_ref_counter == 0) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2040 // We have dealt with _ref_counter_limit references, pushing them and objects |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2041 // reachable from them on to the local stack (and possibly the global stack). |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2042 // Call do_marking_step() to process these entries. We call the routine in a |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2043 // loop, which we'll exit if there's nothing more to do (i.e. we're done |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2044 // with the entries that we've pushed as a result of the deal_with_reference |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2045 // calls above) or we overflow. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2046 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2047 // while there may still be some work to do. (See the comment at the |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2048 // beginning of CMTask::do_marking_step() for those conditions - one of which |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2049 // is reaching the specified time target.) It is only when |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2050 // CMTask::do_marking_step() returns without setting the has_aborted() flag |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2051 // that the marking has completed. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2052 do { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2053 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2054 _task->do_marking_step(mark_step_duration_ms, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2055 false /* do_stealing */, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2056 false /* do_termination */); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2057 } while (_task->has_aborted() && !_cm->has_overflown()); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2058 _ref_counter = _ref_counter_limit; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2059 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2060 } else { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2061 if (_cm->verbose_high()) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2062 gclog_or_tty->print_cr("\t[%d] CM Overflow", _task->task_id()); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2063 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2064 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2065 }; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2066 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2067 class G1CMParDrainMarkingStackClosure: public VoidClosure { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2068 ConcurrentMark* _cm; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2069 CMTask* _task; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2070 public: |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2071 G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) : |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2072 _cm(cm), _task(task) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2073 {} |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2074 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2075 void do_void() { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2076 do { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2077 if (_cm->verbose_high()) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2078 gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step", _task->task_id()); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2079 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2080 // We call CMTask::do_marking_step() to completely drain the local and |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2081 // global marking stacks. The routine is called in a loop, which we'll |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2082 // exit if there's nothing more to do (i.e. we've completely drained the |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2083 // entries that were pushed as a result of applying the |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2084 // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2085 // lists above) or we overflow the global marking stack. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2086 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2087 // while there may still be some work to do. (See the comment at the |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2088 // beginning of CMTask::do_marking_step() for those conditions - one of which |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2089 // is reaching the specified time target.) It is only when |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2090 // CMTask::do_marking_step() returns without setting the has_aborted() flag |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2091 // that the marking has completed. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2092 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2093 _task->do_marking_step(1000000000.0 /* something very large */, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2094 true /* do_stealing */, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2095 true /* do_termination */); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2096 } while (_task->has_aborted() && !_cm->has_overflown()); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2097 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2098 }; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2099 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2100 // Implementation of AbstractRefProcTaskExecutor for G1 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2101 class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2102 private: |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2103 G1CollectedHeap* _g1h; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2104 ConcurrentMark* _cm; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2105 CMBitMap* _bitmap; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2106 WorkGang* _workers; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2107 int _active_workers; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2108 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2109 public: |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2110 G1RefProcTaskExecutor(G1CollectedHeap* g1h, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2111 ConcurrentMark* cm, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2112 CMBitMap* bitmap, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2113 WorkGang* workers, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2114 int n_workers) : |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2115 _g1h(g1h), _cm(cm), _bitmap(bitmap), |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2116 _workers(workers), _active_workers(n_workers) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2117 { } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2118 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2119 // Executes the given task using concurrent marking worker threads. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2120 virtual void execute(ProcessTask& task); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2121 virtual void execute(EnqueueTask& task); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2122 }; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2123 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2124 class G1RefProcTaskProxy: public AbstractGangTask { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2125 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2126 ProcessTask& _proc_task; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2127 G1CollectedHeap* _g1h; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2128 ConcurrentMark* _cm; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2129 CMBitMap* _bitmap; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2130 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2131 public: |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2132 G1RefProcTaskProxy(ProcessTask& proc_task, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2133 G1CollectedHeap* g1h, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2134 ConcurrentMark* cm, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2135 CMBitMap* bitmap) : |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2136 AbstractGangTask("Process reference objects in parallel"), |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2137 _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2138 {} |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2139 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2140 virtual void work(int i) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2141 CMTask* marking_task = _cm->task(i); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2142 G1CMIsAliveClosure g1_is_alive(_g1h); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2143 G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2144 G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2145 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2146 _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2147 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2148 }; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2149 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2150 void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2151 assert(_workers != NULL, "Need parallel worker threads."); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2152 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2153 G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2154 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2155 // We need to reset the phase for each task execution so that |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2156 // the termination protocol of CMTask::do_marking_step works. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2157 _cm->set_phase(_active_workers, false /* concurrent */); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2158 _g1h->set_par_threads(_active_workers); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2159 _workers->run_task(&proc_task_proxy); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2160 _g1h->set_par_threads(0); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2161 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2162 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2163 class G1RefEnqueueTaskProxy: public AbstractGangTask { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2164 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2165 EnqueueTask& _enq_task; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2166 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2167 public: |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2168 G1RefEnqueueTaskProxy(EnqueueTask& enq_task) : |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2169 AbstractGangTask("Enqueue reference objects in parallel"), |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2170 _enq_task(enq_task) |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2171 { } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2172 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2173 virtual void work(int i) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2174 _enq_task.work(i); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2175 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2176 }; |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2177 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2178 void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2179 assert(_workers != NULL, "Need parallel worker threads."); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2180 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2181 G1RefEnqueueTaskProxy enq_task_proxy(enq_task); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2182 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2183 _g1h->set_par_threads(_active_workers); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2184 _workers->run_task(&enq_task_proxy); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2185 _g1h->set_par_threads(0); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2186 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2187 |
342 | 2188 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { |
2189 ResourceMark rm; | |
2190 HandleMark hm; | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
355
diff
changeset
|
2191 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
355
diff
changeset
|
2192 ReferenceProcessor* rp = g1h->ref_processor(); |
342 | 2193 |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1972
diff
changeset
|
2194 // See the comment in G1CollectedHeap::ref_processing_init() |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1972
diff
changeset
|
2195 // about how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1972
diff
changeset
|
2196 |
342 | 2197 // Process weak references. |
457
27a80744a83b
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
ysr
parents:
453
diff
changeset
|
2198 rp->setup_policy(clear_all_soft_refs); |
342 | 2199 assert(_markStack.isEmpty(), "mark stack should be empty"); |
2200 | |
2037
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
1974
diff
changeset
|
2201 G1CMIsAliveClosure g1_is_alive(g1h); |
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
1974
diff
changeset
|
2202 G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap()); |
342 | 2203 G1CMDrainMarkingStackClosure |
2037
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
1974
diff
changeset
|
2204 g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive); |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2205 // We use the work gang from the G1CollectedHeap and we utilize all |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2206 // the worker threads. |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2207 int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1; |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2208 active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1); |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2209 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2210 G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(), |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2211 g1h->workers(), active_workers); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2212 |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2213 |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2214 if (rp->processing_is_mt()) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2215 // Set the degree of MT here. If the discovery is done MT, there |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2216 // may have been a different number of threads doing the discovery |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2217 // and a different number of discovered lists may have Ref objects. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2218 // That is OK as long as the Reference lists are balanced (see |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2219 // balance_all_queues() and balance_queues()). |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2220 rp->set_active_mt_degree(active_workers); |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2221 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2222 rp->process_discovered_references(&g1_is_alive, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2223 &g1_keep_alive, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2224 &g1_drain_mark_stack, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2225 &par_task_executor); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2226 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2227 // The work routines of the parallel keep_alive and drain_marking_stack |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2228 // will set the has_overflown flag if we overflow the global marking |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2229 // stack. |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2230 } else { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2231 rp->process_discovered_references(&g1_is_alive, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2232 &g1_keep_alive, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2233 &g1_drain_mark_stack, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2234 NULL); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2235 |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2236 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2237 |
342 | 2238 assert(_markStack.overflow() || _markStack.isEmpty(), |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2239 "mark stack should be empty (unless it overflowed)"); |
342 | 2240 if (_markStack.overflow()) { |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2241 // Should have been done already when we tried to push an |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2242 // entry on to the global mark stack. But let's do it again. |
342 | 2243 set_has_overflown(); |
2244 } | |
2245 | |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2246 if (rp->processing_is_mt()) { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2247 assert(rp->num_q() == active_workers, "why not"); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2248 rp->enqueue_discovered_references(&par_task_executor); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2249 } else { |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2250 rp->enqueue_discovered_references(); |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2251 } |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2252 |
342 | 2253 rp->verify_no_references_recorded(); |
2254 assert(!rp->discovery_enabled(), "should have been disabled"); | |
2255 | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2175
diff
changeset
|
2256 // Now clean up stale oops in StringTable |
2037
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
1974
diff
changeset
|
2257 StringTable::unlink(&g1_is_alive); |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2175
diff
changeset
|
2258 // Clean up unreferenced symbols in symbol table. |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2175
diff
changeset
|
2259 SymbolTable::unlink(); |
342 | 2260 } |
2261 | |
2262 void ConcurrentMark::swapMarkBitMaps() { | |
2263 CMBitMapRO* temp = _prevMarkBitMap; | |
2264 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; | |
2265 _nextMarkBitMap = (CMBitMap*) temp; | |
2266 } | |
2267 | |
2268 class CMRemarkTask: public AbstractGangTask { | |
2269 private: | |
2270 ConcurrentMark *_cm; | |
2271 | |
2272 public: | |
2273 void work(int worker_i) { | |
2274 // Since all available tasks are actually started, we should | |
2275 // only proceed if we're supposed to be activated. |
2276 if ((size_t)worker_i < _cm->active_tasks()) { | |
2277 CMTask* task = _cm->task(worker_i); | |
2278 task->record_start_time(); | |
2279 do { | |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2280 task->do_marking_step(1000000000.0 /* something very large */, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2281 true /* do_stealing */, |
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2282 true /* do_termination */); |
342 | 2283 } while (task->has_aborted() && !_cm->has_overflown()); |
2284 // If we overflow, then we do not want to restart. We instead | |
2285 // want to abort remark and do concurrent marking again. | |
2286 task->record_end_time(); | |
2287 } | |
2288 } | |
2289 | |
2290 CMRemarkTask(ConcurrentMark* cm) : | |
2291 AbstractGangTask("Par Remark"), _cm(cm) { } | |
2292 }; | |
2293 | |
2294 void ConcurrentMark::checkpointRootsFinalWork() { | |
2295 ResourceMark rm; | |
2296 HandleMark hm; | |
2297 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
2298 | |
2299 g1h->ensure_parsability(false); | |
2300 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
2301 if (G1CollectedHeap::use_parallel_gc_threads()) { |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
912
diff
changeset
|
2302 G1CollectedHeap::StrongRootsScope srs(g1h); |
342 | 2303 // this is remark, so we'll use up all available threads |
2304 int active_workers = ParallelGCThreads; | |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2305 set_phase(active_workers, false /* concurrent */); |
342 | 2306 |
2307 CMRemarkTask remarkTask(this); | |
2308 // We will start all available threads, even if we decide that the | |
2309 // active_workers will be fewer. The extra ones will just bail out | |
2310 // immediately. | |
2311 int n_workers = g1h->workers()->total_workers(); | |
2312 g1h->set_par_threads(n_workers); | |
2313 g1h->workers()->run_task(&remarkTask); | |
2314 g1h->set_par_threads(0); | |
2315 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
912
diff
changeset
|
2316 G1CollectedHeap::StrongRootsScope srs(g1h); |
342 | 2317 // this is remark, so we'll use up all available threads |
2318 int active_workers = 1; | |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
2319 set_phase(active_workers, false /* concurrent */); |
342 | 2320 |
2321 CMRemarkTask remarkTask(this); | |
2322 // We will start all available threads, even if we decide that the | |
2323 // active_workers will be fewer. The extra ones will just bail out | |
2324 // immediately. | |
2325 remarkTask.work(0); | |
2326 } | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2327 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2328 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant"); |
342 | 2329 |
2330 print_stats(); | |
2331 | |
2332 #if VERIFY_OBJS_PROCESSED | |
2333 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) { | |
2334 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.", | |
2335 _scan_obj_cl.objs_processed, | |
2336 ThreadLocalObjQueue::objs_enqueued); | |
2337 guarantee(_scan_obj_cl.objs_processed == | |
2338 ThreadLocalObjQueue::objs_enqueued, | |
2339 "Different number of objs processed and enqueued."); | |
2340 } | |
2341 #endif | |
2342 } | |
2343 | |
1044 | 2344 #ifndef PRODUCT |
2345 | |
1388 | 2346 class PrintReachableOopClosure: public OopClosure { |
342 | 2347 private: |
2348 G1CollectedHeap* _g1h; | |
2349 outputStream* _out; | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2350 VerifyOption _vo; |
1388 | 2351 bool _all; |
342 | 2352 |
2353 public: | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2354 PrintReachableOopClosure(outputStream* out, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2355 VerifyOption vo, |
1388 | 2356 bool all) : |
1044 | 2357 _g1h(G1CollectedHeap::heap()), |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2358 _out(out), _vo(vo), _all(all) { } |
342 | 2359 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2360 void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2361 void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2362 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2363 template <class T> void do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2364 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 2365 const char* str = NULL; |
2366 const char* str2 = ""; | |
2367 | |
1388 | 2368 if (obj == NULL) { |
2369 str = ""; | |
2370 } else if (!_g1h->is_in_g1_reserved(obj)) { | |
2371 str = " O"; | |
2372 } else { | |
342 | 2373 HeapRegion* hr = _g1h->heap_region_containing(obj); |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2374 guarantee(hr != NULL, "invariant"); |
1044 | 2375 bool over_tams = false; |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2376 bool marked = false; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2377 |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2378 switch (_vo) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2379 case VerifyOption_G1UsePrevMarking: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2380 over_tams = hr->obj_allocated_since_prev_marking(obj); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2381 marked = _g1h->isMarkedPrev(obj); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2382 break; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2383 case VerifyOption_G1UseNextMarking: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2384 over_tams = hr->obj_allocated_since_next_marking(obj); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2385 marked = _g1h->isMarkedNext(obj); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2386 break; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2387 case VerifyOption_G1UseMarkWord: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2388 marked = obj->is_gc_marked(); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2389 break; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2390 default: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2391 ShouldNotReachHere(); |
1044 | 2392 } |
2393 | |
2394 if (over_tams) { | |
1388 | 2395 str = " >"; |
2396 if (marked) { | |
342 | 2397 str2 = " AND MARKED"; |
1044 | 2398 } |
1388 | 2399 } else if (marked) { |
2400 str = " M"; | |
1044 | 2401 } else { |
1388 | 2402 str = " NOT"; |
1044 | 2403 } |
342 | 2404 } |
2405 | |
1388 | 2406 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", |
342 | 2407 p, (void*) obj, str, str2); |
2408 } | |
2409 }; | |
2410 | |
1388 | 2411 class PrintReachableObjectClosure : public ObjectClosure { |
342 | 2412 private: |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2413 G1CollectedHeap* _g1h; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2414 outputStream* _out; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2415 VerifyOption _vo; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2416 bool _all; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2417 HeapRegion* _hr; |
342 | 2418 |
2419 public: | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2420 PrintReachableObjectClosure(outputStream* out, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2421 VerifyOption vo, |
1388 | 2422 bool all, |
2423 HeapRegion* hr) : | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2424 _g1h(G1CollectedHeap::heap()), |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2425 _out(out), _vo(vo), _all(all), _hr(hr) { } |
1388 | 2426 |
2427 void do_object(oop o) { | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2428 bool over_tams = false; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2429 bool marked = false; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2430 |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2431 switch (_vo) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2432 case VerifyOption_G1UsePrevMarking: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2433 over_tams = _hr->obj_allocated_since_prev_marking(o); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2434 marked = _g1h->isMarkedPrev(o); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2435 break; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2436 case VerifyOption_G1UseNextMarking: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2437 over_tams = _hr->obj_allocated_since_next_marking(o); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2438 marked = _g1h->isMarkedNext(o); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2439 break; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2440 case VerifyOption_G1UseMarkWord: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2441 marked = o->is_gc_marked(); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2442 break; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2443 default: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2444 ShouldNotReachHere(); |
1388 | 2445 } |
2446 bool print_it = _all || over_tams || marked; | |
2447 | |
2448 if (print_it) { | |
2449 _out->print_cr(" "PTR_FORMAT"%s", | |
2450 o, (over_tams) ? " >" : (marked) ? " M" : ""); | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2451 PrintReachableOopClosure oopCl(_out, _vo, _all); |
1388 | 2452 o->oop_iterate(&oopCl); |
2453 } | |
342 | 2454 } |
2455 }; | |
2456 | |
1388 | 2457 class PrintReachableRegionClosure : public HeapRegionClosure { |
342 | 2458 private: |
2459 outputStream* _out; | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2460 VerifyOption _vo; |
1388 | 2461 bool _all; |
342 | 2462 |
2463 public: | |
2464 bool doHeapRegion(HeapRegion* hr) { | |
2465 HeapWord* b = hr->bottom(); | |
2466 HeapWord* e = hr->end(); | |
2467 HeapWord* t = hr->top(); | |
1044 | 2468 HeapWord* p = NULL; |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2469 |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2470 switch (_vo) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2471 case VerifyOption_G1UsePrevMarking: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2472 p = hr->prev_top_at_mark_start(); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2473 break; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2474 case VerifyOption_G1UseNextMarking: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2475 p = hr->next_top_at_mark_start(); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2476 break; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2477 case VerifyOption_G1UseMarkWord: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2478 // When we are verifying marking using the mark word |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2479 // TAMS has no relevance. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2480 assert(p == NULL, "post-condition"); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2481 break; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2482 default: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2483 ShouldNotReachHere(); |
1044 | 2484 } |
342 | 2485 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " |
1044 | 2486 "TAMS: "PTR_FORMAT, b, e, t, p); |
1388 | 2487 _out->cr(); |
2488 | |
2489 HeapWord* from = b; | |
2490 HeapWord* to = t; | |
2491 | |
2492 if (to > from) { | |
2493 _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to); | |
2494 _out->cr(); | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2495 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); |
1388 | 2496 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); |
2497 _out->cr(); | |
2498 } | |
342 | 2499 |
2500 return false; | |
2501 } | |
2502 | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2503 PrintReachableRegionClosure(outputStream* out, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2504 VerifyOption vo, |
1388 | 2505 bool all) : |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2506 _out(out), _vo(vo), _all(all) { } |
342 | 2507 }; |
2508 | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2509 static const char* verify_option_to_tams(VerifyOption vo) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2510 switch (vo) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2511 case VerifyOption_G1UsePrevMarking: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2512 return "PTAMS"; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2513 case VerifyOption_G1UseNextMarking: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2514 return "NTAMS"; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2515 default: |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2516 return "NONE"; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2517 } |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2518 } |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2519 |
1388 | 2520 void ConcurrentMark::print_reachable(const char* str, |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2521 VerifyOption vo, |
1388 | 2522 bool all) { |
2523 gclog_or_tty->cr(); | |
2524 gclog_or_tty->print_cr("== Doing heap dump... "); | |
1044 | 2525 |
2526 if (G1PrintReachableBaseFile == NULL) { | |
2527 gclog_or_tty->print_cr(" #### error: no base file defined"); | |
2528 return; | |
2529 } | |
2530 | |
2531 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > | |
2532 (JVM_MAXPATHLEN - 1)) { | |
2533 gclog_or_tty->print_cr(" #### error: file name too long"); | |
2534 return; | |
2535 } | |
2536 | |
2537 char file_name[JVM_MAXPATHLEN]; | |
2538 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); | |
2539 gclog_or_tty->print_cr(" dumping to file %s", file_name); | |
2540 | |
2541 fileStream fout(file_name); | |
2542 if (!fout.is_open()) { | |
2543 gclog_or_tty->print_cr(" #### error: could not open file"); | |
2544 return; | |
2545 } | |
2546 | |
2547 outputStream* out = &fout; | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2548 out->print_cr("-- USING %s", verify_option_to_tams(vo)); |
1044 | 2549 out->cr(); |
2550 | |
1388 | 2551 out->print_cr("--- ITERATING OVER REGIONS"); |
1044 | 2552 out->cr(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3771
diff
changeset
|
2553 PrintReachableRegionClosure rcl(out, vo, all); |
1388 | 2554 _g1h->heap_region_iterate(&rcl); |
1044 | 2555 out->cr(); |
2556 | |
2557 gclog_or_tty->print_cr(" done"); | |
1388 | 2558 gclog_or_tty->flush(); |
342 | 2559 } |
2560 | |
1044 | 2561 #endif // PRODUCT |
2562 | |
342 | 2563 // This note is for drainAllSATBBuffers and the code in between. |
2564 // In the future we could reuse a task to do this work during an | |
2565 // evacuation pause (since now tasks are not active and can be claimed | |
2566 // during an evacuation pause). This was a late change to the code and | |
2567 // is currently not being taken advantage of. | |
2568 | |
2569 class CMGlobalObjectClosure : public ObjectClosure { | |
2570 private: | |
2571 ConcurrentMark* _cm; | |
2572 | |
2573 public: | |
2574 void do_object(oop obj) { | |
2575 _cm->deal_with_reference(obj); | |
2576 } | |
2577 | |
2578 CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { } | |
2579 }; | |
2580 | |
2581 void ConcurrentMark::deal_with_reference(oop obj) { | |
3771 | 2582 if (verbose_high()) { |
342 | 2583 gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT, |
2584 (void*) obj); | |
3771 | 2585 } |
342 | 2586 |
2587 HeapWord* objAddr = (HeapWord*) obj; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2588 assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); |
342 | 2589 if (_g1h->is_in_g1_reserved(objAddr)) { |
3771 | 2590 assert(obj != NULL, "null check is implicit"); |
2591 if (!_nextMarkBitMap->isMarked(objAddr)) { | |
2592 // Only get the containing region if the object is not marked on the | |
2593 // bitmap (otherwise, it's a waste of time since we won't do | |
2594 // anything with it). | |
2595 HeapRegion* hr = _g1h->heap_region_containing_raw(obj); | |
2596 if (!hr->obj_allocated_since_next_marking(obj)) { | |
2597 if (verbose_high()) { | |
2598 gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered " | |
2599 "marked", (void*) obj); | |
2600 } | |
2601 | |
2602 // we need to mark it first | |
2603 if (_nextMarkBitMap->parMark(objAddr)) { | |
2604 // No OrderAccess:store_load() is needed. It is implicit in the | |
2605 // CAS done in parMark(objAddr) above | |
2606 HeapWord* finger = _finger; | |
2607 if (objAddr < finger) { | |
2608 if (verbose_high()) { | |
2609 gclog_or_tty->print_cr("[global] below the global finger " | |
2610 "("PTR_FORMAT"), pushing it", finger); | |
2611 } | |
2612 if (!mark_stack_push(obj)) { | |
2613 if (verbose_low()) { | |
2614 gclog_or_tty->print_cr("[global] global stack overflow during " | |
2615 "deal_with_reference"); | |
2616 } | |
2617 } | |
342 | 2618 } |
2619 } | |
2620 } | |
2621 } | |
2622 } | |
2623 } | |
2624 | |
2625 void ConcurrentMark::drainAllSATBBuffers() { | |
2626 CMGlobalObjectClosure oc(this); | |
2627 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
2628 satb_mq_set.set_closure(&oc); | |
2629 | |
2630 while (satb_mq_set.apply_closure_to_completed_buffer()) { | |
2631 if (verbose_medium()) | |
2632 gclog_or_tty->print_cr("[global] processed an SATB buffer"); | |
2633 } | |
2634 | |
2635 // no need to check whether we should do this, as this is only | |
2636 // called during an evacuation pause | |
2637 satb_mq_set.iterate_closure_all_threads(); | |
2638 | |
2639 satb_mq_set.set_closure(NULL); | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2640 assert(satb_mq_set.completed_buffers_num() == 0, "invariant"); |
342 | 2641 } |
2642 | |
2643 void ConcurrentMark::markPrev(oop p) { | |
2644 // Note we are overriding the read-only view of the prev map here, via | |
2645 // the cast. | |
2646 ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*)p); | |
2647 } | |
2648 | |
2649 void ConcurrentMark::clear(oop p) { | |
2650 assert(p != NULL && p->is_oop(), "expected an oop"); | |
2651 HeapWord* addr = (HeapWord*)p; | |
2652 assert(addr >= _nextMarkBitMap->startWord() || | |
2653 addr < _nextMarkBitMap->endWord(), "in a region"); | |
2654 | |
2655 _nextMarkBitMap->clear(addr); | |
2656 } | |
2657 | |
2658 void ConcurrentMark::clearRangeBothMaps(MemRegion mr) { | |
2659 // Note we are overriding the read-only view of the prev map here, via | |
2660 // the cast. | |
2661 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); | |
2662 _nextMarkBitMap->clearRange(mr); | |
2663 } | |
2664 | |
2665 HeapRegion* | |
2666 ConcurrentMark::claim_region(int task_num) { | |
2667 // "checkpoint" the finger | |
2668 HeapWord* finger = _finger; | |
2669 | |
2670 // _heap_end will not change underneath our feet; it only changes at | |
2671 // yield points. | |
2672 while (finger < _heap_end) { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2673 assert(_g1h->is_in_g1_reserved(finger), "invariant"); |
342 | 2674 |
3771 | 2675 // Note on how this code handles humongous regions. In the |
2676 // normal case the finger will reach the start of a "starts | |
2677 // humongous" (SH) region. Its end will either be the end of the | |
2678 // last "continues humongous" (CH) region in the sequence, or the | |
2679 // standard end of the SH region (if the SH is the only region in | |
2680 // the sequence). That way claim_region() will skip over the CH | |
2681 // regions. However, there is a subtle race between a CM thread | |
2682 // executing this method and a mutator thread doing a humongous | |
2683 // object allocation. The two are not mutually exclusive as the CM | |
2684 // thread does not need to hold the Heap_lock when it gets | |
2685 // here. So there is a chance that claim_region() will come across | |
2686 // a free region that's in the progress of becoming a SH or a CH | |
2687 // region. In the former case, it will either | |
2688 // a) Miss the update to the region's end, in which case it will | |
2689 // visit every subsequent CH region, will find their bitmaps | |
2690 // empty, and do nothing, or | |
2691 // b) Will observe the update of the region's end (in which case | |
2692 // it will skip the subsequent CH regions). | |
2693 // If it comes across a region that suddenly becomes CH, the | |
2694 // scenario will be similar to b). So, the race between | |
2695 // claim_region() and a humongous object allocation might force us | |
2696 // to do a bit of unnecessary work (due to some unnecessary bitmap | |
2697 // iterations) but it should not introduce and correctness issues. | |
2698 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); | |
342 | 2699 HeapWord* bottom = curr_region->bottom(); |
2700 HeapWord* end = curr_region->end(); | |
2701 HeapWord* limit = curr_region->next_top_at_mark_start(); | |
2702 | |
3771 | 2703 if (verbose_low()) { |
342 | 2704 gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" " |
2705 "["PTR_FORMAT", "PTR_FORMAT"), " | |
2706 "limit = "PTR_FORMAT, | |
2707 task_num, curr_region, bottom, end, limit); | |
3771 | 2708 } |
2709 | |
2710 // Is the gap between reading the finger and doing the CAS too long? | |
2711 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); | |
342 | 2712 if (res == finger) { |
2713 // we succeeded | |
2714 | |
2715 // notice that _finger == end cannot be guaranteed here since, | |
2716 // someone else might have moved the finger even further | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2717 assert(_finger >= end, "the finger should have moved forward"); |
342 | 2718 |
2719 if (verbose_low()) | |
2720 gclog_or_tty->print_cr("[%d] we were successful with region = " | |
2721 PTR_FORMAT, task_num, curr_region); | |
2722 | |
2723 if (limit > bottom) { | |
2724 if (verbose_low()) | |
2725 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, " | |
2726 "returning it ", task_num, curr_region); | |
2727 return curr_region; | |
2728 } else { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2729 assert(limit == bottom, |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2730 "the region limit should be at bottom"); |
342 | 2731 if (verbose_low()) |
2732 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, " | |
2733 "returning NULL", task_num, curr_region); | |
2734 // we return NULL and the caller should try calling | |
2735 // claim_region() again. | |
2736 return NULL; | |
2737 } | |
2738 } else { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2739 assert(_finger > finger, "the finger should have moved forward"); |
342 | 2740 if (verbose_low()) |
2741 gclog_or_tty->print_cr("[%d] somebody else moved the finger, " | |
2742 "global finger = "PTR_FORMAT", " | |
2743 "our finger = "PTR_FORMAT, | |
2744 task_num, _finger, finger); | |
2745 | |
2746 // read it again | |
2747 finger = _finger; | |
2748 } | |
2749 } | |
2750 | |
2751 return NULL; | |
2752 } | |
2753 | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2754 bool ConcurrentMark::invalidate_aborted_regions_in_cset() { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2755 bool result = false; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2756 for (int i = 0; i < (int)_max_task_num; ++i) { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2757 CMTask* the_task = _tasks[i]; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2758 MemRegion mr = the_task->aborted_region(); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2759 if (mr.start() != NULL) { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2760 assert(mr.end() != NULL, "invariant"); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2761 assert(mr.word_size() > 0, "invariant"); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2762 HeapRegion* hr = _g1h->heap_region_containing(mr.start()); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2763 assert(hr != NULL, "invariant"); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2764 if (hr->in_collection_set()) { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2765 // The region points into the collection set |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2766 the_task->set_aborted_region(MemRegion()); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2767 result = true; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2768 } |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2769 } |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2770 } |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2771 return result; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2772 } |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2773 |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2774 bool ConcurrentMark::has_aborted_regions() { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2775 for (int i = 0; i < (int)_max_task_num; ++i) { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2776 CMTask* the_task = _tasks[i]; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2777 MemRegion mr = the_task->aborted_region(); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2778 if (mr.start() != NULL) { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2779 assert(mr.end() != NULL, "invariant"); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2780 assert(mr.word_size() > 0, "invariant"); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2781 return true; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2782 } |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2783 } |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2784 return false; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2785 } |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2786 |
342 | 2787 void ConcurrentMark::oops_do(OopClosure* cl) { |
2788 if (_markStack.size() > 0 && verbose_low()) | |
2789 gclog_or_tty->print_cr("[global] scanning the global marking stack, " | |
2790 "size = %d", _markStack.size()); | |
2791 // we first iterate over the contents of the mark stack... | |
2792 _markStack.oops_do(cl); | |
2793 | |
2794 for (int i = 0; i < (int)_max_task_num; ++i) { | |
2795 OopTaskQueue* queue = _task_queues->queue((int)i); | |
2796 | |
2797 if (queue->size() > 0 && verbose_low()) | |
2798 gclog_or_tty->print_cr("[global] scanning task queue of task %d, " | |
2799 "size = %d", i, queue->size()); | |
2800 | |
2801 // ...then over the contents of the all the task queues. | |
2802 queue->oops_do(cl); | |
2803 } | |
2804 | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2805 // Invalidate any entries, that are in the region stack, that |
342 | 2806 // point into the collection set |
2807 if (_regionStack.invalidate_entries_into_cset()) { | |
2808 // otherwise, any gray objects copied during the evacuation pause | |
2809 // might not be visited. | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
2810 assert(_should_gray_objects, "invariant"); |
342 | 2811 } |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2812 |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2813 // Invalidate any aborted regions, recorded in the individual CM |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2814 // tasks, that point into the collection set. |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2815 if (invalidate_aborted_regions_in_cset()) { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2816 // otherwise, any gray objects copied during the evacuation pause |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2817 // might not be visited. |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2818 assert(_should_gray_objects, "invariant"); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2819 } |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
2820 |
342 | 2821 } |
2822 | |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
2823 void ConcurrentMark::clear_marking_state(bool clear_overflow) { |
342 | 2824 _markStack.setEmpty(); |
2825 _markStack.clear_overflow(); | |
2826 _regionStack.setEmpty(); | |
2827 _regionStack.clear_overflow(); | |
3316
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
2828 if (clear_overflow) { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
2829 clear_has_overflown(); |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
2830 } else { |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
2831 assert(has_overflown(), "pre-condition"); |
cd8e33b2a8ad
7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
tonyp
parents:
2436
diff
changeset
|
2832 } |
342 | 2833 _finger = _heap_start; |
2834 | |
2835 for (int i = 0; i < (int)_max_task_num; ++i) { | |
2836 OopTaskQueue* queue = _task_queues->queue(i); | |
2837 queue->set_empty(); | |
1885
a5c514e74487
6988458: G1: assert(mr.end() <= _cm->finger()) failed: otherwise the region shouldn't be on the stack
johnc
parents:
1835
diff
changeset
|
2838 // Clear any partial regions from the CMTasks |
a5c514e74487
6988458: G1: assert(mr.end() <= _cm->finger()) failed: otherwise the region shouldn't be on the stack
johnc
parents:
1835
diff
changeset
|
2839 _tasks[i]->clear_aborted_region(); |
342 | 2840 } |
2841 } | |
2842 | |
2843 void ConcurrentMark::print_stats() { | |
2844 if (verbose_stats()) { | |
2845 gclog_or_tty->print_cr("---------------------------------------------------------------------"); | |
2846 for (size_t i = 0; i < _active_tasks; ++i) { | |
2847 _tasks[i]->print_stats(); | |
2848 gclog_or_tty->print_cr("---------------------------------------------------------------------"); | |
2849 } | |
2850 } | |
2851 } | |
2852 | |
2853 class CSMarkOopClosure: public OopClosure { | |
2854 friend class CSMarkBitMapClosure; | |
2855 | |
2856 G1CollectedHeap* _g1h; | |
2857 CMBitMap* _bm; | |
2858 ConcurrentMark* _cm; | |
2859 oop* _ms; | |
2860 jint* _array_ind_stack; | |
2861 int _ms_size; | |
2862 int _ms_ind; | |
2863 int _array_increment; | |
2864 | |
2865 bool push(oop obj, int arr_ind = 0) { | |
2866 if (_ms_ind == _ms_size) { | |
2867 gclog_or_tty->print_cr("Mark stack is full."); | |
2868 return false; | |
2869 } | |
2870 _ms[_ms_ind] = obj; | |
2871 if (obj->is_objArray()) _array_ind_stack[_ms_ind] = arr_ind; | |
2872 _ms_ind++; | |
2873 return true; | |
2874 } | |
2875 | |
2876 oop pop() { | |
2877 if (_ms_ind == 0) return NULL; | |
2878 else { | |
2879 _ms_ind--; | |
2880 return _ms[_ms_ind]; | |
2881 } | |
2882 } | |
2883 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2884 template <class T> bool drain() { |
342 | 2885 while (_ms_ind > 0) { |
2886 oop obj = pop(); | |
2887 assert(obj != NULL, "Since index was non-zero."); | |
2888 if (obj->is_objArray()) { | |
2889 jint arr_ind = _array_ind_stack[_ms_ind]; | |
2890 objArrayOop aobj = objArrayOop(obj); | |
2891 jint len = aobj->length(); | |
2892 jint next_arr_ind = arr_ind + _array_increment; | |
2893 if (next_arr_ind < len) { | |
2894 push(obj, next_arr_ind); | |
2895 } | |
2896 // Now process this portion of this one. | |
2897 int lim = MIN2(next_arr_ind, len); | |
2898 for (int j = arr_ind; j < lim; j++) { | |
912
308762b2bf14
6872000: G1: compilation fails on linux/older gcc
apetrusenko
parents:
866
diff
changeset
|
2899 do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j)); |
342 | 2900 } |
2901 | |
2902 } else { | |
2903 obj->oop_iterate(this); | |
2904 } | |
2905 if (abort()) return false; | |
2906 } | |
2907 return true; | |
2908 } | |
2909 | |
2910 public: | |
2911 CSMarkOopClosure(ConcurrentMark* cm, int ms_size) : | |
2912 _g1h(G1CollectedHeap::heap()), | |
2913 _cm(cm), | |
2914 _bm(cm->nextMarkBitMap()), | |
2915 _ms_size(ms_size), _ms_ind(0), | |
2916 _ms(NEW_C_HEAP_ARRAY(oop, ms_size)), | |
2917 _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)), | |
2918 _array_increment(MAX2(ms_size/8, 16)) | |
2919 {} | |
2920 | |
2921 ~CSMarkOopClosure() { | |
2922 FREE_C_HEAP_ARRAY(oop, _ms); | |
2923 FREE_C_HEAP_ARRAY(jint, _array_ind_stack); | |
2924 } | |
2925 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2926 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2927 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2928 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2929 template <class T> void do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2930 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2931 if (oopDesc::is_null(heap_oop)) return; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2932 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
342 | 2933 if (obj->is_forwarded()) { |
2934 // If the object has already been forwarded, we have to make sure | |
2935 // that it's marked. So follow the forwarding pointer. Note that | |
2936 // this does the right thing for self-forwarding pointers in the | |
2937 // evacuation failure case. | |
2938 obj = obj->forwardee(); | |
2939 } | |
2940 HeapRegion* hr = _g1h->heap_region_containing(obj); | |
2941 if (hr != NULL) { | |
2942 if (hr->in_collection_set()) { | |
2943 if (_g1h->is_obj_ill(obj)) { | |
2944 _bm->mark((HeapWord*)obj); | |
2945 if (!push(obj)) { | |
2946 gclog_or_tty->print_cr("Setting abort in CSMarkOopClosure because push failed."); | |
2947 set_abort(); | |
2948 } | |
2949 } | |
2950 } else { | |
2951 // Outside the collection set; we need to gray it | |
2952 _cm->deal_with_reference(obj); | |
2953 } | |
2954 } | |
2955 } | |
2956 }; | |
2957 | |
2958 class CSMarkBitMapClosure: public BitMapClosure { | |
2959 G1CollectedHeap* _g1h; | |
2960 CMBitMap* _bitMap; | |
2961 ConcurrentMark* _cm; | |
2962 CSMarkOopClosure _oop_cl; | |
2963 public: | |
2964 CSMarkBitMapClosure(ConcurrentMark* cm, int ms_size) : | |
2965 _g1h(G1CollectedHeap::heap()), | |
2966 _bitMap(cm->nextMarkBitMap()), | |
2967 _oop_cl(cm, ms_size) | |
2968 {} | |
2969 | |
2970 ~CSMarkBitMapClosure() {} | |
2971 | |
2972 bool do_bit(size_t offset) { | |
2973 // convert offset into a HeapWord* | |
2974 HeapWord* addr = _bitMap->offsetToHeapWord(offset); | |
2975 assert(_bitMap->endWord() && addr < _bitMap->endWord(), | |
2976 "address out of range"); | |
2977 assert(_bitMap->isMarked(addr), "tautology"); | |
2978 oop obj = oop(addr); | |
2979 if (!obj->is_forwarded()) { | |
2980 if (!_oop_cl.push(obj)) return false; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2981 if (UseCompressedOops) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2982 if (!_oop_cl.drain<narrowOop>()) return false; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2983 } else { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2984 if (!_oop_cl.drain<oop>()) return false; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
831
diff
changeset
|
2985 } |
342 | 2986 } |
2987 // Otherwise... | |
2988 return true; | |
2989 } | |
2990 }; | |
2991 | |
2992 | |
2993 class CompleteMarkingInCSHRClosure: public HeapRegionClosure { | |
2994 CMBitMap* _bm; | |
2995 CSMarkBitMapClosure _bit_cl; | |
2996 enum SomePrivateConstants { | |
2997 MSSize = 1000 | |
2998 }; | |
2999 bool _completed; | |
3000 public: | |
3001 CompleteMarkingInCSHRClosure(ConcurrentMark* cm) : | |
3002 _bm(cm->nextMarkBitMap()), | |
3003 _bit_cl(cm, MSSize), | |
3004 _completed(true) | |
3005 {} | |
3006 | |
3007 ~CompleteMarkingInCSHRClosure() {} | |
3008 | |
3009 bool doHeapRegion(HeapRegion* r) { | |
3010 if (!r->evacuation_failed()) { | |
3011 MemRegion mr = MemRegion(r->bottom(), r->next_top_at_mark_start()); | |
3012 if (!mr.is_empty()) { | |
3013 if (!_bm->iterate(&_bit_cl, mr)) { | |
3014 _completed = false; | |
3015 return true; | |
3016 } | |
3017 } | |
3018 } | |
3019 return false; | |
3020 } | |
3021 | |
3022 bool completed() { return _completed; } | |
3023 }; | |
3024 | |
3025 class ClearMarksInHRClosure: public HeapRegionClosure { | |
3026 CMBitMap* _bm; | |
3027 public: | |
3028 ClearMarksInHRClosure(CMBitMap* bm): _bm(bm) { } | |
3029 | |
3030 bool doHeapRegion(HeapRegion* r) { | |
3031 if (!r->used_region().is_empty() && !r->evacuation_failed()) { | |
3032 MemRegion usedMR = r->used_region(); | |
3033 _bm->clearRange(r->used_region()); | |
3034 } | |
3035 return false; | |
3036 } | |
3037 }; | |
3038 | |
3039 void ConcurrentMark::complete_marking_in_collection_set() { | |
3040 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
3041 | |
3042 if (!g1h->mark_in_progress()) { | |
3043 g1h->g1_policy()->record_mark_closure_time(0.0); | |
3044 return; | |
3045 } | |
3046 | |
3047 int i = 1; | |
3048 double start = os::elapsedTime(); | |
3049 while (true) { | |
3050 i++; | |
3051 CompleteMarkingInCSHRClosure cmplt(this); | |
3052 g1h->collection_set_iterate(&cmplt); | |
3053 if (cmplt.completed()) break; | |
3054 } | |
3055 double end_time = os::elapsedTime(); | |
3056 double elapsed_time_ms = (end_time - start) * 1000.0; | |
3057 g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms); | |
3058 | |
3059 ClearMarksInHRClosure clr(nextMarkBitMap()); | |
3060 g1h->collection_set_iterate(&clr); | |
3061 } | |
3062 | |
3063 // The next two methods deal with the following optimisation. Some | |
3064 // objects are gray by being marked and located above the finger. If | |
3065 // they are copied, during an evacuation pause, below the finger then | |
3066 // the need to be pushed on the stack. The observation is that, if | |
3067 // there are no regions in the collection set located above the | |
3068 // finger, then the above cannot happen, hence we do not need to | |
3069 // explicitly gray any objects when copying them to below the | |
3070 // finger. The global stack will be scanned to ensure that, if it | |
3071 // points to objects being copied, it will update their | |
3072 // location. There is a tricky situation with the gray objects in | |
3073 // region stack that are being coped, however. See the comment in | |
3074 // newCSet(). | |
3075 | |
3076 void ConcurrentMark::newCSet() { | |
3077 if (!concurrent_marking_in_progress()) | |
3078 // nothing to do if marking is not in progress | |
3079 return; | |
3080 | |
3081 // find what the lowest finger is among the global and local fingers | |
3082 _min_finger = _finger; | |
3083 for (int i = 0; i < (int)_max_task_num; ++i) { | |
3084 CMTask* task = _tasks[i]; | |
3085 HeapWord* task_finger = task->finger(); | |
3086 if (task_finger != NULL && task_finger < _min_finger) | |
3087 _min_finger = task_finger; | |
3088 } | |
3089 | |
3090 _should_gray_objects = false; | |
3091 | |
3092 // This fixes a very subtle and fustrating bug. It might be the case | |
3093 // that, during en evacuation pause, heap regions that contain | |
3094 // objects that are gray (by being in regions contained in the | |
3095 // region stack) are included in the collection set. Since such gray | |
3096 // objects will be moved, and because it's not easy to redirect | |
3097 // region stack entries to point to a new location (because objects | |
3098 // in one region might be scattered to multiple regions after they | |
3099 // are copied), one option is to ensure that all marked objects | |
3100 // copied during a pause are pushed on the stack. Notice, however, | |
3101 // that this problem can only happen when the region stack is not | |
3102 // empty during an evacuation pause. So, we make the fix a bit less | |
3103 // conservative and ensure that regions are pushed on the stack, | |
3104 // irrespective whether all collection set regions are below the | |
3105 // finger, if the region stack is not empty. This is expected to be | |
3106 // a rare case, so I don't think it's necessary to be smarted about it. | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3107 if (!region_stack_empty() || has_aborted_regions()) |
342 | 3108 _should_gray_objects = true; |
3109 } | |
3110 | |
3111 void ConcurrentMark::registerCSetRegion(HeapRegion* hr) { | |
3112 if (!concurrent_marking_in_progress()) | |
3113 return; | |
3114 | |
3115 HeapWord* region_end = hr->end(); | |
3116 if (region_end > _min_finger) | |
3117 _should_gray_objects = true; | |
3118 } | |
3119 | |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3120 // Resets the region fields of active CMTasks whose values point |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3121 // into the collection set. |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3122 void ConcurrentMark::reset_active_task_region_fields_in_cset() { |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3123 assert(SafepointSynchronize::is_at_safepoint(), "should be in STW"); |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3124 assert(parallel_marking_threads() <= _max_task_num, "sanity"); |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3125 |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3126 for (int i = 0; i < (int)parallel_marking_threads(); i += 1) { |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3127 CMTask* task = _tasks[i]; |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3128 HeapWord* task_finger = task->finger(); |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3129 if (task_finger != NULL) { |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3130 assert(_g1h->is_in_g1_reserved(task_finger), "not in heap"); |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3131 HeapRegion* finger_region = _g1h->heap_region_containing(task_finger); |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3132 if (finger_region->in_collection_set()) { |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3133 // The task's current region is in the collection set. |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3134 // This region will be evacuated in the current GC and |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3135 // the region fields in the task will be stale. |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3136 task->giveup_current_region(); |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3137 } |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3138 } |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3139 } |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3140 } |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3317
diff
changeset
|
3141 |
342 | 3142 // abandon current marking iteration due to a Full GC |
3143 void ConcurrentMark::abort() { | |
3144 // Clear all marks to force marking thread to do nothing | |
3145 _nextMarkBitMap->clearAll(); | |
3146 // Empty mark stack | |
3147 clear_marking_state(); | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3148 for (int i = 0; i < (int)_max_task_num; ++i) { |
342 | 3149 _tasks[i]->clear_region_fields(); |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3150 } |
342 | 3151 _has_aborted = true; |
3152 | |
3153 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
3154 satb_mq_set.abandon_partial_marking(); | |
1317
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
3155 // This can be called either during or outside marking, we'll read |
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
3156 // the expected_active value from the SATB queue set. |
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
3157 satb_mq_set.set_active_all_threads( |
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
3158 false, /* new active value */ |
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1314
diff
changeset
|
3159 satb_mq_set.is_active() /* expected_active */); |
342 | 3160 } |
3161 | |
3162 static void print_ms_time_info(const char* prefix, const char* name, | |
3163 NumberSeq& ns) { | |
3164 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", | |
3165 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); | |
3166 if (ns.num() > 0) { | |
3167 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", | |
3168 prefix, ns.sd(), ns.maximum()); | |
3169 } | |
3170 } | |
3171 | |
3172 void ConcurrentMark::print_summary_info() { | |
3173 gclog_or_tty->print_cr(" Concurrent marking:"); | |
3174 print_ms_time_info(" ", "init marks", _init_times); | |
3175 print_ms_time_info(" ", "remarks", _remark_times); | |
3176 { | |
3177 print_ms_time_info(" ", "final marks", _remark_mark_times); | |
3178 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); | |
3179 | |
3180 } | |
3181 print_ms_time_info(" ", "cleanups", _cleanup_times); | |
3182 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", | |
3183 _total_counting_time, | |
3184 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / | |
3185 (double)_cleanup_times.num() | |
3186 : 0.0)); | |
3187 if (G1ScrubRemSets) { | |
3188 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", | |
3189 _total_rs_scrub_time, | |
3190 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / | |
3191 (double)_cleanup_times.num() | |
3192 : 0.0)); | |
3193 } | |
3194 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", | |
3195 (_init_times.sum() + _remark_times.sum() + | |
3196 _cleanup_times.sum())/1000.0); | |
3197 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " | |
3198 "(%8.2f s marking, %8.2f s counting).", | |
3199 cmThread()->vtime_accum(), | |
3200 cmThread()->vtime_mark_accum(), | |
3201 cmThread()->vtime_count_accum()); | |
3202 } | |
3203 | |
1019 | 3204 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { |
3205 _parallel_workers->print_worker_threads_on(st); | |
3206 } | |
3207 | |
342 | 3208 // Closures |
3209 // XXX: there seems to be a lot of code duplication here; | |
3210 // should refactor and consolidate the shared code. | |
3211 | |
3212 // This closure is used to mark refs into the CMS generation in | |
3213 // the CMS bit map. Called at the first checkpoint. | |
3214 | |
3215 // We take a break if someone is trying to stop the world. | |
3216 bool ConcurrentMark::do_yield_check(int worker_i) { | |
3217 if (should_yield()) { | |
3218 if (worker_i == 0) | |
3219 _g1h->g1_policy()->record_concurrent_pause(); | |
3220 cmThread()->yield(); | |
3221 if (worker_i == 0) | |
3222 _g1h->g1_policy()->record_concurrent_pause_end(); | |
3223 return true; | |
3224 } else { | |
3225 return false; | |
3226 } | |
3227 } | |
3228 | |
3229 bool ConcurrentMark::should_yield() { | |
3230 return cmThread()->should_yield(); | |
3231 } | |
3232 | |
3233 bool ConcurrentMark::containing_card_is_marked(void* p) { | |
3234 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); | |
3235 return _card_bm.at(offset >> CardTableModRefBS::card_shift); | |
3236 } | |
3237 | |
3238 bool ConcurrentMark::containing_cards_are_marked(void* start, | |
3239 void* last) { | |
3240 return | |
3241 containing_card_is_marked(start) && | |
3242 containing_card_is_marked(last); | |
3243 } | |
3244 | |
3245 #ifndef PRODUCT | |
3246 // for debugging purposes | |
3247 void ConcurrentMark::print_finger() { | |
3248 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, | |
3249 _heap_start, _heap_end, _finger); | |
3250 for (int i = 0; i < (int) _max_task_num; ++i) { | |
3251 gclog_or_tty->print(" %d: "PTR_FORMAT, i, _tasks[i]->finger()); | |
3252 } | |
3253 gclog_or_tty->print_cr(""); | |
3254 } | |
3255 #endif | |
3256 | |
3771 | 3257 void CMTask::scan_object(oop obj) { |
3258 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); | |
3259 | |
3260 if (_cm->verbose_high()) { | |
3261 gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT, | |
3262 _task_id, (void*) obj); | |
3263 } | |
3264 | |
3265 size_t obj_size = obj->size(); | |
3266 _words_scanned += obj_size; | |
3267 | |
3268 obj->oop_iterate(_cm_oop_closure); | |
3269 statsOnly( ++_objs_scanned ); | |
3270 check_limits(); | |
3271 } | |
3272 | |
342 | 3273 // Closure for iteration over bitmaps |
3274 class CMBitMapClosure : public BitMapClosure { | |
3275 private: | |
3276 // the bitmap that is being iterated over | |
3277 CMBitMap* _nextMarkBitMap; | |
3278 ConcurrentMark* _cm; | |
3279 CMTask* _task; | |
3280 // true if we're scanning a heap region claimed by the task (so that | |
3281 // we move the finger along), false if we're not, i.e. currently when | |
3282 // scanning a heap region popped from the region stack (so that we | |
3283 // do not move the task finger along; it'd be a mistake if we did so). | |
3284 bool _scanning_heap_region; | |
3285 | |
3286 public: | |
3287 CMBitMapClosure(CMTask *task, | |
3288 ConcurrentMark* cm, | |
3289 CMBitMap* nextMarkBitMap) | |
3290 : _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } | |
3291 | |
3292 void set_scanning_heap_region(bool scanning_heap_region) { | |
3293 _scanning_heap_region = scanning_heap_region; | |
3294 } | |
3295 | |
3296 bool do_bit(size_t offset) { | |
3297 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3298 assert(_nextMarkBitMap->isMarked(addr), "invariant"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3299 assert( addr < _cm->finger(), "invariant"); |
342 | 3300 |
3301 if (_scanning_heap_region) { | |
3302 statsOnly( _task->increase_objs_found_on_bitmap() ); | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3303 assert(addr >= _task->finger(), "invariant"); |
342 | 3304 // We move that task's local finger along. |
3305 _task->move_finger_to(addr); | |
3306 } else { | |
3307 // We move the task's region finger along. | |
3308 _task->move_region_finger_to(addr); | |
3309 } | |
3310 | |
3311 _task->scan_object(oop(addr)); | |
3312 // we only partially drain the local queue and global stack | |
3313 _task->drain_local_queue(true); | |
3314 _task->drain_global_stack(true); | |
3315 | |
3316 // if the has_aborted flag has been raised, we need to bail out of | |
3317 // the iteration | |
3318 return !_task->has_aborted(); | |
3319 } | |
3320 }; | |
3321 | |
3322 // Closure for iterating over objects, currently only used for | |
3323 // processing SATB buffers. | |
3324 class CMObjectClosure : public ObjectClosure { | |
3325 private: | |
3326 CMTask* _task; | |
3327 | |
3328 public: | |
3329 void do_object(oop obj) { | |
3330 _task->deal_with_reference(obj); | |
3331 } | |
3332 | |
3333 CMObjectClosure(CMTask* task) : _task(task) { } | |
3334 }; | |
3335 | |
3771 | 3336 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, |
3337 ConcurrentMark* cm, | |
3338 CMTask* task) | |
3339 : _g1h(g1h), _cm(cm), _task(task) { | |
3340 assert(_ref_processor == NULL, "should be initialized to NULL"); | |
3341 | |
3342 if (G1UseConcMarkReferenceProcessing) { | |
3343 _ref_processor = g1h->ref_processor(); | |
3344 assert(_ref_processor != NULL, "should not be NULL"); | |
342 | 3345 } |
3771 | 3346 } |
342 | 3347 |
3348 void CMTask::setup_for_region(HeapRegion* hr) { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3349 // Separated the asserts so that we know which one fires. |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3350 assert(hr != NULL, |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3351 "claim_region() should have filtered out continues humongous regions"); |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3352 assert(!hr->continuesHumongous(), |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3353 "claim_region() should have filtered out continues humongous regions"); |
342 | 3354 |
3355 if (_cm->verbose_low()) | |
3356 gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT, | |
3357 _task_id, hr); | |
3358 | |
3359 _curr_region = hr; | |
3360 _finger = hr->bottom(); | |
3361 update_region_limit(); | |
3362 } | |
3363 | |
3364 void CMTask::update_region_limit() { | |
3365 HeapRegion* hr = _curr_region; | |
3366 HeapWord* bottom = hr->bottom(); | |
3367 HeapWord* limit = hr->next_top_at_mark_start(); | |
3368 | |
3369 if (limit == bottom) { | |
3370 if (_cm->verbose_low()) | |
3371 gclog_or_tty->print_cr("[%d] found an empty region " | |
3372 "["PTR_FORMAT", "PTR_FORMAT")", | |
3373 _task_id, bottom, limit); | |
3374 // The region was collected underneath our feet. | |
3375 // We set the finger to bottom to ensure that the bitmap | |
3376 // iteration that will follow this will not do anything. | |
3377 // (this is not a condition that holds when we set the region up, | |
3378 // as the region is not supposed to be empty in the first place) | |
3379 _finger = bottom; | |
3380 } else if (limit >= _region_limit) { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3381 assert(limit >= _finger, "peace of mind"); |
342 | 3382 } else { |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3383 assert(limit < _region_limit, "only way to get here"); |
342 | 3384 // This can happen under some pretty unusual circumstances. An |
3385 // evacuation pause empties the region underneath our feet (NTAMS | |
3386 // at bottom). We then do some allocation in the region (NTAMS | |
3387 // stays at bottom), followed by the region being used as a GC | |
3388 // alloc region (NTAMS will move to top() and the objects | |
3389 // originally below it will be grayed). All objects now marked in | |
3390 // the region are explicitly grayed, if below the global finger, | |
3391 // and we do not need in fact to scan anything else. So, we simply | |
3392 // set _finger to be limit to ensure that the bitmap iteration | |
3393 // doesn't do anything. | |
3394 _finger = limit; | |
3395 } | |
3396 | |
3397 _region_limit = limit; | |
3398 } | |
3399 | |
3400 void CMTask::giveup_current_region() { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3401 assert(_curr_region != NULL, "invariant"); |
342 | 3402 if (_cm->verbose_low()) |
3403 gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT, | |
3404 _task_id, _curr_region); | |
3405 clear_region_fields(); | |
3406 } | |
3407 | |
3408 void CMTask::clear_region_fields() { | |
3409 // Values for these three fields that indicate that we're not | |
3410 // holding on to a region. | |
3411 _curr_region = NULL; | |
3412 _finger = NULL; | |
3413 _region_limit = NULL; | |
3414 | |
3415 _region_finger = NULL; | |
3416 } | |
3417 | |
3771 | 3418 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { |
3419 if (cm_oop_closure == NULL) { | |
3420 assert(_cm_oop_closure != NULL, "invariant"); | |
3421 } else { | |
3422 assert(_cm_oop_closure == NULL, "invariant"); | |
3423 } | |
3424 _cm_oop_closure = cm_oop_closure; | |
3425 } | |
3426 | |
342 | 3427 void CMTask::reset(CMBitMap* nextMarkBitMap) { |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3428 guarantee(nextMarkBitMap != NULL, "invariant"); |
342 | 3429 |
3430 if (_cm->verbose_low()) | |
3431 gclog_or_tty->print_cr("[%d] resetting", _task_id); | |
3432 | |
3433 _nextMarkBitMap = nextMarkBitMap; | |
3434 clear_region_fields(); | |
1885
a5c514e74487
6988458: G1: assert(mr.end() <= _cm->finger()) failed: otherwise the region shouldn't be on the stack
johnc
parents:
1835
diff
changeset
|
3435 assert(_aborted_region.is_empty(), "should have been cleared"); |
342 | 3436 |
3437 _calls = 0; | |
3438 _elapsed_time_ms = 0.0; | |
3439 _termination_time_ms = 0.0; | |
3440 _termination_start_time_ms = 0.0; | |
3441 | |
3442 #if _MARKING_STATS_ | |
3443 _local_pushes = 0; | |
3444 _local_pops = 0; | |
3445 _local_max_size = 0; | |
3446 _objs_scanned = 0; | |
3447 _global_pushes = 0; | |
3448 _global_pops = 0; | |
3449 _global_max_size = 0; | |
3450 _global_transfers_to = 0; | |
3451 _global_transfers_from = 0; | |
3452 _region_stack_pops = 0; | |
3453 _regions_claimed = 0; | |
3454 _objs_found_on_bitmap = 0; | |
3455 _satb_buffers_processed = 0; | |
3456 _steal_attempts = 0; | |
3457 _steals = 0; | |
3458 _aborted = 0; | |
3459 _aborted_overflow = 0; | |
3460 _aborted_cm_aborted = 0; | |
3461 _aborted_yield = 0; | |
3462 _aborted_timed_out = 0; | |
3463 _aborted_satb = 0; | |
3464 _aborted_termination = 0; | |
3465 #endif // _MARKING_STATS_ | |
3466 } | |
3467 | |
3468 bool CMTask::should_exit_termination() { | |
3469 regular_clock_call(); | |
3470 // This is called when we are in the termination protocol. We should | |
3471 // quit if, for some reason, this task wants to abort or the global | |
3472 // stack is not empty (this means that we can get work from it). | |
3473 return !_cm->mark_stack_empty() || has_aborted(); | |
3474 } | |
3475 | |
3476 void CMTask::reached_limit() { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3477 assert(_words_scanned >= _words_scanned_limit || |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3478 _refs_reached >= _refs_reached_limit , |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3479 "shouldn't have been called otherwise"); |
342 | 3480 regular_clock_call(); |
3481 } | |
3482 | |
3483 void CMTask::regular_clock_call() { | |
3484 if (has_aborted()) | |
3485 return; | |
3486 | |
3487 // First, we need to recalculate the words scanned and refs reached | |
3488 // limits for the next clock call. | |
3489 recalculate_limits(); | |
3490 | |
3491 // During the regular clock call we do the following | |
3492 | |
3493 // (1) If an overflow has been flagged, then we abort. | |
3494 if (_cm->has_overflown()) { | |
3495 set_has_aborted(); | |
3496 return; | |
3497 } | |
3498 | |
3499 // If we are not concurrent (i.e. we're doing remark) we don't need | |
3500 // to check anything else. The other steps are only needed during | |
3501 // the concurrent marking phase. | |
3502 if (!concurrent()) | |
3503 return; | |
3504 | |
3505 // (2) If marking has been aborted for Full GC, then we also abort. | |
3506 if (_cm->has_aborted()) { | |
3507 set_has_aborted(); | |
3508 statsOnly( ++_aborted_cm_aborted ); | |
3509 return; | |
3510 } | |
3511 | |
3512 double curr_time_ms = os::elapsedVTime() * 1000.0; | |
3513 | |
3514 // (3) If marking stats are enabled, then we update the step history. | |
3515 #if _MARKING_STATS_ | |
3516 if (_words_scanned >= _words_scanned_limit) | |
3517 ++_clock_due_to_scanning; | |
3518 if (_refs_reached >= _refs_reached_limit) | |
3519 ++_clock_due_to_marking; | |
3520 | |
3521 double last_interval_ms = curr_time_ms - _interval_start_time_ms; | |
3522 _interval_start_time_ms = curr_time_ms; | |
3523 _all_clock_intervals_ms.add(last_interval_ms); | |
3524 | |
3525 if (_cm->verbose_medium()) { | |
3526 gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, " | |
3527 "scanned = %d%s, refs reached = %d%s", | |
3528 _task_id, last_interval_ms, | |
3529 _words_scanned, | |
3530 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", | |
3531 _refs_reached, | |
3532 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); | |
3533 } | |
3534 #endif // _MARKING_STATS_ | |
3535 | |
3536 // (4) We check whether we should yield. If we have to, then we abort. | |
3537 if (_cm->should_yield()) { | |
3538 // We should yield. To do this we abort the task. The caller is | |
3539 // responsible for yielding. | |
3540 set_has_aborted(); | |
3541 statsOnly( ++_aborted_yield ); | |
3542 return; | |
3543 } | |
3544 | |
3545 // (5) We check whether we've reached our time quota. If we have, | |
3546 // then we abort. | |
3547 double elapsed_time_ms = curr_time_ms - _start_time_ms; | |
3548 if (elapsed_time_ms > _time_target_ms) { | |
3549 set_has_aborted(); | |
2174
234761c55641
6608385: G1: need to support parallel reference processing
johnc
parents:
2152
diff
changeset
|
3550 _has_timed_out = true; |
342 | 3551 statsOnly( ++_aborted_timed_out ); |
3552 return; | |
3553 } | |
3554 | |
3555 // (6) Finally, we check whether there are enough completed STAB | |
3556 // buffers available for processing. If there are, we abort. | |
3557 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
3558 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { | |
3559 if (_cm->verbose_low()) | |
3560 gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers", | |
3561 _task_id); | |
3562 // we do need to process SATB buffers, we'll abort and restart | |
3563 // the marking task to do so | |
3564 set_has_aborted(); | |
3565 statsOnly( ++_aborted_satb ); | |
3566 return; | |
3567 } | |
3568 } | |
3569 | |
3570 void CMTask::recalculate_limits() { | |
3571 _real_words_scanned_limit = _words_scanned + words_scanned_period; | |
3572 _words_scanned_limit = _real_words_scanned_limit; | |
3573 | |
3574 _real_refs_reached_limit = _refs_reached + refs_reached_period; | |
3575 _refs_reached_limit = _real_refs_reached_limit; | |
3576 } | |
3577 | |
3578 void CMTask::decrease_limits() { | |
3579 // This is called when we believe that we're going to do an infrequent | |
3580 // operation which will increase the per byte scanned cost (i.e. move | |
3581 // entries to/from the global stack). It basically tries to decrease the | |
3582 // scanning limit so that the clock is called earlier. | |
3583 | |
3584 if (_cm->verbose_medium()) | |
3585 gclog_or_tty->print_cr("[%d] decreasing limits", _task_id); | |
3586 | |
3587 _words_scanned_limit = _real_words_scanned_limit - | |
3588 3 * words_scanned_period / 4; | |
3589 _refs_reached_limit = _real_refs_reached_limit - | |
3590 3 * refs_reached_period / 4; | |
3591 } | |
3592 | |
3593 void CMTask::move_entries_to_global_stack() { | |
3594 // local array where we'll store the entries that will be popped | |
3595 // from the local queue | |
3596 oop buffer[global_stack_transfer_size]; | |
3597 | |
3598 int n = 0; | |
3599 oop obj; | |
3600 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { | |
3601 buffer[n] = obj; | |
3602 ++n; | |
3603 } | |
3604 | |
3605 if (n > 0) { | |
3606 // we popped at least one entry from the local queue | |
3607 | |
3608 statsOnly( ++_global_transfers_to; _local_pops += n ); | |
3609 | |
3610 if (!_cm->mark_stack_push(buffer, n)) { | |
3611 if (_cm->verbose_low()) | |
3612 gclog_or_tty->print_cr("[%d] aborting due to global stack overflow", _task_id); | |
3613 set_has_aborted(); | |
3614 } else { | |
3615 // the transfer was successful | |
3616 | |
3617 if (_cm->verbose_medium()) | |
3618 gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack", | |
3619 _task_id, n); | |
3620 statsOnly( int tmp_size = _cm->mark_stack_size(); | |
3621 if (tmp_size > _global_max_size) | |
3622 _global_max_size = tmp_size; | |
3623 _global_pushes += n ); | |
3624 } | |
3625 } | |
3626 | |
3627 // this operation was quite expensive, so decrease the limits | |
3628 decrease_limits(); | |
3629 } | |
3630 | |
3631 void CMTask::get_entries_from_global_stack() { | |
3632 // local array where we'll store the entries that will be popped | |
3633 // from the global stack. | |
3634 oop buffer[global_stack_transfer_size]; | |
3635 int n; | |
3636 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3637 assert(n <= global_stack_transfer_size, |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3638 "we should not pop more than the given limit"); |
342 | 3639 if (n > 0) { |
3640 // yes, we did actually pop at least one entry | |
3641 | |
3642 statsOnly( ++_global_transfers_from; _global_pops += n ); | |
3643 if (_cm->verbose_medium()) | |
3644 gclog_or_tty->print_cr("[%d] popped %d entries from the global stack", | |
3645 _task_id, n); | |
3646 for (int i = 0; i < n; ++i) { | |
3647 bool success = _task_queue->push(buffer[i]); | |
3648 // We only call this when the local queue is empty or under a | |
3649 // given target limit. So, we do not expect this push to fail. | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3650 assert(success, "invariant"); |
342 | 3651 } |
3652 | |
3653 statsOnly( int tmp_size = _task_queue->size(); | |
3654 if (tmp_size > _local_max_size) | |
3655 _local_max_size = tmp_size; | |
3656 _local_pushes += n ); | |
3657 } | |
3658 | |
3659 // this operation was quite expensive, so decrease the limits | |
3660 decrease_limits(); | |
3661 } | |
3662 | |
3663 void CMTask::drain_local_queue(bool partially) { | |
3664 if (has_aborted()) | |
3665 return; | |
3666 | |
3667 // Decide what the target size is, depending whether we're going to | |
3668 // drain it partially (so that other tasks can steal if they run out | |
3669 // of things to do) or totally (at the very end). | |
3670 size_t target_size; | |
3671 if (partially) | |
3672 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); | |
3673 else | |
3674 target_size = 0; | |
3675 | |
3676 if (_task_queue->size() > target_size) { | |
3677 if (_cm->verbose_high()) | |
3678 gclog_or_tty->print_cr("[%d] draining local queue, target size = %d", | |
3679 _task_id, target_size); | |
3680 | |
3681 oop obj; | |
3682 bool ret = _task_queue->pop_local(obj); | |
3683 while (ret) { | |
3684 statsOnly( ++_local_pops ); | |
3685 | |
3686 if (_cm->verbose_high()) | |
3687 gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id, | |
3688 (void*) obj); | |
3689 | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3690 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); |
2361 | 3691 assert(!_g1h->is_on_master_free_list( |
2152 | 3692 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); |
342 | 3693 |
3694 scan_object(obj); | |
3695 | |
3696 if (_task_queue->size() <= target_size || has_aborted()) | |
3697 ret = false; | |
3698 else | |
3699 ret = _task_queue->pop_local(obj); | |
3700 } | |
3701 | |
3702 if (_cm->verbose_high()) | |
3703 gclog_or_tty->print_cr("[%d] drained local queue, size = %d", | |
3704 _task_id, _task_queue->size()); | |
3705 } | |
3706 } | |
3707 | |
3708 void CMTask::drain_global_stack(bool partially) { | |
3709 if (has_aborted()) | |
3710 return; | |
3711 | |
3712 // We have a policy to drain the local queue before we attempt to | |
3713 // drain the global stack. | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3714 assert(partially || _task_queue->size() == 0, "invariant"); |
342 | 3715 |
3716 // Decide what the target size is, depending whether we're going to | |
3717 // drain it partially (so that other tasks can steal if they run out | |
3718 // of things to do) or totally (at the very end). Notice that, | |
3719 // because we move entries from the global stack in chunks or | |
3720 // because another task might be doing the same, we might in fact | |
3721 // drop below the target. But, this is not a problem. | |
3722 size_t target_size; | |
3723 if (partially) | |
3724 target_size = _cm->partial_mark_stack_size_target(); | |
3725 else | |
3726 target_size = 0; | |
3727 | |
3728 if (_cm->mark_stack_size() > target_size) { | |
3729 if (_cm->verbose_low()) | |
3730 gclog_or_tty->print_cr("[%d] draining global_stack, target size %d", | |
3731 _task_id, target_size); | |
3732 | |
3733 while (!has_aborted() && _cm->mark_stack_size() > target_size) { | |
3734 get_entries_from_global_stack(); | |
3735 drain_local_queue(partially); | |
3736 } | |
3737 | |
3738 if (_cm->verbose_low()) | |
3739 gclog_or_tty->print_cr("[%d] drained global stack, size = %d", | |
3740 _task_id, _cm->mark_stack_size()); | |
3741 } | |
3742 } | |
3743 | |
3744 // SATB Queue has several assumptions on whether to call the par or | |
3745 // non-par versions of the methods. this is why some of the code is | |
3746 // replicated. We should really get rid of the single-threaded version | |
3747 // of the code to simplify things. | |
3748 void CMTask::drain_satb_buffers() { | |
3749 if (has_aborted()) | |
3750 return; | |
3751 | |
3752 // We set this so that the regular clock knows that we're in the | |
3753 // middle of draining buffers and doesn't set the abort flag when it | |
3754 // notices that SATB buffers are available for draining. It'd be | |
3755 // very counter productive if it did that. :-) | |
3756 _draining_satb_buffers = true; | |
3757 | |
3758 CMObjectClosure oc(this); | |
3759 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
3760 if (G1CollectedHeap::use_parallel_gc_threads()) |
342 | 3761 satb_mq_set.set_par_closure(_task_id, &oc); |
3762 else | |
3763 satb_mq_set.set_closure(&oc); | |
3764 | |
3765 // This keeps claiming and applying the closure to completed buffers | |
3766 // until we run out of buffers or we need to abort. | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
3767 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 3768 while (!has_aborted() && |
3769 satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) { | |
3770 if (_cm->verbose_medium()) | |
3771 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); | |
3772 statsOnly( ++_satb_buffers_processed ); | |
3773 regular_clock_call(); | |
3774 } | |
3775 } else { | |
3776 while (!has_aborted() && | |
3777 satb_mq_set.apply_closure_to_completed_buffer()) { | |
3778 if (_cm->verbose_medium()) | |
3779 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); | |
3780 statsOnly( ++_satb_buffers_processed ); | |
3781 regular_clock_call(); | |
3782 } | |
3783 } | |
3784 | |
3785 if (!concurrent() && !has_aborted()) { | |
3786 // We should only do this during remark. | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
3787 if (G1CollectedHeap::use_parallel_gc_threads()) |
342 | 3788 satb_mq_set.par_iterate_closure_all_threads(_task_id); |
3789 else | |
3790 satb_mq_set.iterate_closure_all_threads(); | |
3791 } | |
3792 | |
3793 _draining_satb_buffers = false; | |
3794 | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3795 assert(has_aborted() || |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3796 concurrent() || |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3797 satb_mq_set.completed_buffers_num() == 0, "invariant"); |
342 | 3798 |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1719
diff
changeset
|
3799 if (G1CollectedHeap::use_parallel_gc_threads()) |
342 | 3800 satb_mq_set.set_par_closure(_task_id, NULL); |
3801 else | |
3802 satb_mq_set.set_closure(NULL); | |
3803 | |
3804 // again, this was a potentially expensive operation, decrease the | |
3805 // limits to get the regular clock call early | |
3806 decrease_limits(); | |
3807 } | |
3808 | |
3809 void CMTask::drain_region_stack(BitMapClosure* bc) { | |
3810 if (has_aborted()) | |
3811 return; | |
3812 | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3813 assert(_region_finger == NULL, |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3814 "it should be NULL when we're not scanning a region"); |
342 | 3815 |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3816 if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) { |
342 | 3817 if (_cm->verbose_low()) |
3818 gclog_or_tty->print_cr("[%d] draining region stack, size = %d", | |
3819 _task_id, _cm->region_stack_size()); | |
3820 | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3821 MemRegion mr; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3822 |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3823 if (!_aborted_region.is_empty()) { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3824 mr = _aborted_region; |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3825 _aborted_region = MemRegion(); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3826 |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3827 if (_cm->verbose_low()) |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3828 gclog_or_tty->print_cr("[%d] scanning aborted region [ " PTR_FORMAT ", " PTR_FORMAT " )", |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3829 _task_id, mr.start(), mr.end()); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3830 } else { |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3831 mr = _cm->region_stack_pop_lock_free(); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3832 // it returns MemRegion() if the pop fails |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3833 statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3834 } |
342 | 3835 |
3836 while (mr.start() != NULL) { | |
3837 if (_cm->verbose_medium()) | |
3838 gclog_or_tty->print_cr("[%d] we are scanning region " | |
3839 "["PTR_FORMAT", "PTR_FORMAT")", | |
3840 _task_id, mr.start(), mr.end()); | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3841 |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3842 assert(mr.end() <= _cm->finger(), |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3843 "otherwise the region shouldn't be on the stack"); |
342 | 3844 assert(!mr.is_empty(), "Only non-empty regions live on the region stack"); |
3845 if (_nextMarkBitMap->iterate(bc, mr)) { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3846 assert(!has_aborted(), |
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3847 "cannot abort the task without aborting the bitmap iteration"); |
342 | 3848 |
3849 // We finished iterating over the region without aborting. | |
3850 regular_clock_call(); | |
3851 if (has_aborted()) | |
3852 mr = MemRegion(); | |
3853 else { | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3854 mr = _cm->region_stack_pop_lock_free(); |
342 | 3855 // it returns MemRegion() if the pop fails |
3856 statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); | |
3857 } | |
3858 } else { | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3859 assert(has_aborted(), "currently the only way to do so"); |
342 | 3860 |
3861 // The only way to abort the bitmap iteration is to return | |
3862 // false from the do_bit() method. However, inside the | |
3863 // do_bit() method we move the _region_finger to point to the | |
3864 // object currently being looked at. So, if we bail out, we | |
3865 // have definitely set _region_finger to something non-null. | |
1023
11d4857fe5e1
6888619: G1: too many guarantees in concurrent marking
tonyp
parents:
1022
diff
changeset
|
3866 assert(_region_finger != NULL, "invariant"); |
342 | 3867 |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3868 // Make sure that any previously aborted region has been |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3869 // cleared. |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3870 assert(_aborted_region.is_empty(), "aborted region not cleared"); |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3871 |
342 | 3872 // The iteration was actually aborted. So now _region_finger |
3873 // points to the address of the object we last scanned. If we | |
3874 // leave it there, when we restart this task, we will rescan | |
3875 // the object. It is easy to avoid this. We move the finger by | |
3876 // enough to point to the next possible object header (the | |
3877 // bitmap knows by how much we need to move it as it knows its | |
3878 // granularity). | |
3879 MemRegion newRegion = | |
3880 MemRegion(_nextMarkBitMap->nextWord(_region_finger), mr.end()); | |
3881 | |
3882 if (!newRegion.is_empty()) { | |
3883 if (_cm->verbose_low()) { | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3884 gclog_or_tty->print_cr("[%d] recording unscanned region" |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3885 "[" PTR_FORMAT "," PTR_FORMAT ") in CMTask", |
342 | 3886 _task_id, |
3887 newRegion.start(), newRegion.end()); | |
3888 } | |
1835
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3889 // Now record the part of the region we didn't scan to |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3890 // make sure this task scans it later. |
4805b9f4779e
6941395: G1: Use only lock-free versions of region stack push() and pop()
johnc
parents:
1833
diff
changeset
|
3891 _aborted_region = newRegion; |
342 | 3892 } |
3893 // break from while | |
3894 mr = MemRegion(); | |
3895 } | |
3896 _region_finger = NULL; | |
3897 } | |
3898 | |
3899 if (_cm->verbose_low()) | |
3900 gclog_or_tty->print_cr("[%d] drained region stack, size = %d", | |
3901 _task_id, _cm->region_stack_size()); | |
3902 } | |
3903 } | |
3904 | |
3905 void CMTask::print_stats() { | |
3906 gclog_or_tty->print_cr("Marking Stats, task = %d, calls = %d", | |
3907 _task_id, _calls); | |
3908 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", | |
3909 _elapsed_time_ms, _termination_time_ms); | |
3910 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", | |
3911 _step_times_ms.num(), _step_times_ms.avg(), | |
3912 _step_times_ms.sd()); | |
3913 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", | |
3914 _step_times_ms.maximum(), _step_times_ms.sum()); | |
3915 | |
3916 #if _MARKING_STATS_ | |
3917 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", | |
3918 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), | |
3919 _all_clock_intervals_ms.sd()); | |
3920 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", | |
3921 _all_clock_intervals_ms.maximum(), | |
3922 _all_clock_intervals_ms.sum()); | |
3923 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", | |
3924 _clock_due_to_scanning, _clock_due_to_marking); | |
3925 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", | |
3926 _objs_scanned, _objs_found_on_bitmap); | |
3927 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", | |
3928 _local_pushes, _local_pops, _local_max_size); | |
3929 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", | |
3930 _global_pushes, _global_pops, _global_max_size); | |
3931 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", | |
3932 _global_transfers_to,_global_transfers_from); | |
3933 gclog_or_tty->print_cr(" Regions: claimed = %d, Region Stack: pops = %d", | |
3934 _regions_claimed, _region_stack_pops); | |
3935 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); | |
3936 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", | |
3937 _steal_attempts, _steals); | |
3938 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); | |
3939 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", | |
3940 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); | |
3941 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", | |
3942 _aborted_timed_out, _aborted_satb, _aborted_termination); | |
3943 #endif // _MARKING_STATS_ | |
3944 } | |
3945 | |
3946 /***************************************************************************** | |
3947 | |
3948 The do_marking_step(time_target_ms) method is the building block | |
3949 of the parallel marking framework. It can be called in parallel | |
3950 with other invocations of do_marking_step() on different tasks | |
3951 (but only one per task, obviously) and concurrently with the | |
3952 mutator threads, or during remark, hence it eliminates the need | |
3953 for two versions of the code. When called during remark, it will | |
3954 pick up from where the task left off during the concurrent marking | |
3955 phase. Interestingly, tasks are also claimable during evacuation | |
3956 pauses too, since do_marking_step() ensures that it aborts before | |
3957 it needs to yield. | |
3958 | |
    The data structures that it uses to do marking work are the
3960 following: | |
3961 | |
3962 (1) Marking Bitmap. If there are gray objects that appear only | |
3963 on the bitmap (this happens either when dealing with an overflow | |
3964 or when the initial marking phase has simply marked the roots | |
3965 and didn't push them on the stack), then tasks claim heap | |
3966 regions whose bitmap they then scan to find gray objects. A | |
3967 global finger indicates where the end of the last claimed region | |
3968 is. A local finger indicates how far into the region a task has | |
3969 scanned. The two fingers are used to determine how to gray an | |
3970 object (i.e. whether simply marking it is OK, as it will be | |
3971 visited by a task in the future, or whether it needs to be also | |
3972 pushed on a stack). | |
3973 | |
3974 (2) Local Queue. The local queue of the task which is accessed | |
3975 reasonably efficiently by the task. Other tasks can steal from | |
3976 it when they run out of work. Throughout the marking phase, a | |
3977 task attempts to keep its local queue short but not totally | |
3978 empty, so that entries are available for stealing by other | |
3979 tasks. Only when there is no more work, a task will totally | |
3980 drain its local queue. | |
3981 | |
3982 (3) Global Mark Stack. This handles local queue overflow. During | |
3983 marking only sets of entries are moved between it and the local | |
3984 queues, as access to it requires a mutex and more fine-grain | |
3985 interaction with it which might cause contention. If it | |
3986 overflows, then the marking phase should restart and iterate | |
3987 over the bitmap to identify gray objects. Throughout the marking | |
3988 phase, tasks attempt to keep the global mark stack at a small | |
3989 length but not totally empty, so that entries are available for | |
3990 popping by other tasks. Only when there is no more work, tasks | |
3991 will totally drain the global mark stack. | |
3992 | |
3993 (4) Global Region Stack. Entries on it correspond to areas of | |
3994 the bitmap that need to be scanned since they contain gray | |
3995 objects. Pushes on the region stack only happen during | |
3996 evacuation pauses and typically correspond to areas covered by | |
3997 GC LABS. If it overflows, then the marking phase should restart | |
3998 and iterate over the bitmap to identify gray objects. Tasks will | |
3999 try to totally drain the region stack as soon as possible. | |
4000 | |
4001 (5) SATB Buffer Queue. This is where completed SATB buffers are | |
4002 made available. Buffers are regularly removed from this queue | |
4003 and scanned for roots, so that the queue doesn't get too | |
4004 long. During remark, all completed buffers are processed, as | |
4005 well as the filled in parts of any uncompleted buffers. | |
4006 | |
4007 The do_marking_step() method tries to abort when the time target | |
4008 has been reached. There are a few other cases when the | |
4009 do_marking_step() method also aborts: | |
4010 | |
4011 (1) When the marking phase has been aborted (after a Full GC). | |
4012 | |
4013 (2) When a global overflow (either on the global stack or the | |
4014 region stack) has been triggered. Before the task aborts, it | |
4015 will actually sync up with the other tasks to ensure that all | |
4016 the marking data structures (local queues, stacks, fingers etc.) | |
4017 are re-initialised so that when do_marking_step() completes, | |
4018 the marking phase can immediately restart. | |
4019 | |
4020 (3) When enough completed SATB buffers are available. The | |
4021 do_marking_step() method only tries to drain SATB buffers right | |
4022 at the beginning. So, if enough buffers are available, the | |
4023 marking step aborts and the SATB buffers are processed at | |
4024 the beginning of the next invocation. | |
4025 | |
4026 (4) To yield. when we have to yield then we abort and yield | |
4027 right at the end of do_marking_step(). This saves us from a lot | |
4028 of hassle as, by yielding we might allow a Full GC. If this | |
4029 happens then objects will be compacted underneath our feet, the | |
4030 heap might shrink, etc. We save checking for this by just | |
4031 aborting and doing the yield right at the end. | |
4032 | |
4033 From the above it follows that the do_marking_step() method should | |
4034 be called in a loop (or, otherwise, regularly) until it completes. | |
4035 | |
4036 If a marking step completes without its has_aborted() flag being | |
4037 true, it means it has completed the current marking phase (and | |
4038 also all other marking tasks have done so and have all synced up). | |
4039 | |
4040 A method called regular_clock_call() is invoked "regularly" (in | |
4041 sub ms intervals) throughout marking. It is this clock method that | |
4042 checks all the abort conditions which were mentioned above and | |
4043 decides when the task should abort. A work-based scheme is used to | |
4044 trigger this clock method: when the number of object words the | |
4045 marking phase has scanned or the number of references the marking | |
4046 phase has visited reach a given limit. Additional invocations to | |
4047 the method clock have been planted in a few other strategic places | |
4048 too. The initial reason for the clock method was to avoid calling | |
4049 vtime too regularly, as it is quite expensive. So, once it was in | |
4050 place, it was natural to piggy-back all the other conditions on it | |
4051 too and not constantly check them throughout the code. | |
4052 | |
4053 *****************************************************************************/ | |
4054 | |
// Perform one bounded "step" of concurrent/STW marking work for this
// task. The step tries to finish within (roughly) time_target_ms; it
// may instead abort early — see the has_aborted() checks below and the
// large comment block above for the full protocol. do_stealing enables
// work stealing from other tasks' queues once local work is exhausted;
// do_termination enables entering the termination protocol (and the
// forced-overflow test hook) at the end of the step.
void CMTask::do_marking_step(double time_target_ms,
                             bool do_stealing,
                             bool do_termination) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  assert(concurrent() || _cm->region_stack_empty(),
         "the region stack should have been cleared before remark");
  assert(concurrent() || !_cm->has_aborted_regions(),
         "aborted regions should have been cleared before remark");
  assert(_region_finger == NULL,
         "this should be non-null only when a region is being scanned");

  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_task_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;
  statsOnly( _interval_start_time_ms = _start_time_ms );

  // Shrink the time target by the predicted overshoot (based on the
  // history of previous steps) so we tend to finish on time.
  double diff_prediction_ms =
    g1_policy->get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  if (_cm->verbose_low())
    gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, "
                           "target = %1.2lfms >>>>>>>>>>",
                           _task_id, _calls, _time_target_ms);

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the region stack or the mark stack overflows
    // during a GC pause and this task, after a yield point,
    // restarts. We have to abort as we need to get into the overflow
    // protocol which happens right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  // Then totally drain the region stack. We will not look at
  // it again before the next invocation of this method. Entries on
  // the region stack are only added during evacuation pauses, for
  // which we have to yield. When we do, we abort the task anyway so
  // it will look at the region stack again when it restarts.
  bitmap_closure.set_scanning_heap_region(false);
  drain_region_stack(&bitmap_closure);
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  // Main work loop: scan the currently-held region (if any), then keep
  // claiming and scanning new regions until we run out or abort.
  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      if (_cm->verbose_low())
        gclog_or_tty->print_cr("[%d] we're scanning part "
                               "["PTR_FORMAT", "PTR_FORMAT") "
                               "of region "PTR_FORMAT,
                               _task_id, _finger, _region_limit, _curr_region);

      // Let's iterate over the bitmap of the part of the
      // region that is left.
      bitmap_closure.set_scanning_heap_region(true);
      if (mr.is_empty() ||
          _nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        // We successfully completed iterating over the region. Now,
        // let's give up the region.
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region == NULL, "invariant");
      assert(_finger == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      if (_cm->verbose_low())
        gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id);
      HeapRegion* claimed_region = _cm->claim_region(_task_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        statsOnly( ++_regions_claimed );

        if (_cm->verbose_low())
          gclog_or_tty->print_cr("[%d] we successfully claimed "
                                 "region "PTR_FORMAT,
                                 _task_id, claimed_region);

        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while ( _curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently. We also cannot
    // check if the region stack is empty because if a thread is aborting
    // it can push a partially done region back.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");

    if (_cm->verbose_low())
      gclog_or_tty->print_cr("[%d] all regions claimed", _task_id);

    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other task's queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently. We also cannot
    // check if the region stack is empty because if a thread is aborting
    // it can push a partially done region back.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");

    if (_cm->verbose_low())
      gclog_or_tty->print_cr("[%d] starting to steal", _task_id);

    while (!has_aborted()) {
      oop obj;
      statsOnly( ++_steal_attempts );

      if (_cm->try_stealing(_task_id, &_hash_seed, obj)) {
        if (_cm->verbose_medium())
          gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully",
                                 _task_id, (void*) obj);

        statsOnly( ++_steals );

        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // If we are about to wrap up and go into termination, check if we
  // should raise the overflow flag.
  if (do_termination && !has_aborted()) {
    if (_cm->force_overflow()->should_force()) {
      _cm->set_has_overflown();
      regular_clock_call();
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it. We also cannot
    // check if the region stack is empty because if a thread is aborting
    // it can push a partially done region back.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");

    if (_cm->verbose_low())
      gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id);

    _termination_start_time_ms = os::elapsedVTime() * 1000.0;
    // The CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = _cm->terminator()->offer_termination(this);
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_task_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_aborted_region.is_empty(), "only way to reach here");
      guarantee(_cm->region_stack_empty(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
      guarantee(!_cm->region_stack_overflow(), "only way to reach here");

      if (_cm->verbose_low())
        gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id);
    } else {
      // Apparently there's more work to do. Let's abort this task. It
      // will restart it and we can hopefully find more things to do.

      if (_cm->verbose_low())
        gclog_or_tty->print_cr("[%d] apparently there is more work to do", _task_id);

      set_has_aborted();
      statsOnly( ++_aborted_termination );
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.

    statsOnly( ++_aborted );

    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialise in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (_cm->verbose_low())
        gclog_or_tty->print_cr("[%d] detected overflow", _task_id);

      _cm->enter_first_sync_barrier(_task_id);
      // When we exit this sync barrier we know that all tasks have
      // stopped doing marking work. So, it's now safe to
      // re-initialise our data structures. At the end of this method,
      // task 0 will clear the global data structures.

      statsOnly( ++_aborted_overflow );

      // We clear the local state of this task...
      clear_region_fields();

      // ...and enter the second barrier.
      _cm->enter_second_sync_barrier(_task_id);
      // At this point everything has been re-initialised and we're
      // ready to restart.
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _task_id, _time_target_ms, elapsed_time_ms);
      if (_cm->has_aborted())
        gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========",
                               _task_id);
    }
  } else {
    if (_cm->verbose_low())
      gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _task_id, _time_target_ms, elapsed_time_ms);
  }

  _claimed = false;
}
4452 | |
// Construct the marking task for worker 'task_id'. 'task_queue' is
// this task's own local mark queue and must be the entry at index
// 'task_id' in 'task_queues' (asserted in do_marking_step()).
// The marking bitmap (_nextMarkBitMap) is left NULL here and is
// installed later, before marking starts.
CMTask::CMTask(int task_id,
               ConcurrentMark* cm,
               CMTaskQueue* task_queue,
               CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _task_id(task_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL),
    _aborted_region(MemRegion()) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  // statsOnly() compiles its argument away in product builds.
  statsOnly( _clock_due_to_scanning = 0;
             _clock_due_to_marking = 0 );

  // Seed the step-duration prediction history with a small non-zero
  // value so the first prediction is sane.
  _marking_step_diffs_ms.add(0.5);
}
2435
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4473 |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX "###"

// "bottom-end" address range of a region; PTR_FORMAT widens with the
// pointer size, hence the two header widths below.
#define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT " %-4s"
#define G1PPRL_TYPE_H_FORMAT " %4s"
#define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT " %9s"
#define G1PPRL_DOUBLE_FORMAT " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4505 |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
// Constructor: zeroes the running totals and the humongous-series
// (_hum_*) accumulators, then prints the header of the liveness dump:
// the phase name with a timestamp, the committed/reserved heap ranges
// with the region size, and the per-region column headings. The
// per-region lines are emitted later by doHeapRegion(), the footer by
// the destructor.
G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  : _out(out),
    _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_committed = g1h->g1_committed();
  MemRegion g1_reserved = g1h->g1_reserved();
  // Timestamp so the dump can be correlated with other log output.
  double now = os::elapsedTime();

  // Print the header of the output.
  _out->cr();
  _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
                 G1PPRL_SUM_ADDR_FORMAT("committed")
                 G1PPRL_SUM_ADDR_FORMAT("reserved")
                 G1PPRL_SUM_BYTE_FORMAT("region-size"),
                 g1_committed.start(), g1_committed.end(),
                 g1_reserved.start(), g1_reserved.end(),
                 HeapRegion::GrainBytes);
  _out->print_cr(G1PPRL_LINE_PREFIX);
  // Column headings; the *_H_* macros mirror the per-region value
  // formats used in doHeapRegion() and must stay in sync with them.
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT,
                 "type", "address-range",
                 "used", "prev-live", "next-live", "gc-eff");
}
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4539 |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4540 // It takes as a parameter a reference to one of the _hum_* fields, it |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4541 // deduces the corresponding value for a region in a humongous region |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4542 // series (either the region size, or what's left if the _hum_* field |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4543 // is < the region size), and updates the _hum_* field accordingly. |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4544 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4545 size_t bytes = 0; |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4546 // The > 0 check is to deal with the prev and next live bytes which |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4547 // could be 0. |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4548 if (*hum_bytes > 0) { |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4549 bytes = MIN2((size_t) HeapRegion::GrainBytes, *hum_bytes); |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4550 *hum_bytes -= bytes; |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4551 } |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4552 return bytes; |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4553 } |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4554 |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4555 // It deduces the values for a region in a humongous region series |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4556 // from the _hum_* fields and updates those accordingly. It assumes |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4557 // that that _hum_* fields have already been set up from the "starts |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4558 // humongous" region and we visit the regions in address order. |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4559 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4560 size_t* capacity_bytes, |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4561 size_t* prev_live_bytes, |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4562 size_t* next_live_bytes) { |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4563 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4564 *used_bytes = get_hum_bytes(&_hum_used_bytes); |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4565 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4566 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4567 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4568 } |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4569 |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
// Visits one heap region: classifies it (FREE/SURV/EDEN/HUMS/HUMC/OLD),
// accounts its used/capacity/prev-live/next-live bytes into the running
// totals, and prints one formatted line for it. For a humongous series
// the byte counts live on the "starts humongous" region, so they are
// staged into the _hum_* fields there and then doled out one region's
// worth at a time to the HUMS region itself and each HUMC follower
// (this relies on regions being visited in address order).
// Always returns false so iteration continues over the whole heap.
bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type = "";
  HeapWord* bottom = r->bottom();
  HeapWord* end = r->end();
  size_t capacity_bytes = r->capacity();
  size_t used_bytes = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff = r->gc_efficiency();
  if (r->used() == 0) {
    type = "FREE";
  } else if (r->is_survivor()) {
    // Survivor must be tested before is_young(): survivor regions are
    // young too, and would otherwise be reported as EDEN.
    type = "SURV";
  } else if (r->is_young()) {
    type = "EDEN";
  } else if (r->startsHumongous()) {
    type = "HUMS";

    assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
           _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
           "they should have been zeroed after the last time we used them");
    // Set up the _hum_* fields.
    _hum_capacity_bytes = capacity_bytes;
    _hum_used_bytes = used_bytes;
    _hum_prev_live_bytes = prev_live_bytes;
    _hum_next_live_bytes = next_live_bytes;
    // Replace the series-wide values with this region's share (at most
    // one region's worth), leaving the remainder for the HUMC regions.
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    // Report only this region's address range, not the whole series.
    end = bottom + HeapRegion::GrainWords;
  } else if (r->continuesHumongous()) {
    type = "HUMC";
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    assert(end == bottom + HeapRegion::GrainWords, "invariant");
  } else {
    type = "OLD";
  }

  _total_used_bytes += used_bytes;
  _total_capacity_bytes += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;

  // Print a line for this particular region.
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_FORMAT
                 G1PPRL_ADDR_BASE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_DOUBLE_FORMAT,
                 type, bottom, end,
                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff);

  // false => keep iterating over the remaining regions.
  return false;
}
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
4626 |
371bbc844bf1
7027766: G1: introduce flag to dump the liveness information per region at the end of marking
tonyp
parents:
2369
diff
changeset
|
// Destructor: prints the footer of the dump — a SUMMARY line with the
// aggregate capacity, plus used / prev-live / next-live both in MB and
// as a percentage of the total capacity accumulated by doHeapRegion().
G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // Print the footer of the output.
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 " SUMMARY"
                 G1PPRL_SUM_MB_FORMAT("capacity")
                 G1PPRL_SUM_MB_PERC_FORMAT("used")
                 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                 G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
                 bytes_to_mb(_total_capacity_bytes),
                 bytes_to_mb(_total_used_bytes),
                 perc(_total_used_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_prev_live_bytes),
                 perc(_total_prev_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_next_live_bytes),
                 perc(_total_next_live_bytes, _total_capacity_bytes));
  _out->cr();
}