annotate src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 1022:4c3458a31e17
6888316: G1: has_aborted() || _cm->region_stack_empty() fails
Summary: Remove incorrect guarantee.
Reviewed-by: apetrusenko, iveresov
author | tonyp |
---|---|
date | Wed, 07 Oct 2009 09:42:18 -0400 |
parents | 035d2e036a9b |
children | 11d4857fe5e1 |
rev | line source |
---|---|
342 | 1 /* |
844 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_concurrentMark.cpp.incl" | |
27 | |
28 // | |
29 // CMS Bit Map Wrapper | |
30 | |
31 CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter): | |
32 _bm((uintptr_t*)NULL,0), | |
33 _shifter(shifter) { | |
34 _bmStartWord = (HeapWord*)(rs.base()); | |
35 _bmWordSize = rs.size()/HeapWordSize; // rs.size() is in bytes | |
36 ReservedSpace brs(ReservedSpace::allocation_align_size_up( | |
37 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); | |
38 | |
39 guarantee(brs.is_reserved(), "couldn't allocate CMS bit map"); | |
40 // For now we'll just commit all of the bit map up front. | |
41 // Later on we'll try to be more parsimonious with swap. | |
42 guarantee(_virtual_space.initialize(brs, brs.size()), | |
43 "couldn't reseve backing store for CMS bit map"); | |
44 assert(_virtual_space.committed_size() == brs.size(), | |
45 "didn't reserve backing store for all of CMS bit map?"); | |
46 _bm.set_map((uintptr_t*)_virtual_space.low()); | |
47 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= | |
48 _bmWordSize, "inconsistency in bit map sizing"); | |
49 _bm.set_size(_bmWordSize >> _shifter); | |
50 } | |
51 | |
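The constructor above sizes the backing store at one bitmap bit per `1 << _shifter` heap words. A worked sketch of that arithmetic, with assumed values (64-bit build, `HeapWordSize == 8`, `LogBitsPerByte == 3`, `_shifter == 0` from `MinObjAlignment - 1`, and a hypothetical 1 GB reservation):

```cpp
// Sketch only -- all values assumed, not taken from a live VM.
size_t heap_bytes   = 1024 * 1024 * 1024;      // hypothetical 1 GB ReservedSpace
size_t bm_word_size = heap_bytes / 8;          // rs.size() / HeapWordSize = 2^27 words
int    shifter      = 0;                       // MinObjAlignment - 1 on a typical build
// One bit per (1 << shifter) words, 2^LogBitsPerByte bits per byte:
size_t bitmap_bytes = (bm_word_size >> (shifter + 3)) + 1;  // 2^24 + 1, i.e. ~16 MB
```

So marking a 1 GB heap costs roughly 16 MB per bitmap, and this file keeps two of them (prev and next).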
52 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr, | |
53 HeapWord* limit) const { | |
54 // First we must round addr *up* to a possible object boundary. | |
55 addr = (HeapWord*)align_size_up((intptr_t)addr, | |
56 HeapWordSize << _shifter); | |
57 size_t addrOffset = heapWordToOffset(addr); | |
58 if (limit == NULL) limit = _bmStartWord + _bmWordSize; | |
59 size_t limitOffset = heapWordToOffset(limit); | |
60 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); | |
61 HeapWord* nextAddr = offsetToHeapWord(nextOffset); | |
62 assert(nextAddr >= addr, "get_next_one postcondition"); | |
63 assert(nextAddr == limit || isMarked(nextAddr), | |
64 "get_next_one postcondition"); | |
65 return nextAddr; | |
66 } | |
67 | |
68 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr, | |
69 HeapWord* limit) const { | |
70 size_t addrOffset = heapWordToOffset(addr); | |
71 if (limit == NULL) limit = _bmStartWord + _bmWordSize; | |
72 size_t limitOffset = heapWordToOffset(limit); | |
73 size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset); | |
74 HeapWord* nextAddr = offsetToHeapWord(nextOffset); | |
75 assert(nextAddr >= addr, "get_next_one postcondition"); | |
76 assert(nextAddr == limit || !isMarked(nextAddr), | |
77 "get_next_one postcondition"); | |
78 return nextAddr; | |
79 } | |
80 | |
81 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const { | |
82 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check"); | |
83 return (int) (diff >> _shifter); | |
84 } | |
85 | |
86 bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) { | |
87 HeapWord* left = MAX2(_bmStartWord, mr.start()); | |
88 HeapWord* right = MIN2(_bmStartWord + _bmWordSize, mr.end()); | |
89 if (right > left) { | |
90 // Right-open interval [leftOffset, rightOffset). | |
91 return _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right)); | |
92 } else { | |
93 return true; | |
94 } | |
95 } | |
96 | |
97 void CMBitMapRO::mostly_disjoint_range_union(BitMap* from_bitmap, | |
98 size_t from_start_index, | |
99 HeapWord* to_start_word, | |
100 size_t word_num) { | |
101 _bm.mostly_disjoint_range_union(from_bitmap, | |
102 from_start_index, | |
103 heapWordToOffset(to_start_word), | |
104 word_num); | |
105 } | |
106 | |
107 #ifndef PRODUCT | |
108 bool CMBitMapRO::covers(ReservedSpace rs) const { | |
109 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); | |
645 | 110 assert(((size_t)_bm.size() * (size_t)(1 << _shifter)) == _bmWordSize, |
342 | 111 "size inconsistency"); |
112 return _bmStartWord == (HeapWord*)(rs.base()) && | |
113 _bmWordSize == rs.size()>>LogHeapWordSize; | |
114 } | |
115 #endif | |
116 | |
117 void CMBitMap::clearAll() { | |
118 _bm.clear(); | |
119 return; | |
120 } | |
121 | |
122 void CMBitMap::markRange(MemRegion mr) { | |
123 mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); | |
124 assert(!mr.is_empty(), "unexpected empty region"); | |
125 assert((offsetToHeapWord(heapWordToOffset(mr.end())) == | |
126 ((HeapWord *) mr.end())), | |
127 "markRange memory region end is not card aligned"); | |
128 // convert address range into offset range | |
129 _bm.at_put_range(heapWordToOffset(mr.start()), | |
130 heapWordToOffset(mr.end()), true); | |
131 } | |
132 | |
133 void CMBitMap::clearRange(MemRegion mr) { | |
134 mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); | |
135 assert(!mr.is_empty(), "unexpected empty region"); | |
136 // convert address range into offset range | |
137 _bm.at_put_range(heapWordToOffset(mr.start()), | |
138 heapWordToOffset(mr.end()), false); | |
139 } | |
140 | |
141 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr, | |
142 HeapWord* end_addr) { | |
143 HeapWord* start = getNextMarkedWordAddress(addr); | |
144 start = MIN2(start, end_addr); | |
145 HeapWord* end = getNextUnmarkedWordAddress(start); | |
146 end = MIN2(end, end_addr); | |
147 assert(start <= end, "Consistency check"); | |
148 MemRegion mr(start, end); | |
149 if (!mr.is_empty()) { | |
150 clearRange(mr); | |
151 } | |
152 return mr; | |
153 } | |
154 | |
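A hedged usage sketch of `getAndClearMarkedRegion` above: a caller can sweep a window of the bitmap by repeatedly pulling and clearing the next marked run. The loop shape and the `process` consumer are illustrative, not from this file:

```cpp
// Illustrative sweep over [addr, end_addr): pull each marked run, clear it,
// and continue from the end of the run just returned.
for (MemRegion mr = bm->getAndClearMarkedRegion(addr, end_addr);
     !mr.is_empty();
     mr = bm->getAndClearMarkedRegion(mr.end(), end_addr)) {
  process(mr);   // hypothetical consumer of the live range
}
```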
155 CMMarkStack::CMMarkStack(ConcurrentMark* cm) : | |
156 _base(NULL), _cm(cm) | |
157 #ifdef ASSERT | |
158 , _drain_in_progress(false) | |
159 , _drain_in_progress_yields(false) | |
160 #endif | |
161 {} | |
162 | |
163 void CMMarkStack::allocate(size_t size) { | |
164 _base = NEW_C_HEAP_ARRAY(oop, size); | |
165 if (_base == NULL) | |
166 vm_exit_during_initialization("Failed to allocate " | |
167 "CM region mark stack"); | |
168 _index = 0; | |
169 // QQQQ cast ... | |
170 _capacity = (jint) size; | |
171 _oops_do_bound = -1; | |
172 NOT_PRODUCT(_max_depth = 0); | |
173 } | |
174 | |
175 CMMarkStack::~CMMarkStack() { | |
176 if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base); | |
177 } | |
178 | |
179 void CMMarkStack::par_push(oop ptr) { | |
180 while (true) { | |
181 if (isFull()) { | |
182 _overflow = true; | |
183 return; | |
184 } | |
185 // Otherwise... | |
186 jint index = _index; | |
187 jint next_index = index+1; | |
188 jint res = Atomic::cmpxchg(next_index, &_index, index); | |
189 if (res == index) { | |
190 _base[index] = ptr; | |
191 // Note that we don't maintain this atomically. We could, but it | |
192 // doesn't seem necessary. | |
193 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); | |
194 return; | |
195 } | |
196 // Otherwise, we need to try again. | |
197 } | |
198 } | |
199 | |
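`par_push` above is the standard compare-and-swap claim-then-write idiom: a thread reserves slot `index` by CAS-ing `_index` forward, and only the CAS winner stores into that slot. A minimal stand-alone sketch of the same pattern using `std::atomic` instead of HotSpot's `Atomic::cmpxchg` (all names here are illustrative):

```cpp
#include <atomic>

// Sketch of the CAS-retry push in CMMarkStack::par_push; T stands in for oop.
template <typename T, int CAPACITY>
struct ParStack {
  T _base[CAPACITY];
  std::atomic<int> _index{0};
  bool _overflow = false;

  void par_push(T ptr) {
    while (true) {
      int index = _index.load();
      if (index >= CAPACITY) { _overflow = true; return; }  // isFull()
      // Claim slot 'index' by advancing the shared index; retry on a lost race.
      if (_index.compare_exchange_strong(index, index + 1)) {
        _base[index] = ptr;   // only the CAS winner writes this slot
        return;
      }
    }
  }
};
```

As in the original, the slot is claimed before its store becomes visible, so readers must synchronize separately (note that `par_pop_arr` below takes a lock rather than racing with pushers).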
200 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) { | |
201 while (true) { | |
202 if (isFull()) { | |
203 _overflow = true; | |
204 return; | |
205 } | |
206 // Otherwise... | |
207 jint index = _index; | |
208 jint next_index = index + n; | |
209 if (next_index > _capacity) { | |
210 _overflow = true; | |
211 return; | |
212 } | |
213 jint res = Atomic::cmpxchg(next_index, &_index, index); | |
214 if (res == index) { | |
215 for (int i = 0; i < n; i++) { | |
216 int ind = index + i; | |
217 assert(ind < _capacity, "By overflow test above."); | |
218 _base[ind] = ptr_arr[i]; | |
219 } | |
220 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); | |
221 return; | |
222 } | |
223 // Otherwise, we need to try again. | |
224 } | |
225 } | |
226 | |
227 | |
228 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) { | |
229 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
230 jint start = _index; | |
231 jint next_index = start + n; | |
232 if (next_index > _capacity) { | |
233 _overflow = true; | |
234 return; | |
235 } | |
236 // Otherwise. | |
237 _index = next_index; | |
238 for (int i = 0; i < n; i++) { | |
239 int ind = start + i; | |
240 guarantee(ind < _capacity, "By overflow test above."); | |
241 _base[ind] = ptr_arr[i]; | |
242 } | |
243 } | |
244 | |
245 | |
246 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { | |
247 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
248 jint index = _index; | |
249 if (index == 0) { | |
250 *n = 0; | |
251 return false; | |
252 } else { | |
253 int k = MIN2(max, index); | |
254 jint new_ind = index - k; | |
255 for (int j = 0; j < k; j++) { | |
256 ptr_arr[j] = _base[new_ind + j]; | |
257 } | |
258 _index = new_ind; | |
259 *n = k; | |
260 return true; | |
261 } | |
262 } | |
263 | |
264 | |
265 CMRegionStack::CMRegionStack() : _base(NULL) {} | |
266 | |
267 void CMRegionStack::allocate(size_t size) { | |
268 _base = NEW_C_HEAP_ARRAY(MemRegion, size); | |
269 if (_base == NULL) | |
270 vm_exit_during_initialization("Failed to allocate " | |
271 "CM region mark stack"); | |
272 _index = 0; | |
273 // QQQQ cast ... | |
274 _capacity = (jint) size; | |
275 } | |
276 | |
277 CMRegionStack::~CMRegionStack() { | |
278 if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base); | |
279 } | |
280 | |
281 void CMRegionStack::push(MemRegion mr) { | |
282 assert(mr.word_size() > 0, "Precondition"); | |
283 while (true) { | |
284 if (isFull()) { | |
285 _overflow = true; | |
286 return; | |
287 } | |
288 // Otherwise... | |
289 jint index = _index; | |
290 jint next_index = index+1; | |
291 jint res = Atomic::cmpxchg(next_index, &_index, index); | |
292 if (res == index) { | |
293 _base[index] = mr; | |
294 return; | |
295 } | |
296 // Otherwise, we need to try again. | |
297 } | |
298 } | |
299 | |
300 MemRegion CMRegionStack::pop() { | |
301 while (true) { | |
302 // Otherwise... | |
303 jint index = _index; | |
304 | |
305 if (index == 0) { | |
306 return MemRegion(); | |
307 } | |
308 jint next_index = index-1; | |
309 jint res = Atomic::cmpxchg(next_index, &_index, index); | |
310 if (res == index) { | |
311 MemRegion mr = _base[next_index]; | |
312 if (mr.start() != NULL) { | |
313 tmp_guarantee_CM( mr.end() != NULL, "invariant" ); | |
314 tmp_guarantee_CM( mr.word_size() > 0, "invariant" ); | |
315 return mr; | |
316 } else { | |
317 // that entry was invalidated... let's skip it | |
318 tmp_guarantee_CM( mr.end() == NULL, "invariant" ); | |
319 } | |
320 } | |
321 // Otherwise, we need to try again. | |
322 } | |
323 } | |
324 | |
325 bool CMRegionStack::invalidate_entries_into_cset() { | |
326 bool result = false; | |
327 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
328 for (int i = 0; i < _oops_do_bound; ++i) { | |
329 MemRegion mr = _base[i]; | |
330 if (mr.start() != NULL) { | |
331 tmp_guarantee_CM( mr.end() != NULL, "invariant"); | |
332 tmp_guarantee_CM( mr.word_size() > 0, "invariant" ); | |
333 HeapRegion* hr = g1h->heap_region_containing(mr.start()); | |
334 tmp_guarantee_CM( hr != NULL, "invariant" ); | |
335 if (hr->in_collection_set()) { | |
336 // The region points into the collection set | |
337 _base[i] = MemRegion(); | |
338 result = true; | |
339 } | |
340 } else { | |
341 // that entry was invalidated... let's skip it | |
342 tmp_guarantee_CM( mr.end() == NULL, "invariant" ); | |
343 } | |
344 } | |
345 return result; | |
346 } | |
347 | |
348 template<class OopClosureClass> | |
349 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) { | |
350 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after | |
351 || SafepointSynchronize::is_at_safepoint(), | |
352 "Drain recursion must be yield-safe."); | |
353 bool res = true; | |
354 debug_only(_drain_in_progress = true); | |
355 debug_only(_drain_in_progress_yields = yield_after); | |
356 while (!isEmpty()) { | |
357 oop newOop = pop(); | |
358 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop"); | |
359 assert(newOop->is_oop(), "Expected an oop"); | |
360 assert(bm == NULL || bm->isMarked((HeapWord*)newOop), | |
361 "only grey objects on this stack"); | |
362 // iterate over the oops in this oop, marking and pushing | |
363 // the ones in CMS generation. | |
364 newOop->oop_iterate(cl); | |
365 if (yield_after && _cm->do_yield_check()) { | |
366 res = false; break; | |
367 } | |
368 } | |
369 debug_only(_drain_in_progress = false); | |
370 return res; | |
371 } | |
372 | |
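A hedged usage sketch of the `drain` template above (the closure type and call site are illustrative; the real callers live elsewhere in the marking code):

```cpp
// Illustrative only: drain grey objects, offering to yield between objects.
SomeOopClosure cl(this);   // hypothetical OopClosureClass
bool finished = _markStack.drain(&cl, _nextMarkBitMap, true /* yield_after */);
if (!finished) {
  // drain() yielded (e.g. for a safepoint); the caller should expect to
  // resume or restart the drain later.
}
```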
373 void CMMarkStack::oops_do(OopClosure* f) { | |
374 if (_index == 0) return; | |
375 assert(_oops_do_bound != -1 && _oops_do_bound <= _index, | |
376 "Bound must be set."); | |
377 for (int i = 0; i < _oops_do_bound; i++) { | |
378 f->do_oop(&_base[i]); | |
379 } | |
380 _oops_do_bound = -1; | |
381 } | |
382 | |
383 bool ConcurrentMark::not_yet_marked(oop obj) const { | |
384 return (_g1h->is_obj_ill(obj) | |
385 || (_g1h->is_in_permanent(obj) | |
386 && !nextMarkBitMap()->isMarked((HeapWord*)obj))); | |
387 } | |
388 | |
389 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
390 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
391 #endif // _MSC_VER | |
392 | |
393 ConcurrentMark::ConcurrentMark(ReservedSpace rs, | |
394 int max_regions) : | |
395 _markBitMap1(rs, MinObjAlignment - 1), | |
396 _markBitMap2(rs, MinObjAlignment - 1), | |
397 | |
398 _parallel_marking_threads(0), | |
399 _sleep_factor(0.0), | |
400 _marking_task_overhead(1.0), | |
401 _cleanup_sleep_factor(0.0), | |
402 _cleanup_task_overhead(1.0), | |
403 _region_bm(max_regions, false /* in_resource_area*/), | |
404 _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >> | |
405 CardTableModRefBS::card_shift, | |
406 false /* in_resource_area*/), | |
407 _prevMarkBitMap(&_markBitMap1), | |
408 _nextMarkBitMap(&_markBitMap2), | |
409 _at_least_one_mark_complete(false), | |
410 | |
411 _markStack(this), | |
412 _regionStack(), | |
413 // _finger set in set_non_marking_state | |
414 | |
415 _max_task_num(MAX2(ParallelGCThreads, (size_t)1)), | |
416 // _active_tasks set in set_non_marking_state | |
417 // _tasks set inside the constructor | |
418 _task_queues(new CMTaskQueueSet((int) _max_task_num)), | |
419 _terminator(ParallelTaskTerminator((int) _max_task_num, _task_queues)), | |
420 | |
421 _has_overflown(false), | |
422 _concurrent(false), | |
619 | 423 _has_aborted(false), |
424 _restart_for_overflow(false), | |
425 _concurrent_marking_in_progress(false), | |
426 _should_gray_objects(false), | |
342 | 427 |
428 // _verbose_level set below | |
429 | |
430 _init_times(), | |
431 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(), | |
432 _cleanup_times(), | |
433 _total_counting_time(0.0), | |
434 _total_rs_scrub_time(0.0), | |
435 | |
936 | 436 _parallel_workers(NULL) |
342 | 437 { |
438 CMVerboseLevel verbose_level = | |
439 (CMVerboseLevel) G1MarkingVerboseLevel; | |
440 if (verbose_level < no_verbose) | |
441 verbose_level = no_verbose; | |
442 if (verbose_level > high_verbose) | |
443 verbose_level = high_verbose; | |
444 _verbose_level = verbose_level; | |
445 | |
446 if (verbose_low()) | |
447 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", " | |
448 "heap end = "PTR_FORMAT, _heap_start, _heap_end); | |
449 | |
751 | 450 _markStack.allocate(G1MarkStackSize); |
451 _regionStack.allocate(G1MarkRegionStackSize); | |
342 | 452 |
453 // Create & start a ConcurrentMark thread. | |
845 | 454 _cmThread = new ConcurrentMarkThread(this); |
455 assert(cmThread() != NULL, "CM Thread should have been created"); | |
456 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); | |
457 | |
342 | 458 _g1h = G1CollectedHeap::heap(); |
459 assert(CGC_lock != NULL, "Where's the CGC_lock?"); | |
460 assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency"); | |
461 assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency"); | |
462 | |
463 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); | |
464 satb_qs.set_buffer_size(G1SATBLogBufferSize); | |
465 | |
466 int size = (int) MAX2(ParallelGCThreads, (size_t)1); | |
467 _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size); | |
468 for (int i = 0 ; i < size; i++) { | |
469 _par_cleanup_thread_state[i] = new ParCleanupThreadState; | |
470 } | |
471 | |
472 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num); | |
473 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num); | |
474 | |
475 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail | |
476 _active_tasks = _max_task_num; | |
477 for (int i = 0; i < (int) _max_task_num; ++i) { | |
478 CMTaskQueue* task_queue = new CMTaskQueue(); | |
479 task_queue->initialize(); | |
480 _task_queues->register_queue(i, task_queue); | |
481 | |
482 _tasks[i] = new CMTask(i, this, task_queue, _task_queues); | |
483 _accum_task_vtime[i] = 0.0; | |
484 } | |
485 | |
486 if (ParallelMarkingThreads > ParallelGCThreads) { | |
487 vm_exit_during_initialization("Can't have more ParallelMarkingThreads " | |
488 "than ParallelGCThreads."); | |
489 } | |
490 if (ParallelGCThreads == 0) { | |
491 // if we are not running with any parallel GC threads we will not | |
492 // spawn any marking threads either | |
493 _parallel_marking_threads = 0; | |
494 _sleep_factor = 0.0; | |
495 _marking_task_overhead = 1.0; | |
496 } else { | |
497 if (ParallelMarkingThreads > 0) { | |
751 | 498 // notice that ParallelMarkingThreads overrides G1MarkingOverheadPercent |
342 | 499 // if both are set |
500 | |
501 _parallel_marking_threads = ParallelMarkingThreads; | |
502 _sleep_factor = 0.0; | |
503 _marking_task_overhead = 1.0; | |
751 | 504 } else if (G1MarkingOverheadPercent > 0) { |
342 | 505 // we will calculate the number of parallel marking threads |
506 // based on a target overhead with respect to the soft real-time | |
507 // goal | |
508 | |
751 | 509 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0; |
342 | 510 double overall_cm_overhead = |
751 | 511 (double) MaxGCPauseMillis * marking_overhead / |
512 (double) GCPauseIntervalMillis; | |
342 | 513 double cpu_ratio = 1.0 / (double) os::processor_count(); |
514 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio); | |
515 double marking_task_overhead = | |
516 overall_cm_overhead / marking_thread_num * | |
517 (double) os::processor_count(); | |
518 double sleep_factor = | |
519 (1.0 - marking_task_overhead) / marking_task_overhead; | |
520 | |
521 _parallel_marking_threads = (size_t) marking_thread_num; | |
522 _sleep_factor = sleep_factor; | |
523 _marking_task_overhead = marking_task_overhead; | |
524 } else { | |
525 _parallel_marking_threads = MAX2((ParallelGCThreads + 2) / 4, (size_t)1); | |
526 _sleep_factor = 0.0; | |
527 _marking_task_overhead = 1.0; | |
528 } | |
529 | |
530 if (parallel_marking_threads() > 1) | |
531 _cleanup_task_overhead = 1.0; | |
532 else | |
533 _cleanup_task_overhead = marking_task_overhead(); | |
534 _cleanup_sleep_factor = | |
535 (1.0 - cleanup_task_overhead()) / cleanup_task_overhead(); | |
536 | |
537 #if 0 | |
538 gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads()); | |
539 gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead()); | |
540 gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor()); | |
541 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead()); | |
542 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor()); | |
543 #endif | |
544 | |
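The overhead-driven sizing above is easiest to see with numbers. A worked example with assumed flag values (`G1MarkingOverheadPercent = 50`, `MaxGCPauseMillis = 200`, `GCPauseIntervalMillis = 1000`, 8 processors; all hypothetical):

```cpp
#include <cmath>

// Hypothetical worked example of the thread-count calculation above.
double marking_overhead      = 50 / 100.0;             // 0.50
double overall_cm_overhead   = 200.0 * 0.50 / 1000.0;  // 0.10 of total CPU
double cpu_ratio             = 1.0 / 8;                // 0.125 per processor
double marking_thread_num    = ceil(0.10 / 0.125);     // 1 marking thread
double marking_task_overhead = 0.10 / 1.0 * 8;         // 0.80 duty cycle per thread
double sleep_factor          = (1.0 - 0.80) / 0.80;    // 0.25: sleep 1/4 of work time
```

That is, one marking thread working about 80% of the time, sleeping a quarter of each recent work interval to hit the target overhead.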
545 guarantee( parallel_marking_threads() > 0, "peace of mind" ); | |
1019 | 546 _parallel_workers = new WorkGang("G1 Parallel Marking Threads", |
342 | 547 (int) parallel_marking_threads(), false, true); |
548 if (_parallel_workers == NULL) | |
549 vm_exit_during_initialization("Failed necessary allocation."); | |
550 } | |
551 | |
552 // so that the call below can read a sensible value | |
553 _heap_start = (HeapWord*) rs.base(); | |
554 set_non_marking_state(); | |
555 } | |
556 | |
557 void ConcurrentMark::update_g1_committed(bool force) { | |
558 // If concurrent marking is not in progress, then we do not need to | |
559 // update _heap_end. This has a subtle and important | |
560 // side-effect. Imagine that two evacuation pauses happen between | |
561 // marking completion and remark. The first one can grow the | |
562 // heap (hence now the finger is below the heap end). Then, the | |
563 // second one could unnecessarily push regions on the region | |
564 // stack. This causes the invariant that the region stack is empty | |
565 // at the beginning of remark to be false. By ensuring that we do | |
566 // not observe heap expansions after marking is complete, we do | |
567 // not have this problem. | |
568 if (!concurrent_marking_in_progress() && !force) | |
569 return; | |
570 | |
571 MemRegion committed = _g1h->g1_committed(); | |
572 tmp_guarantee_CM( committed.start() == _heap_start, | |
573 "start shouldn't change" ); | |
574 HeapWord* new_end = committed.end(); | |
575 if (new_end > _heap_end) { | |
576 // The heap has been expanded. | |
577 | |
578 _heap_end = new_end; | |
579 } | |
580 // Notice that the heap can also shrink. However, this only happens | |
581 // during a Full GC (at least currently) and the entire marking | |
582 // phase will bail out and the task will not be restarted. So, let's | |
583 // do nothing. | |
584 } | |
585 | |
586 void ConcurrentMark::reset() { | |
587 // Starting values for these two. This should be called in a STW | |
588 // phase. CM will be notified of any future g1_committed expansions | |
589 // will be at the end of evacuation pauses, when tasks are | |
590 // inactive. | |
591 MemRegion committed = _g1h->g1_committed(); | |
592 _heap_start = committed.start(); | |
593 _heap_end = committed.end(); | |
594 | |
595 guarantee( _heap_start != NULL && | |
596 _heap_end != NULL && | |
597 _heap_start < _heap_end, "heap bounds should look ok" ); | |
598 | |
599 // reset all the marking data structures and any necessary flags | |
600 clear_marking_state(); | |
601 | |
602 if (verbose_low()) | |
603 gclog_or_tty->print_cr("[global] resetting"); | |
604 | |
605 // We do reset all of them, since different phases will use | |
606 // different numbers of active threads. So, it's easiest to have all | |
607 // of them ready. | |
608 for (int i = 0; i < (int) _max_task_num; ++i) | |
609 _tasks[i]->reset(_nextMarkBitMap); | |
610 | |
611 // we need this to make sure that the flag is on during the evac | |
612 // pause with initial mark piggy-backed | |
613 set_concurrent_marking_in_progress(); | |
614 } | |
615 | |
616 void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) { | |
617 guarantee( active_tasks <= _max_task_num, "we should not have more" ); | |
618 | |
619 _active_tasks = active_tasks; | |
620 // Need to update the three data structures below according to the | |
621 // number of active threads for this phase. | |
622 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues); | |
623 _first_overflow_barrier_sync.set_n_workers((int) active_tasks); | |
624 _second_overflow_barrier_sync.set_n_workers((int) active_tasks); | |
625 | |
626 _concurrent = concurrent; | |
627 // We propagate this to all tasks, not just the active ones. | |
628 for (int i = 0; i < (int) _max_task_num; ++i) | |
629 _tasks[i]->set_concurrent(concurrent); | |
630 | |
631 if (concurrent) { | |
632 set_concurrent_marking_in_progress(); | |
633 } else { | |
634 // We currently assume that the concurrent flag has been set to | |
635 // false before we start remark. At this point we should also be | |
636 // in a STW phase. | |
637 guarantee( !concurrent_marking_in_progress(), "invariant" ); | |
638 guarantee( _finger == _heap_end, "only way to get here" ); | |
639 update_g1_committed(true); | |
640 } | |
641 } | |
642 | |
643 void ConcurrentMark::set_non_marking_state() { | |
644 // We set the global marking state to some default values when we're | |
645 // not doing marking. | |
646 clear_marking_state(); | |
647 _active_tasks = 0; | |
648 clear_concurrent_marking_in_progress(); | |
649 } | |
650 | |
651 ConcurrentMark::~ConcurrentMark() { | |
652 int size = (int) MAX2(ParallelGCThreads, (size_t)1); | |
653 for (int i = 0; i < size; i++) delete _par_cleanup_thread_state[i]; | |
654 FREE_C_HEAP_ARRAY(ParCleanupThreadState*, | |
655 _par_cleanup_thread_state); | |
656 | |
657 for (int i = 0; i < (int) _max_task_num; ++i) { | |
658 delete _task_queues->queue(i); | |
659 delete _tasks[i]; | |
660 } | |
661 delete _task_queues; | |
662 FREE_C_HEAP_ARRAY(CMTask*, _max_task_num); | |
663 } | |
664 | |
665 // This closure is used to mark refs into the g1 generation | |
666 // from external roots in the CMS bit map. | |
667 // Called at the first checkpoint. | |
668 // | |
669 | |
670 #define PRINT_REACHABLE_AT_INITIAL_MARK 0 | |
671 #if PRINT_REACHABLE_AT_INITIAL_MARK | |
672 static FILE* reachable_file = NULL; | |
673 | |
674 class PrintReachableClosure: public OopsInGenClosure { | |
675 CMBitMap* _bm; | |
676 int _level; | |
677 public: | |
678 PrintReachableClosure(CMBitMap* bm) : | |
679 _bm(bm), _level(0) { | |
680 guarantee(reachable_file != NULL, "pre-condition"); | |
681 } | |
682 void do_oop(oop* p) { | |
683 oop obj = *p; | |
684 HeapWord* obj_addr = (HeapWord*)obj; | |
685 if (obj == NULL) return; | |
686 fprintf(reachable_file, "%d: "PTR_FORMAT" -> "PTR_FORMAT" (%d)\n", | |
687 _level, p, (void*) obj, _bm->isMarked(obj_addr)); | |
688 if (!_bm->isMarked(obj_addr)) { | |
689 _bm->mark(obj_addr); | |
690 _level++; | |
691 obj->oop_iterate(this); | |
692 _level--; | |
693 } | |
694 } | |
695 }; | |
696 #endif // PRINT_REACHABLE_AT_INITIAL_MARK | |
697 | |
698 #define SEND_HEAP_DUMP_TO_FILE 0 | |
699 #if SEND_HEAP_DUMP_TO_FILE | |
700 static FILE* heap_dump_file = NULL; | |
701 #endif // SEND_HEAP_DUMP_TO_FILE | |
702 | |
703 void ConcurrentMark::clearNextBitmap() { | |
704 guarantee(!G1CollectedHeap::heap()->mark_in_progress(), "Precondition."); | |
705 | |
706 // clear the mark bitmap (no grey objects to start with). | |
707 // We need to do this in chunks and offer to yield in between | |
708 // each chunk. | |
709 HeapWord* start = _nextMarkBitMap->startWord(); | |
710 HeapWord* end = _nextMarkBitMap->endWord(); | |
711 HeapWord* cur = start; | |
712 size_t chunkSize = M; | |
713 while (cur < end) { | |
714 HeapWord* next = cur + chunkSize; | |
715 if (next > end) | |
716 next = end; | |
717 MemRegion mr(cur,next); | |
718 _nextMarkBitMap->clearRange(mr); | |
719 cur = next; | |
720 do_yield_check(); | |
721 } | |
722 } | |
723 | |
724 class NoteStartOfMarkHRClosure: public HeapRegionClosure { | |
725 public: | |
726 bool doHeapRegion(HeapRegion* r) { | |
727 if (!r->continuesHumongous()) { | |
728 r->note_start_of_marking(true); | |
729 } | |
730 return false; | |
731 } | |
732 }; | |
733 | |
734 void ConcurrentMark::checkpointRootsInitialPre() { | |
735 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
736 G1CollectorPolicy* g1p = g1h->g1_policy(); | |
737 | |
738 _has_aborted = false; | |
739 | |
740 // Find all the reachable objects... | |
741 #if PRINT_REACHABLE_AT_INITIAL_MARK | |
742 guarantee(reachable_file == NULL, "Protocol"); | |
743 char fn_buf[100]; | |
744 sprintf(fn_buf, "/tmp/reachable.txt.%d", os::current_process_id()); | |
745 reachable_file = fopen(fn_buf, "w"); | |
746 // clear the mark bitmap (no grey objects to start with) | |
747 _nextMarkBitMap->clearAll(); | |
748 PrintReachableClosure prcl(_nextMarkBitMap); | |
989 | 749 g1h->process_strong_roots(true, // activate StrongRootsScope |
342 | 750 false, // fake perm gen collection |
751 SharedHeap::SO_AllClasses, | |
752 &prcl, // Regular roots | |
989 | 753 NULL, // do not visit active blobs |
342 | 754 &prcl // Perm Gen Roots |
755 ); | |
756 // The root iteration above "consumed" dirty cards in the perm gen. | |
757 // Therefore, as a shortcut, we dirty all such cards. | |
758 g1h->rem_set()->invalidate(g1h->perm_gen()->used_region(), false); | |
759 fclose(reachable_file); | |
760 reachable_file = NULL; | |
761 // clear the mark bitmap again. | |
762 _nextMarkBitMap->clearAll(); | |
763 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
764 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
765 #endif // PRINT_REACHABLE_AT_INITIAL_MARK | |
766 | |
767 // Initialise marking structures. This has to be done in a STW phase. | |
768 reset(); | |
769 } | |
770 | |
771 class CMMarkRootsClosure: public OopsInGenClosure { | |
772 private: | |
773 ConcurrentMark* _cm; | |
774 G1CollectedHeap* _g1h; | |
775 bool _do_barrier; | |
776 | |
777 public: | |
778 CMMarkRootsClosure(ConcurrentMark* cm, | |
779 G1CollectedHeap* g1h, | |
780 bool do_barrier) : _cm(cm), _g1h(g1h), | |
781 _do_barrier(do_barrier) { } | |
782 | |
845 | 783 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
784 virtual void do_oop( oop* p) { do_oop_work(p); } | |
785 | |
786 template <class T> void do_oop_work(T* p) { | |
787 T heap_oop = oopDesc::load_heap_oop(p); | |
788 if (!oopDesc::is_null(heap_oop)) { | |
789 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | |
790 assert(obj->is_oop() || obj->mark() == NULL, | |
342 | 791 "expected an oop, possibly with mark word displaced"); |
845 | 792 HeapWord* addr = (HeapWord*)obj; |
342 | 793 if (_g1h->is_in_g1_reserved(addr)) { |
845 | 794 _cm->grayRoot(obj); |
342 | 795 } |
796 } | |
797 if (_do_barrier) { | |
798 assert(!_g1h->is_in_g1_reserved(p), | |
799 "Should be called on external roots"); | |
800 do_barrier(p); | |
801 } | |
802 } | |
803 }; | |
804 | |
805 void ConcurrentMark::checkpointRootsInitialPost() { | |
806 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
807 | |
808 // For each region note start of marking. | |
809 NoteStartOfMarkHRClosure startcl; | |
810 g1h->heap_region_iterate(&startcl); | |
811 | |
812 // Start weak-reference discovery. | |
813 ReferenceProcessor* rp = g1h->ref_processor(); | |
814 rp->verify_no_references_recorded(); | |
815 rp->enable_discovery(); // enable ("weak") refs discovery | |
457 | 816 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle |
342 | 817 |
818 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
819 satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold); | |
820 satb_mq_set.set_active_all_threads(true); | |
821 | |
822 // update_g1_committed() will be called at the end of an evac pause | |
823 // when marking is on. So, it's also called at the end of the | |
824 // initial-mark pause to update the heap end, if the heap expands | |
825 // during it. No need to call it here. | |
826 } | |
827 | |
828 // Checkpoint the roots into this generation from outside | |
829 // this generation. [Note this initial checkpoint need only | |
830 // be approximate -- we'll do a catch up phase subsequently.] | |
831 void ConcurrentMark::checkpointRootsInitial() { | |
832 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); | |
833 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
834 | |
835 double start = os::elapsedTime(); | |
836 | |
837 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); | |
838 g1p->record_concurrent_mark_init_start(); | |
839 checkpointRootsInitialPre(); | |
840 | |
841 // YSR: when concurrent precleaning is in place, we'll | |
842 // need to clear the cached card table here | |
843 | |
844 ResourceMark rm; | |
845 HandleMark hm; | |
846 | |
847 g1h->ensure_parsability(false); | |
848 g1h->perm_gen()->save_marks(); | |
849 | |
850 CMMarkRootsClosure notOlder(this, g1h, false); | |
851 CMMarkRootsClosure older(this, g1h, true); | |
852 | |
853 g1h->set_marking_started(); | |
854 g1h->rem_set()->prepare_for_younger_refs_iterate(false); | |
855 | |
989 | 856 g1h->process_strong_roots(true, // activate StrongRootsScope |
857 false, // fake perm gen collection | |
342 | 858 SharedHeap::SO_AllClasses, |
859 &notOlder, // Regular roots | |
989 | 860 NULL, // do not visit active blobs |
342 | 861 &older // Perm Gen Roots |
862 ); | |
863 checkpointRootsInitialPost(); | |
864 | |
865 // Statistics. | |
866 double end = os::elapsedTime(); | |
867 _init_times.add((end - start) * 1000.0); | |
868 | |
869 g1p->record_concurrent_mark_init_end(); | |
870 } | |
871 | |
872 /* | |
873 Notice that in the next two methods, we actually leave the STS | |
874 during the barrier sync and join it immediately afterwards. If we | |
875 do not do this, then the following deadlock can occur: one | |
876 thread could be in the barrier sync code, waiting for the other | |
877 thread to also sync up, whereas another one could be trying to | |
878 yield, while also waiting for the other threads to sync up too. | |
879 | |
880 Because the thread that does the sync barrier has left the STS, it | |
881 is possible for it to be suspended while a Full GC or an evacuation | |
882 pause occurs. This is actually safe, since entering the sync | |
883 barrier is one of the last things do_marking_step() does, and it | |
884 doesn't manipulate any data structures afterwards. | |
885 */ | |
886 | |
887 void ConcurrentMark::enter_first_sync_barrier(int task_num) { | |
888 if (verbose_low()) | |
889 gclog_or_tty->print_cr("[%d] entering first barrier", task_num); | |
890 | |
891 ConcurrentGCThread::stsLeave(); | |
892 _first_overflow_barrier_sync.enter(); | |
893 ConcurrentGCThread::stsJoin(); | |
894 // at this point everyone should have synced up and not be doing any | |
895 // more work | |
896 | |
897 if (verbose_low()) | |
898 gclog_or_tty->print_cr("[%d] leaving first barrier", task_num); | |
899 | |
900 // let task 0 do this | |
901 if (task_num == 0) { | |
902 // task 0 is responsible for clearing the global data structures | |
903 clear_marking_state(); | |
904 | |
905 if (PrintGC) { | |
906 gclog_or_tty->date_stamp(PrintGCDateStamps); | |
907 gclog_or_tty->stamp(PrintGCTimeStamps); | |
908 gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]"); | |
909 } | |
910 } | |
911 | |
912 // after this, each task should reset its own data structures and | |
913 // then go into the second barrier | |
914 } | |
915 | |
916 void ConcurrentMark::enter_second_sync_barrier(int task_num) { | |
917 if (verbose_low()) | |
918 gclog_or_tty->print_cr("[%d] entering second barrier", task_num); | |
919 | |
920 ConcurrentGCThread::stsLeave(); | |
921 _second_overflow_barrier_sync.enter(); | |
922 ConcurrentGCThread::stsJoin(); | |
923 // at this point everything should be re-initialised and ready to go | |
924 | |
925 if (verbose_low()) | |
926 gclog_or_tty->print_cr("[%d] leaving second barrier", task_num); | |
927 } | |
928 | |
929 void ConcurrentMark::grayRoot(oop p) { | |
930 HeapWord* addr = (HeapWord*) p; | |
931 // We can't really check against _heap_start and _heap_end, since it | |
932 // is possible during an evacuation pause with piggy-backed | |
933 // initial-mark that the committed space is expanded during the | |
934 // pause without CM observing this change. So the assertion below | |
935 // is a bit conservative, but better than nothing. | |
936 tmp_guarantee_CM( _g1h->g1_committed().contains(addr), | |
937 "address should be within the heap bounds" ); | |
938 | |
939 if (!_nextMarkBitMap->isMarked(addr)) | |
940 _nextMarkBitMap->parMark(addr); | |
941 } | |
942 | |
943 void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) { | |
944 // The objects on the region have already been marked "in bulk" by | |
945 // the caller. We only need to decide whether to push the region on | |
946 // the region stack or not. | |
947 | |
948 if (!concurrent_marking_in_progress() || !_should_gray_objects) | |
949 // We're done with marking and waiting for remark. We do not need to | |
950 // push anything else on the region stack. | |
951 return; | |
952 | |
953 HeapWord* finger = _finger; | |
954 | |
955 if (verbose_low()) | |
956 gclog_or_tty->print_cr("[global] attempting to push " | |
957 "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at " | |
958 PTR_FORMAT, mr.start(), mr.end(), finger); | |
959 | |
960 if (mr.start() < finger) { | |
961 // The finger is always heap region aligned and it is not possible | |
962 // for mr to span heap regions. | |
963 tmp_guarantee_CM( mr.end() <= finger, "invariant" ); | |
964 | |
965 tmp_guarantee_CM( mr.start() <= mr.end() && | |
966 _heap_start <= mr.start() && | |
967 mr.end() <= _heap_end, | |
968 "region boundaries should fall within the committed space" ); | |
969 if (verbose_low()) | |
970 gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") " | |
971 "below the finger, pushing it", | |
972 mr.start(), mr.end()); | |
973 | |
974 if (!region_stack_push(mr)) { | |
975 if (verbose_low()) | |
976 gclog_or_tty->print_cr("[global] region stack has overflown."); | |
977 } | |
978 } | |
979 } | |
980 | |
981 void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) { | |
982 // The object is not marked by the caller. We need to at least mark | |
983 it and maybe push it on the stack. | |
984 | |
985 HeapWord* addr = (HeapWord*)p; | |
986 if (!_nextMarkBitMap->isMarked(addr)) { | |
987 // We definitely need to mark it, irrespective of whether we bail out | |
988 // because we're done with marking. | |
989 if (_nextMarkBitMap->parMark(addr)) { | |
990 if (!concurrent_marking_in_progress() || !_should_gray_objects) | |
991 // If we're done with concurrent marking and we're waiting for | |
992 // remark, then we're not pushing anything on the stack. | |
993 return; | |
994 | |
995 // No OrderAccess::store_load() is needed. It is implicit in the | |
996 // CAS done in parMark(addr) above | |
997 HeapWord* finger = _finger; | |
998 | |
999 if (addr < finger) { | |
1000 if (!mark_stack_push(oop(addr))) { | |
1001 if (verbose_low()) | |
1002 gclog_or_tty->print_cr("[global] global stack overflow " | |
1003 "during parMark"); | |
1004 } | |
1005 } | |
1006 } | |
1007 } | |
1008 } | |
1009 | |
1010 class CMConcurrentMarkingTask: public AbstractGangTask { | |
1011 private: | |
1012 ConcurrentMark* _cm; | |
1013 ConcurrentMarkThread* _cmt; | |
1014 | |
1015 public: | |
1016 void work(int worker_i) { | |
1017 guarantee( Thread::current()->is_ConcurrentGC_thread(), | |
1018 "this should only be done by a conc GC thread" ); | |
1019 | |
1020 double start_vtime = os::elapsedVTime(); | |
1021 | |
1022 ConcurrentGCThread::stsJoin(); | |
1023 | |
1024 guarantee( (size_t)worker_i < _cm->active_tasks(), "invariant" ); | |
1025 CMTask* the_task = _cm->task(worker_i); | |
1026 the_task->record_start_time(); | |
1027 if (!_cm->has_aborted()) { | |
1028 do { | |
1029 double start_vtime_sec = os::elapsedVTime(); | |
1030 double start_time_sec = os::elapsedTime(); | |
1031 the_task->do_marking_step(10.0); | |
1032 double end_time_sec = os::elapsedTime(); | |
1033 double end_vtime_sec = os::elapsedVTime(); | |
1034 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; | |
1035 double elapsed_time_sec = end_time_sec - start_time_sec; | |
1036 _cm->clear_has_overflown(); | |
1037 | |
1038 bool ret = _cm->do_yield_check(worker_i); | |
1039 | |
1040 jlong sleep_time_ms; | |
1041 if (!_cm->has_aborted() && the_task->has_aborted()) { | |
1042 sleep_time_ms = | |
1043 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); | |
1044 ConcurrentGCThread::stsLeave(); | |
1045 os::sleep(Thread::current(), sleep_time_ms, false); | |
1046 ConcurrentGCThread::stsJoin(); | |
1047 } | |
1048 double end_time2_sec = os::elapsedTime(); | |
1049 double elapsed_time2_sec = end_time2_sec - start_time_sec; | |
1050 | |
1051 #if 0 | |
1052 gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, " | |
1053 "overhead %1.4lf", | |
1054 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms, | |
1055 the_task->conc_overhead(os::elapsedTime()) * 8.0); | |
1056 gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms", | |
1057 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0); | |
1058 #endif | |
1059 } while (!_cm->has_aborted() && the_task->has_aborted()); | |
1060 } | |
1061 the_task->record_end_time(); | |
1062 guarantee( !the_task->has_aborted() || _cm->has_aborted(), "invariant" ); | |
1063 | |
1064 ConcurrentGCThread::stsLeave(); | |
1065 | |
1066 double end_vtime = os::elapsedVTime(); | |
1067 _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime); | |
1068 } | |
1069 | |
1070 CMConcurrentMarkingTask(ConcurrentMark* cm, | |
1071 ConcurrentMarkThread* cmt) : | |
1072 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { } | |
1073 | |
1074 ~CMConcurrentMarkingTask() { } | |
1075 }; | |
1076 | |
1077 void ConcurrentMark::markFromRoots() { | |
1078 // we might be tempted to assert that: | |
1079 // assert(asynch == !SafepointSynchronize::is_at_safepoint(), | |
1080 // "inconsistent argument?"); | |
1081 // However that wouldn't be right, because it's possible that | |
1082 // a safepoint is indeed in progress as a younger generation | |
1083 // stop-the-world GC happens even as we mark in this generation. | |
1084 | |
1085 _restart_for_overflow = false; | |
1086 | |
1087 set_phase(MAX2((size_t) 1, parallel_marking_threads()), true); | |
1088 | |
1089 CMConcurrentMarkingTask markingTask(this, cmThread()); | |
1090 if (parallel_marking_threads() > 0) | |
1091 _parallel_workers->run_task(&markingTask); | |
1092 else | |
1093 markingTask.work(0); | |
1094 print_stats(); | |
1095 } | |
1096 | |
1097 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { | |
1098 // world is stopped at this checkpoint | |
1099 assert(SafepointSynchronize::is_at_safepoint(), | |
1100 "world should be stopped"); | |
1101 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
1102 | |
1103 // If a full collection has happened, we shouldn't do this. | |
1104 if (has_aborted()) { | |
1105 g1h->set_marking_complete(); // So bitmap clearing isn't confused | |
1106 return; | |
1107 } | |
1108 | |
845 | 1109 if (VerifyDuringGC) { |
1110 HandleMark hm; // handle scope | |
1111 gclog_or_tty->print(" VerifyDuringGC:(before)"); | |
1112 Universe::heap()->prepare_for_verify(); | |
1113 Universe::verify(true, false, true); | |
1114 } | |
1115 | |
342 | 1116 G1CollectorPolicy* g1p = g1h->g1_policy(); |
1117 g1p->record_concurrent_mark_remark_start(); | |
1118 | |
1119 double start = os::elapsedTime(); | |
1120 | |
1121 checkpointRootsFinalWork(); | |
1122 | |
1123 double mark_work_end = os::elapsedTime(); | |
1124 | |
1125 weakRefsWork(clear_all_soft_refs); | |
1126 | |
1127 if (has_overflown()) { | |
1128 // Oops. We overflowed. Restart concurrent marking. | |
1129 _restart_for_overflow = true; | |
1130 // Clear the flag. We do not need it any more. | |
1131 clear_has_overflown(); | |
1132 if (G1TraceMarkStackOverflow) | |
1133 gclog_or_tty->print_cr("\nRemark led to restart for overflow."); | |
1134 } else { | |
1135 // We're done with marking. | |
1136 JavaThread::satb_mark_queue_set().set_active_all_threads(false); | |
811 | 1137 |
1138 if (VerifyDuringGC) { | |
845 | 1139 HandleMark hm; // handle scope |
1140 gclog_or_tty->print(" VerifyDuringGC:(after)"); | |
1141 Universe::heap()->prepare_for_verify(); | |
1142 Universe::heap()->verify(/* allow_dirty */ true, | |
1143 /* silent */ false, | |
1144 /* use_prev_marking */ false); | |
811 | 1145 } |
342 | 1146 } |
1147 | |
1148 #if VERIFY_OBJS_PROCESSED | |
1149 _scan_obj_cl.objs_processed = 0; | |
1150 ThreadLocalObjQueue::objs_enqueued = 0; | |
1151 #endif | |
1152 | |
1153 // Statistics | |
1154 double now = os::elapsedTime(); | |
1155 _remark_mark_times.add((mark_work_end - start) * 1000.0); | |
1156 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0); | |
1157 _remark_times.add((now - start) * 1000.0); | |
1158 | |
1159 g1p->record_concurrent_mark_remark_end(); | |
1160 } | |
1161 | |
1162 | |
1163 #define CARD_BM_TEST_MODE 0 | |
1164 | |
1165 class CalcLiveObjectsClosure: public HeapRegionClosure { | |
1166 | |
1167 CMBitMapRO* _bm; | |
1168 ConcurrentMark* _cm; | |
1169 bool _changed; | |
1170 bool _yield; | |
1171 size_t _words_done; | |
1172 size_t _tot_live; | |
1173 size_t _tot_used; | |
1174 size_t _regions_done; | |
1175 double _start_vtime_sec; | |
1176 | |
1177 BitMap* _region_bm; | |
1178 BitMap* _card_bm; | |
1179 intptr_t _bottom_card_num; | |
1180 bool _final; | |
1181 | |
1182 void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) { | |
1183 for (intptr_t i = start_card_num; i <= last_card_num; i++) { | |
1184 #if CARD_BM_TEST_MODE | |
1185 guarantee(_card_bm->at(i - _bottom_card_num), | |
1186 "Should already be set."); | |
1187 #else | |
1188 _card_bm->par_at_put(i - _bottom_card_num, 1); | |
1189 #endif | |
1190 } | |
1191 } | |
1192 | |
1193 public: | |
1194 CalcLiveObjectsClosure(bool final, | |
1195 CMBitMapRO *bm, ConcurrentMark *cm, | |
936 | 1196 BitMap* region_bm, BitMap* card_bm) : |
342 | 1197 _bm(bm), _cm(cm), _changed(false), _yield(true), |
1198 _words_done(0), _tot_live(0), _tot_used(0), | |
936 | 1199 _region_bm(region_bm), _card_bm(card_bm),_final(final), |
342 | 1200 _regions_done(0), _start_vtime_sec(0.0) |
1201 { | |
1202 _bottom_card_num = | |
1203 intptr_t(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >> | |
1204 CardTableModRefBS::card_shift); | |
1205 } | |
1206 | |
829 | 1207 // It takes a region that's not empty (i.e., it has at least one |
1208 // live object in it) and sets its corresponding bit on the region | |
1209 // bitmap to 1. If the region is "starts humongous" it will also set | |
1210 // to 1 the bits on the region bitmap that correspond to its | |
1211 // associated "continues humongous" regions. | |
1212 void set_bit_for_region(HeapRegion* hr) { | |
1213 assert(!hr->continuesHumongous(), "should have filtered those out"); | |
1214 | |
1215 size_t index = hr->hrs_index(); | |
1216 if (!hr->startsHumongous()) { | |
1217 // Normal (non-humongous) case: just set the bit. | |
1218 _region_bm->par_at_put((BitMap::idx_t) index, true); | |
1219 } else { | |
1220 // Starts humongous case: calculate how many regions are part of | |
1221 // this humongous region and then set the bit range. It might | |
1222 // have been a bit more efficient to look at the object that | |
1223 // spans these humongous regions to calculate their number from | |
1224 // the object's size. However, it's a good idea to calculate | |
1225 // this based on the metadata itself, and not the region | |
1226 // contents, so that this code is not aware of what goes into | |
1227 // the humongous regions (in case this changes in the future). | |
1228 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
1229 size_t end_index = index + 1; | |
831 | 1230 while (end_index < g1h->n_regions()) { |
1231 HeapRegion* chr = g1h->region_at(end_index); | |
829 | 1232 if (!chr->continuesHumongous()) { |
1233 break; | |
1234 } | |
1235 end_index += 1; | |
1236 } | |
1237 _region_bm->par_at_put_range((BitMap::idx_t) index, | |
1238 (BitMap::idx_t) end_index, true); | |
1239 } | |
1240 } | |
1241 | |
342 | 1242 bool doHeapRegion(HeapRegion* hr) { |
1243 if (!_final && _regions_done == 0) | |
1244 _start_vtime_sec = os::elapsedVTime(); | |
1245 | |
639 | 1246 if (hr->continuesHumongous()) { |
829 | 1247 // We will ignore these here and process them when their |
1248 // associated "starts humongous" region is processed (see | |
1249 // set_bit_for_heap_region()). Note that we cannot rely on their | |
1250 // associated "starts humongous" region to have their bit set to | |
1251 // 1 since, due to the region chunking in the parallel region | |
1252 // iteration, a "continues humongous" region might be visited | |
1253 // before its associated "starts humongous". | |
639 | 1254 return false; |
1255 } | |
342 | 1256 |
1257 HeapWord* nextTop = hr->next_top_at_mark_start(); | |
1258 HeapWord* start = hr->top_at_conc_mark_count(); | |
1259 assert(hr->bottom() <= start && start <= hr->end() && | |
1260 hr->bottom() <= nextTop && nextTop <= hr->end() && | |
1261 start <= nextTop, | |
1262 "Preconditions."); | |
1263 // Otherwise, record the number of words we'll examine. | |
1264 size_t words_done = (nextTop - start); | |
1265 // Find the first marked object at or after "start". | |
1266 start = _bm->getNextMarkedWordAddress(start, nextTop); | |
1267 size_t marked_bytes = 0; | |
1268 | |
1269 // Below, the term "card num" means the result of shifting an address | |
1270 // by the card shift -- address 0 corresponds to card number 0. One | |
1271 // must subtract the card num of the bottom of the heap to obtain a | |
1272 // card table index. | |
1273 // The first card num of the sequence of live cards currently being | |
1274 // constructed. -1 ==> no sequence. | |
1275 intptr_t start_card_num = -1; | |
1276 // The last card num of the sequence of live cards currently being | |
1277 // constructed. -1 ==> no sequence. | |
1278 intptr_t last_card_num = -1; | |
1279 | |
1280 while (start < nextTop) { | |
1281 if (_yield && _cm->do_yield_check()) { | |
1282 // We yielded. It might be for a full collection, in which case | |
1283 // all bets are off; terminate the traversal. | |
1284 if (_cm->has_aborted()) { | |
1285 _changed = false; | |
1286 return true; | |
1287 } else { | |
1288 // Otherwise, it might be a collection pause, and the region | |
1289 // we're looking at might be in the collection set. We'll | |
1290 // abandon this region. | |
1291 return false; | |
1292 } | |
1293 } | |
1294 oop obj = oop(start); | |
1295 int obj_sz = obj->size(); | |
1296 // The card num of the start of the current object. | |
1297 intptr_t obj_card_num = | |
1298 intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift); | |
1299 | |
1300 HeapWord* obj_last = start + obj_sz - 1; | |
1301 intptr_t obj_last_card_num = | |
1302 intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift); | |
1303 | |
1304 if (obj_card_num != last_card_num) { | |
1305 if (start_card_num == -1) { | |
1306 assert(last_card_num == -1, "Both or neither."); | |
1307 start_card_num = obj_card_num; | |
1308 } else { | |
1309 assert(last_card_num != -1, "Both or neither."); | |
1310 assert(obj_card_num >= last_card_num, "Inv"); | |
1311 if ((obj_card_num - last_card_num) > 1) { | |
1312 // Mark the last run, and start a new one. | |
1313 mark_card_num_range(start_card_num, last_card_num); | |
1314 start_card_num = obj_card_num; | |
1315 } | |
1316 } | |
1317 #if CARD_BM_TEST_MODE | |
1318 /* | |
1319 gclog_or_tty->print_cr("Setting bits from %d/%d.", | |
1320 obj_card_num - _bottom_card_num, | |
1321 obj_last_card_num - _bottom_card_num); | |
1322 */ | |
1323 for (intptr_t j = obj_card_num; j <= obj_last_card_num; j++) { | |
1324 _card_bm->par_at_put(j - _bottom_card_num, 1); | |
1325 } | |
1326 #endif | |
1327 } | |
1328 // In any case, we set the last card num. | |
1329 last_card_num = obj_last_card_num; | |
1330 | |
1331 marked_bytes += obj_sz * HeapWordSize; | |
1332 // Find the next marked object after this one. | |
1333 start = _bm->getNextMarkedWordAddress(start + 1, nextTop); | |
1334 _changed = true; | |
1335 } | |
1336 // Handle the last range, if any. | |
1337 if (start_card_num != -1) | |
1338 mark_card_num_range(start_card_num, last_card_num); | |
1339 if (_final) { | |
1340 // Mark the allocated-since-marking portion... | |
1341 HeapWord* tp = hr->top(); | |
1342 if (nextTop < tp) { | |
1343 start_card_num = | |
1344 intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift); | |
1345 last_card_num = | |
1346 intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift); | |
1347 mark_card_num_range(start_card_num, last_card_num); | |
1348 // This definitely means the region has live objects. | |
829 | 1349 set_bit_for_region(hr); |
342 | 1350 } |
1351 } | |
1352 | |
1353 hr->add_to_marked_bytes(marked_bytes); | |
1354 // Update the live region bitmap. | |
1355 if (marked_bytes > 0) { | |
829 | 1356 set_bit_for_region(hr); |
342 | 1357 } |
1358 hr->set_top_at_conc_mark_count(nextTop); | |
1359 _tot_live += hr->next_live_bytes(); | |
1360 _tot_used += hr->used(); | |
1361 _words_done = words_done; | |
1362 | |
1363 if (!_final) { | |
1364 ++_regions_done; | |
1365 if (_regions_done % 10 == 0) { | |
1366 double end_vtime_sec = os::elapsedVTime(); | |
1367 double elapsed_vtime_sec = end_vtime_sec - _start_vtime_sec; | |
1368 if (elapsed_vtime_sec > (10.0 / 1000.0)) { | |
1369 jlong sleep_time_ms = | |
1370 (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0); | |
1371 os::sleep(Thread::current(), sleep_time_ms, false); | |
1372 _start_vtime_sec = end_vtime_sec; | |
1373 } | |
1374 } | |
1375 } | |
1376 | |
1377 return false; | |
1378 } | |
1379 | |
1380 bool changed() { return _changed; } | |
1381 void reset() { _changed = false; _words_done = 0; } | |
1382 void no_yield() { _yield = false; } | |
1383 size_t words_done() { return _words_done; } | |
1384 size_t tot_live() { return _tot_live; } | |
1385 size_t tot_used() { return _tot_used; } | |
1386 }; | |
1387 | |
1388 | |
1389 void ConcurrentMark::calcDesiredRegions() { | |
1390 _region_bm.clear(); | |
1391 _card_bm.clear(); | |
1392 CalcLiveObjectsClosure calccl(false /*final*/, | |
1393 nextMarkBitMap(), this, | |
936 | 1394 &_region_bm, &_card_bm); |
342 | 1395 G1CollectedHeap *g1h = G1CollectedHeap::heap(); |
1396 g1h->heap_region_iterate(&calccl); | |
1397 | |
1398 do { | |
1399 calccl.reset(); | |
1400 g1h->heap_region_iterate(&calccl); | |
1401 } while (calccl.changed()); | |
1402 } | |
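// Editor's note: a region may be abandoned mid-scan when the closure
// yields for a pause (doHeapRegion() returns false without finishing),
// so the counting pass above is repeated until a complete iteration
// over the heap reports no changes.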
1403 | |
1404 class G1ParFinalCountTask: public AbstractGangTask { | |
1405 protected: | |
1406 G1CollectedHeap* _g1h; | |
1407 CMBitMap* _bm; | |
1408 size_t _n_workers; | |
1409 size_t *_live_bytes; | |
1410 size_t *_used_bytes; | |
1411 BitMap* _region_bm; | |
1412 BitMap* _card_bm; | |
1413 public: | |
1414 G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm, | |
1415 BitMap* region_bm, BitMap* card_bm) : | |
1416 AbstractGangTask("G1 final counting"), _g1h(g1h), | |
1417 _bm(bm), _region_bm(region_bm), _card_bm(card_bm) | |
1418 { | |
1419 if (ParallelGCThreads > 0) | |
1420 _n_workers = _g1h->workers()->total_workers(); | |
1421 else | |
1422 _n_workers = 1; | |
1423 _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); | |
1424 _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); | |
1425 } | |
1426 | |
1427 ~G1ParFinalCountTask() { | |
1428 FREE_C_HEAP_ARRAY(size_t, _live_bytes); | |
1429 FREE_C_HEAP_ARRAY(size_t, _used_bytes); | |
1430 } | |
1431 | |
1432 void work(int i) { | |
1433 CalcLiveObjectsClosure calccl(true /*final*/, | |
1434 _bm, _g1h->concurrent_mark(), | |
936 | 1435 _region_bm, _card_bm); |
342 | 1436 calccl.no_yield(); |
1437 if (ParallelGCThreads > 0) { | |
355 | 1438 _g1h->heap_region_par_iterate_chunked(&calccl, i, |
1439 HeapRegion::FinalCountClaimValue); | |
342 | 1440 } else { |
1441 _g1h->heap_region_iterate(&calccl); | |
1442 } | |
1443 assert(calccl.complete(), "Shouldn't have yielded!"); | |
1444 | |
1445 guarantee( (size_t)i < _n_workers, "invariant" ); | |
1446 _live_bytes[i] = calccl.tot_live(); | |
1447 _used_bytes[i] = calccl.tot_used(); | |
1448 } | |
1449 size_t live_bytes() { | |
1450 size_t live_bytes = 0; | |
1451 for (size_t i = 0; i < _n_workers; ++i) | |
1452 live_bytes += _live_bytes[i]; | |
1453 return live_bytes; | |
1454 } | |
1455 size_t used_bytes() { | |
1456 size_t used_bytes = 0; | |
1457 for (size_t i = 0; i < _n_workers; ++i) | |
1458 used_bytes += _used_bytes[i]; | |
1459 return used_bytes; | |
1460 } | |
1461 }; | |
1462 | |
1463 class G1ParNoteEndTask; | |
1464 | |
1465 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { | |
1466 G1CollectedHeap* _g1; | |
1467 int _worker_num; | |
1468 size_t _max_live_bytes; | |
1469 size_t _regions_claimed; | |
1470 size_t _freed_bytes; | |
1471 size_t _cleared_h_regions; | |
1472 size_t _freed_regions; | |
1473 UncleanRegionList* _unclean_region_list; | |
1474 double _claimed_region_time; | |
1475 double _max_region_time; | |
1476 | |
1477 public: | |
1478 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, | |
1479 UncleanRegionList* list, | |
1480 int worker_num); | |
1481 size_t freed_bytes() { return _freed_bytes; } | |
1482 size_t cleared_h_regions() { return _cleared_h_regions; } | |
1483 size_t freed_regions() { return _freed_regions; } | |
1484 UncleanRegionList* unclean_region_list() { | |
1485 return _unclean_region_list; | |
1486 } | |
1487 | |
1488 bool doHeapRegion(HeapRegion *r); | |
1489 | |
1490 size_t max_live_bytes() { return _max_live_bytes; } | |
1491 size_t regions_claimed() { return _regions_claimed; } | |
1492 double claimed_region_time_sec() { return _claimed_region_time; } | |
1493 double max_region_time_sec() { return _max_region_time; } | |
1494 }; | |
1495 | |
1496 class G1ParNoteEndTask: public AbstractGangTask { | |
1497 friend class G1NoteEndOfConcMarkClosure; | |
1498 protected: | |
1499 G1CollectedHeap* _g1h; | |
1500 size_t _max_live_bytes; | |
1501 size_t _freed_bytes; | |
1502 ConcurrentMark::ParCleanupThreadState** _par_cleanup_thread_state; | |
1503 public: | |
1504 G1ParNoteEndTask(G1CollectedHeap* g1h, | |
1505 ConcurrentMark::ParCleanupThreadState** | |
1506 par_cleanup_thread_state) : | |
1507 AbstractGangTask("G1 note end"), _g1h(g1h), | |
1508 _max_live_bytes(0), _freed_bytes(0), | |
1509 _par_cleanup_thread_state(par_cleanup_thread_state) | |
1510 {} | |
1511 | |
1512 void work(int i) { | |
1513 double start = os::elapsedTime(); | |
1514 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, | |
1515 &_par_cleanup_thread_state[i]->list, | |
1516 i); | |
1517 if (ParallelGCThreads > 0) { | |
355 | 1518 _g1h->heap_region_par_iterate_chunked(&g1_note_end, i, |
1519 HeapRegion::NoteEndClaimValue); | |
342 | 1520 } else { |
1521 _g1h->heap_region_iterate(&g1_note_end); | |
1522 } | |
1523 assert(g1_note_end.complete(), "Shouldn't have yielded!"); | |
1524 | |
1525 // Now finish up freeing the current thread's regions. | |
1526 _g1h->finish_free_region_work(g1_note_end.freed_bytes(), | |
1527 g1_note_end.cleared_h_regions(), | |
1528 0, NULL); | |
1529 { | |
1530 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
1531 _max_live_bytes += g1_note_end.max_live_bytes(); | |
1532 _freed_bytes += g1_note_end.freed_bytes(); | |
1533 } | |
1534 double end = os::elapsedTime(); | |
1535 if (G1PrintParCleanupStats) { | |
1536 gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] " | |
1537 "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n", | |
1538 i, start, end, (end-start)*1000.0, | |
1539 g1_note_end.regions_claimed(), | |
1540 g1_note_end.claimed_region_time_sec()*1000.0, | |
1541 g1_note_end.max_region_time_sec()*1000.0); | |
1542 } | |
1543 } | |
1544 size_t max_live_bytes() { return _max_live_bytes; } | |
1545 size_t freed_bytes() { return _freed_bytes; } | |
1546 }; | |
1547 | |
1548 class G1ParScrubRemSetTask: public AbstractGangTask { | |
1549 protected: | |
1550 G1RemSet* _g1rs; | |
1551 BitMap* _region_bm; | |
1552 BitMap* _card_bm; | |
1553 public: | |
1554 G1ParScrubRemSetTask(G1CollectedHeap* g1h, | |
1555 BitMap* region_bm, BitMap* card_bm) : | |
1556 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), | |
1557 _region_bm(region_bm), _card_bm(card_bm) | |
1558 {} | |
1559 | |
1560 void work(int i) { | |
1561 if (ParallelGCThreads > 0) { | |
355 | 1562 _g1rs->scrub_par(_region_bm, _card_bm, i, |
1563 HeapRegion::ScrubRemSetClaimValue); | |
342 | 1564 } else { |
1565 _g1rs->scrub(_region_bm, _card_bm); | |
1566 } | |
1567 } | |
1568 | |
1569 }; | |
1570 | |
1571 G1NoteEndOfConcMarkClosure:: | |
1572 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, | |
1573 UncleanRegionList* list, | |
1574 int worker_num) | |
1575 : _g1(g1), _worker_num(worker_num), | |
1576 _max_live_bytes(0), _regions_claimed(0), | |
1577 _freed_bytes(0), _cleared_h_regions(0), _freed_regions(0), | |
1578 _claimed_region_time(0.0), _max_region_time(0.0), | |
1579 _unclean_region_list(list) | |
1580 {} | |
1581 | |
1582 bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *r) { | |
1583 // We use a claim value of zero here because all regions | |
1584 // were claimed with value 1 in the FinalCount task. | |
1585 r->reset_gc_time_stamp(); | |
1586 if (!r->continuesHumongous()) { | |
1587 double start = os::elapsedTime(); | |
1588 _regions_claimed++; | |
1589 r->note_end_of_marking(); | |
1590 _max_live_bytes += r->max_live_bytes(); | |
1591 _g1->free_region_if_totally_empty_work(r, | |
1592 _freed_bytes, | |
1593 _cleared_h_regions, | |
1594 _freed_regions, | |
1595 _unclean_region_list, | |
1596 true /*par*/); | |
1597 double region_time = (os::elapsedTime() - start); | |
1598 _claimed_region_time += region_time; | |
1599 if (region_time > _max_region_time) _max_region_time = region_time; | |
1600 } | |
1601 return false; | |
1602 } | |
1603 | |
1604 void ConcurrentMark::cleanup() { | |
1605 // world is stopped at this checkpoint | |
1606 assert(SafepointSynchronize::is_at_safepoint(), | |
1607 "world should be stopped"); | |
1608 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
1609 | |
1610 // If a full collection has happened, we shouldn't do this. | |
1611 if (has_aborted()) { | |
1612 g1h->set_marking_complete(); // So bitmap clearing isn't confused | |
1613 return; | |
1614 } | |
1615 | |
845 | 1616 if (VerifyDuringGC) { |
1617 HandleMark hm; // handle scope | |
1618 gclog_or_tty->print(" VerifyDuringGC:(before)"); | |
1619 Universe::heap()->prepare_for_verify(); | |
1620 Universe::verify(/* allow dirty */ true, | |
1621 /* silent */ false, | |
1622 /* prev marking */ true); | |
1623 } | |
1624 | |
342 | 1625 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); |
1626 g1p->record_concurrent_mark_cleanup_start(); | |
1627 | |
1628 double start = os::elapsedTime(); | |
1629 | |
1630 // Do counting once more with the world stopped for good measure. | |
1631 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), | |
1632 &_region_bm, &_card_bm); | |
1633 if (ParallelGCThreads > 0) { | |
355 | 1634 assert(g1h->check_heap_region_claim_values( |
1635 HeapRegion::InitialClaimValue), | |
1636 "sanity check"); | |
1637 | |
342 | 1638 int n_workers = g1h->workers()->total_workers(); |
1639 g1h->set_par_threads(n_workers); | |
1640 g1h->workers()->run_task(&g1_par_count_task); | |
1641 g1h->set_par_threads(0); | |
355 | 1642 |
1643 assert(g1h->check_heap_region_claim_values( | |
1644 HeapRegion::FinalCountClaimValue), | |
1645 "sanity check"); | |
342 | 1646 } else { |
1647 g1_par_count_task.work(0); | |
1648 } | |
1649 | |
1650 size_t known_garbage_bytes = | |
1651 g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes(); | |
1652 #if 0 | |
1653 gclog_or_tty->print_cr("used %1.2lf, live %1.2lf, garbage %1.2lf", | |
1654 (double) g1_par_count_task.used_bytes() / (double) (1024 * 1024), | |
1655 (double) g1_par_count_task.live_bytes() / (double) (1024 * 1024), | |
1656 (double) known_garbage_bytes / (double) (1024 * 1024)); | |
1657 #endif // 0 | |
1658 g1p->set_known_garbage_bytes(known_garbage_bytes); | |
1659 | |
1660 size_t start_used_bytes = g1h->used(); | |
1661 _at_least_one_mark_complete = true; | |
1662 g1h->set_marking_complete(); | |
1663 | |
1664 double count_end = os::elapsedTime(); | |
1665 double this_final_counting_time = (count_end - start); | |
1666 if (G1PrintParCleanupStats) { | |
1667 gclog_or_tty->print_cr("Cleanup:"); | |
1668 gclog_or_tty->print_cr(" Finalize counting: %8.3f ms", | |
1669 this_final_counting_time*1000.0); | |
1670 } | |
1671 _total_counting_time += this_final_counting_time; | |
1672 | |
1673 // Install newly created mark bitMap as "prev". | |
1674 swapMarkBitMaps(); | |
1675 | |
1676 g1h->reset_gc_time_stamp(); | |
1677 | |
1678 // Note end of marking in all heap regions. | |
1679 double note_end_start = os::elapsedTime(); | |
1680 G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state); | |
1681 if (ParallelGCThreads > 0) { | |
1682 int n_workers = g1h->workers()->total_workers(); | |
1683 g1h->set_par_threads(n_workers); | |
1684 g1h->workers()->run_task(&g1_par_note_end_task); | |
1685 g1h->set_par_threads(0); | |
355 | 1686 |
1687 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), | |
1688 "sanity check"); | |
342 | 1689 } else { |
1690 g1_par_note_end_task.work(0); | |
1691 } | |
1692 g1h->set_unclean_regions_coming(true); | |
1693 double note_end_end = os::elapsedTime(); | |
1694 // Tell the mutators that there might be unclean regions coming... | |
1695 if (G1PrintParCleanupStats) { | |
1696 gclog_or_tty->print_cr(" note end of marking: %8.3f ms.", | |
1697 (note_end_end - note_end_start)*1000.0); | |
1698 } | |
1699 | |
355 | 1700 |
342 | 1701 // Scrub the remembered sets before the sorting call below, since |
1702 // scrubbing affects the metric by which we sort the heap regions. |
1703 if (G1ScrubRemSets) { | |
1704 double rs_scrub_start = os::elapsedTime(); | |
1705 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); | |
1706 if (ParallelGCThreads > 0) { | |
1707 int n_workers = g1h->workers()->total_workers(); | |
1708 g1h->set_par_threads(n_workers); | |
1709 g1h->workers()->run_task(&g1_par_scrub_rs_task); | |
1710 g1h->set_par_threads(0); | |
355 | 1711 |
1712 assert(g1h->check_heap_region_claim_values( | |
1713 HeapRegion::ScrubRemSetClaimValue), | |
1714 "sanity check"); | |
342 | 1715 } else { |
1716 g1_par_scrub_rs_task.work(0); | |
1717 } | |
1718 | |
1719 double rs_scrub_end = os::elapsedTime(); | |
1720 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); | |
1721 _total_rs_scrub_time += this_rs_scrub_time; | |
1722 } | |
1723 | |
1724 // this will also free any regions totally full of garbage objects, | |
1725 // and sort the regions. | |
1726 g1h->g1_policy()->record_concurrent_mark_cleanup_end( | |
1727 g1_par_note_end_task.freed_bytes(), | |
1728 g1_par_note_end_task.max_live_bytes()); | |
1729 | |
1730 // Statistics. | |
1731 double end = os::elapsedTime(); | |
1732 _cleanup_times.add((end - start) * 1000.0); | |
1733 | |
1734 // G1CollectedHeap::heap()->print(); | |
1735 // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d", | |
1736 // G1CollectedHeap::heap()->get_gc_time_stamp()); | |
1737 | |
1738 if (PrintGC || PrintGCDetails) { | |
1739 g1h->print_size_transition(gclog_or_tty, | |
1740 start_used_bytes, | |
1741 g1h->used(), | |
1742 g1h->capacity()); | |
1743 } | |
1744 | |
1745 size_t cleaned_up_bytes = start_used_bytes - g1h->used(); | |
1746 g1p->decrease_known_garbage_bytes(cleaned_up_bytes); | |
1747 | |
1748 // We need to make this be a "collection" so any collection pause that | |
1749 // races with it goes around and waits for completeCleanup to finish. | |
1750 g1h->increment_total_collections(); | |
1751 | |
751 | 1752 if (VerifyDuringGC) { |
845 | 1753 HandleMark hm; // handle scope |
1754 gclog_or_tty->print(" VerifyDuringGC:(after)"); | |
1755 Universe::heap()->prepare_for_verify(); | |
1756 Universe::verify(/* allow dirty */ true, | |
1757 /* silent */ false, | |
1758 /* prev marking */ true); | |
342 | 1759 } |
1760 } | |
1761 | |
1762 void ConcurrentMark::completeCleanup() { | |
1763 // A full collection intervened. | |
1764 if (has_aborted()) return; | |
1765 | |
1766 int first = 0; | |
1767 int last = (int)MAX2(ParallelGCThreads, (size_t)1); | |
1768 for (int t = 0; t < last; t++) { | |
1769 UncleanRegionList* list = &_par_cleanup_thread_state[t]->list; | |
1770 assert(list->well_formed(), "Inv"); | |
1771 HeapRegion* hd = list->hd(); | |
1772 while (hd != NULL) { | |
1773 // Now finish up the other stuff. | |
1774 hd->rem_set()->clear(); | |
1775 HeapRegion* next_hd = hd->next_from_unclean_list(); | |
1776 (void)list->pop(); | |
1777 guarantee(list->hd() == next_hd, "how not?"); | |
1778 _g1h->put_region_on_unclean_list(hd); | |
1779 if (!hd->isHumongous()) { | |
1780 // Bump the _free_regions count by 1. | |
1781 _g1h->finish_free_region_work(0, 0, 1, NULL); | |
1782 } | |
1783 hd = list->hd(); | |
1784 guarantee(hd == next_hd, "how not?"); | |
1785 } | |
1786 } | |
1787 } | |
1788 | |
1789 | |
1790 class G1CMIsAliveClosure: public BoolObjectClosure { | |
1791 G1CollectedHeap* _g1; | |
1792 public: | |
1793 G1CMIsAliveClosure(G1CollectedHeap* g1) : | |
1794 _g1(g1) | |
1795 {} | |
1796 | |
1797 void do_object(oop obj) { | |
1798 assert(false, "not to be invoked"); | |
1799 } | |
1800 bool do_object_b(oop obj) { | |
1801 HeapWord* addr = (HeapWord*)obj; | |
1802 return addr != NULL && | |
1803 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); | |
1804 } | |
1805 }; | |
1806 | |
1807 class G1CMKeepAliveClosure: public OopClosure { | |
1808 G1CollectedHeap* _g1; | |
1809 ConcurrentMark* _cm; | |
1810 CMBitMap* _bitMap; | |
1811 public: | |
1812 G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm, | |
1813 CMBitMap* bitMap) : | |
1814 _g1(g1), _cm(cm), | |
1815 _bitMap(bitMap) {} | |
1816 | |
845 | 1817 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
1818 virtual void do_oop( oop* p) { do_oop_work(p); } | |
1819 | |
1820 template <class T> void do_oop_work(T* p) { | |
1821 oop thisOop = oopDesc::load_decode_heap_oop(p); | |
342 | 1822 HeapWord* addr = (HeapWord*)thisOop; |
1823 if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(thisOop)) { | |
1824 _bitMap->mark(addr); | |
1825 _cm->mark_stack_push(thisOop); | |
1826 } | |
1827 } | |
1828 }; | |
1829 | |
1830 class G1CMDrainMarkingStackClosure: public VoidClosure { | |
1831 CMMarkStack* _markStack; | |
1832 CMBitMap* _bitMap; | |
1833 G1CMKeepAliveClosure* _oopClosure; | |
1834 public: | |
1835 G1CMDrainMarkingStackClosure(CMBitMap* bitMap, CMMarkStack* markStack, | |
1836 G1CMKeepAliveClosure* oopClosure) : | |
1837 _bitMap(bitMap), | |
1838 _markStack(markStack), | |
1839 _oopClosure(oopClosure) | |
1840 {} | |
1841 | |
1842 void do_void() { | |
1843 _markStack->drain((OopClosure*)_oopClosure, _bitMap, false); | |
1844 } | |
1845 }; | |
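// Editor's note on the closure trio above: during reference processing
// the is-alive closure decides whether a referent is still reachable,
// the keep-alive closure marks a referent that must be retained and
// pushes it on the mark stack, and the drain closure empties that
// stack so anything newly marked is traced transitively.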
1846 | |
1847 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { | |
1848 ResourceMark rm; | |
1849 HandleMark hm; | |
453 | 1850 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
1851 ReferenceProcessor* rp = g1h->ref_processor(); | |
342 | 1852 |
1853 // Process weak references. | |
457 | 1854 rp->setup_policy(clear_all_soft_refs); |
342 | 1855 assert(_markStack.isEmpty(), "mark stack should be empty"); |
1856 | |
453 | 1857 G1CMIsAliveClosure g1IsAliveClosure (g1h); |
1858 G1CMKeepAliveClosure g1KeepAliveClosure(g1h, this, nextMarkBitMap()); | |
342 | 1859 G1CMDrainMarkingStackClosure |
1860 g1DrainMarkingStackClosure(nextMarkBitMap(), &_markStack, | |
1861 &g1KeepAliveClosure); | |
1862 | |
1863 // XXXYYY Also: copy the parallel ref processing code from CMS. | |
453 | 1864 rp->process_discovered_references(&g1IsAliveClosure, |
342 | 1865 &g1KeepAliveClosure, |
1866 &g1DrainMarkingStackClosure, | |
1867 NULL); | |
1868 assert(_markStack.overflow() || _markStack.isEmpty(), | |
1869 "mark stack should be empty (unless it overflowed)"); | |
1870 if (_markStack.overflow()) { | |
1871 set_has_overflown(); | |
1872 } | |
1873 | |
1874 rp->enqueue_discovered_references(); | |
1875 rp->verify_no_references_recorded(); | |
1876 assert(!rp->discovery_enabled(), "should have been disabled"); | |
1877 | |
1878 // Now clean up stale oops in SymbolTable and StringTable | |
1879 SymbolTable::unlink(&g1IsAliveClosure); | |
1880 StringTable::unlink(&g1IsAliveClosure); | |
1881 } | |
1882 | |
1883 void ConcurrentMark::swapMarkBitMaps() { | |
1884 CMBitMapRO* temp = _prevMarkBitMap; | |
1885 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; | |
1886 _nextMarkBitMap = (CMBitMap*) temp; | |
1887 } | |
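// Editor's note: after a successful marking cycle the "next" bitmap
// (just completed) becomes the "prev" bitmap, i.e. the authoritative
// liveness snapshot, while the old "prev" bitmap will be cleared and
// reused as the "next" bitmap of the following cycle.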
1888 | |
1889 class CMRemarkTask: public AbstractGangTask { | |
1890 private: | |
1891 ConcurrentMark *_cm; | |
1892 | |
1893 public: | |
1894 void work(int worker_i) { | |
1895 // Since all available tasks are actually started, we should | |
1896 // only proceed if we're supposed to be active. | |
1897 if ((size_t)worker_i < _cm->active_tasks()) { | |
1898 CMTask* task = _cm->task(worker_i); | |
1899 task->record_start_time(); | |
1900 do { | |
1901 task->do_marking_step(1000000000.0 /* something very large */); | |
1902 } while (task->has_aborted() && !_cm->has_overflown()); | |
1903 // If we overflow, then we do not want to restart. We instead | |
1904 // want to abort remark and do concurrent marking again. | |
1905 task->record_end_time(); | |
1906 } | |
1907 } | |
1908 | |
1909 CMRemarkTask(ConcurrentMark* cm) : | |
1910 AbstractGangTask("Par Remark"), _cm(cm) { } | |
1911 }; | |
1912 | |
1913 void ConcurrentMark::checkpointRootsFinalWork() { | |
1914 ResourceMark rm; | |
1915 HandleMark hm; | |
1916 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
1917 | |
1918 g1h->ensure_parsability(false); | |
1919 | |
1920 if (ParallelGCThreads > 0) { | |
989 | 1921 G1CollectedHeap::StrongRootsScope srs(g1h); |
342 | 1922 // this is remark, so we'll use up all available threads |
1923 int active_workers = ParallelGCThreads; | |
1924 set_phase(active_workers, false); | |
1925 | |
1926 CMRemarkTask remarkTask(this); | |
1927 // We will start all available threads, even if we decide that the | |
1928 // active_workers will be fewer. The extra ones will just bail out | |
1929 // immediately. | |
1930 int n_workers = g1h->workers()->total_workers(); | |
1931 g1h->set_par_threads(n_workers); | |
1932 g1h->workers()->run_task(&remarkTask); | |
1933 g1h->set_par_threads(0); | |
1934 | |
1935 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
1936 guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" ); | |
1937 } else { | |
989 | 1938 G1CollectedHeap::StrongRootsScope srs(g1h); |
342 | 1939 // this is remark, so we'll use up all available threads |
1940 int active_workers = 1; | |
1941 set_phase(active_workers, false); | |
1942 | |
1943 CMRemarkTask remarkTask(this); | |
1944 // We will start all available threads, even if we decide that the | |
1945 // active_workers will be fewer. The extra ones will just bail out | |
1946 // immediately. | |
1947 remarkTask.work(0); | |
1948 | |
1949 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
1950 guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" ); | |
1951 } | |
1952 | |
1953 print_stats(); | |
1954 | |
1955 if (!restart_for_overflow()) | |
1956 set_non_marking_state(); | |
1957 | |
1958 #if VERIFY_OBJS_PROCESSED | |
1959 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) { | |
1960 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.", | |
1961 _scan_obj_cl.objs_processed, | |
1962 ThreadLocalObjQueue::objs_enqueued); | |
1963 guarantee(_scan_obj_cl.objs_processed == | |
1964 ThreadLocalObjQueue::objs_enqueued, | |
1965 "Different number of objs processed and enqueued."); | |
1966 } | |
1967 #endif | |
1968 } | |
1969 | |
1970 class ReachablePrinterOopClosure: public OopClosure { | |
1971 private: | |
1972 G1CollectedHeap* _g1h; | |
1973 CMBitMapRO* _bitmap; | |
1974 outputStream* _out; | |
1975 | |
1976 public: | |
1977 ReachablePrinterOopClosure(CMBitMapRO* bitmap, outputStream* out) : | |
1978 _bitmap(bitmap), _g1h(G1CollectedHeap::heap()), _out(out) { } | |
1979 | |
845 | 1980 void do_oop(narrowOop* p) { do_oop_work(p); } |
1981 void do_oop( oop* p) { do_oop_work(p); } | |
1982 | |
1983 template <class T> void do_oop_work(T* p) { | |
1984 oop obj = oopDesc::load_decode_heap_oop(p); | |
342 | 1985 const char* str = NULL; |
1986 const char* str2 = ""; | |
1987 | |
1988 if (!_g1h->is_in_g1_reserved(obj)) | |
1989 str = "outside G1 reserved"; | |
1990 else { | |
1991 HeapRegion* hr = _g1h->heap_region_containing(obj); | |
1992 guarantee( hr != NULL, "invariant" ); | |
1993 if (hr->obj_allocated_since_prev_marking(obj)) { | |
1994 str = "over TAMS"; | |
1995 if (_bitmap->isMarked((HeapWord*) obj)) | |
1996 str2 = " AND MARKED"; | |
1997 } else if (_bitmap->isMarked((HeapWord*) obj)) | |
1998 str = "marked"; | |
1999 else | |
2000 str = "#### NOT MARKED ####"; | |
2001 } | |
2002 | |
2003 _out->print_cr(" "PTR_FORMAT" contains "PTR_FORMAT" %s%s", | |
2004 p, (void*) obj, str, str2); | |
2005 } | |
2006 }; | |
2007 | |
2008 class ReachablePrinterClosure: public BitMapClosure { | |
2009 private: | |
2010 CMBitMapRO* _bitmap; | |
2011 outputStream* _out; | |
2012 | |
2013 public: | |
2014 ReachablePrinterClosure(CMBitMapRO* bitmap, outputStream* out) : | |
2015 _bitmap(bitmap), _out(out) { } | |
2016 | |
2017 bool do_bit(size_t offset) { | |
2018 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
2019 ReachablePrinterOopClosure oopCl(_bitmap, _out); | |
2020 | |
2021 _out->print_cr(" obj "PTR_FORMAT", offset %10d (marked)", addr, offset); | |
2022 oop(addr)->oop_iterate(&oopCl); | |
2023 _out->print_cr(""); | |
2024 | |
2025 return true; | |
2026 } | |
2027 }; | |
2028 | |
2029 class ObjInRegionReachablePrinterClosure : public ObjectClosure { | |
2030 private: | |
2031 CMBitMapRO* _bitmap; | |
2032 outputStream* _out; | |
2033 | |
2034 public: | |
2035 void do_object(oop o) { | |
2036 ReachablePrinterOopClosure oopCl(_bitmap, _out); | |
2037 | |
2038 _out->print_cr(" obj "PTR_FORMAT" (over TAMS)", (void*) o); | |
2039 o->oop_iterate(&oopCl); | |
2040 _out->print_cr(""); | |
2041 } | |
2042 | |
2043 ObjInRegionReachablePrinterClosure(CMBitMapRO* bitmap, outputStream* out) : | |
2044 _bitmap(bitmap), _out(out) { } | |
2045 }; | |
2046 | |
2047 class RegionReachablePrinterClosure : public HeapRegionClosure { | |
2048 private: | |
2049 CMBitMapRO* _bitmap; | |
2050 outputStream* _out; | |
2051 | |
2052 public: | |
2053 bool doHeapRegion(HeapRegion* hr) { | |
2054 HeapWord* b = hr->bottom(); | |
2055 HeapWord* e = hr->end(); | |
2056 HeapWord* t = hr->top(); | |
2057 HeapWord* p = hr->prev_top_at_mark_start(); | |
2058 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " | |
2059 "PTAMS: "PTR_FORMAT, b, e, t, p); | |
2060 _out->print_cr(""); | |
2061 | |
2062 ObjInRegionReachablePrinterClosure ocl(_bitmap, _out); | |
2063 hr->object_iterate_mem_careful(MemRegion(p, t), &ocl); | |
2064 | |
2065 return false; | |
2066 } | |
2067 | |
2068 RegionReachablePrinterClosure(CMBitMapRO* bitmap, | |
2069 outputStream* out) : | |
2070 _bitmap(bitmap), _out(out) { } | |
2071 }; | |
2072 | |
2073 void ConcurrentMark::print_prev_bitmap_reachable() { | |
2074 outputStream* out = gclog_or_tty; | |
2075 | |
2076 #if SEND_HEAP_DUMP_TO_FILE | |
2077 guarantee(heap_dump_file == NULL, "Protocol"); | |
2078 char fn_buf[100]; | |
2079 sprintf(fn_buf, "/tmp/dump.txt.%d", os::current_process_id()); | |
2080 heap_dump_file = fopen(fn_buf, "w"); | |
2081 fileStream fstream(heap_dump_file); | |
2082 out = &fstream; | |
2083 #endif // SEND_HEAP_DUMP_TO_FILE | |
2084 | |
2085 RegionReachablePrinterClosure rcl(_prevMarkBitMap, out); | |
2086 out->print_cr("--- ITERATING OVER REGIONS WITH PTAMS < TOP"); | |
2087 _g1h->heap_region_iterate(&rcl); | |
2088 out->print_cr(""); | |
2089 | |
2090 ReachablePrinterClosure cl(_prevMarkBitMap, out); | |
2091 out->print_cr("--- REACHABLE OBJECTS ON THE BITMAP"); | |
2092 _prevMarkBitMap->iterate(&cl); | |
2093 out->print_cr(""); | |
2094 | |
2095 #if SEND_HEAP_DUMP_TO_FILE | |
2096 fclose(heap_dump_file); | |
2097 heap_dump_file = NULL; | |
2098 #endif // SEND_HEAP_DUMP_TO_FILE | |
2099 } | |
2100 | |
2101 // This note is for drainAllSATBBuffers and the code in between. | |
2102 // In the future we could reuse a task to do this work during an | |
2103 // evacuation pause (since now tasks are not active and can be claimed | |
2104 // during an evacuation pause). This was a late change to the code and | |
2105 // is currently not being taken advantage of. | |
2106 | |
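// Editor's note: the SATB (snapshot-at-the-beginning) queues drained
// below are filled by the mutators' write barrier, which logs the old
// value of every reference field that is overwritten while marking is
// active; applying the closure to those buffers keeps objects live in
// the marking snapshot even after the mutator unlinks them.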
2107 class CMGlobalObjectClosure : public ObjectClosure { | |
2108 private: | |
2109 ConcurrentMark* _cm; | |
2110 | |
2111 public: | |
2112 void do_object(oop obj) { | |
2113 _cm->deal_with_reference(obj); | |
2114 } | |
2115 | |
2116 CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { } | |
2117 }; | |
2118 | |
2119 void ConcurrentMark::deal_with_reference(oop obj) { | |
2120 if (verbose_high()) | |
2121 gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT, | |
2122 (void*) obj); | |
2123 | |
2124 | |
2125 HeapWord* objAddr = (HeapWord*) obj; | |
845 | 2126 assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); |
342 | 2127 if (_g1h->is_in_g1_reserved(objAddr)) { |
2128 tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" ); | |
2129 HeapRegion* hr = _g1h->heap_region_containing(obj); | |
2130 if (_g1h->is_obj_ill(obj, hr)) { | |
2131 if (verbose_high()) | |
2132 gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered " | |
2133 "marked", (void*) obj); | |
2134 | |
2135 // we need to mark it first | |
2136 if (_nextMarkBitMap->parMark(objAddr)) { | |
2137 // No OrderAccess::store_load() is needed. It is implicit in the | |
2138 // CAS done in parMark(objAddr) above | |
2139 HeapWord* finger = _finger; | |
2140 if (objAddr < finger) { | |
2141 if (verbose_high()) | |
2142 gclog_or_tty->print_cr("[global] below the global finger " | |
2143 "("PTR_FORMAT"), pushing it", finger); | |
2144 if (!mark_stack_push(obj)) { | |
2145 if (verbose_low()) | |
2146 gclog_or_tty->print_cr("[global] global stack overflow during " | |
2147 "deal_with_reference"); | |
2148 } | |
2149 } | |
2150 } | |
2151 } | |
2152 } | |
2153 } | |
2154 | |
2155 void ConcurrentMark::drainAllSATBBuffers() { | |
2156 CMGlobalObjectClosure oc(this); | |
2157 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
2158 satb_mq_set.set_closure(&oc); | |
2159 | |
2160 while (satb_mq_set.apply_closure_to_completed_buffer()) { | |
2161 if (verbose_medium()) | |
2162 gclog_or_tty->print_cr("[global] processed an SATB buffer"); | |
2163 } | |
2164 | |
2165 // no need to check whether we should do this, as this is only | |
2166 // called during an evacuation pause | |
2167 satb_mq_set.iterate_closure_all_threads(); | |
2168 | |
2169 satb_mq_set.set_closure(NULL); | |
2170 guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" ); | |
2171 } | |
2172 | |
2173 void ConcurrentMark::markPrev(oop p) { | |
2174 // Note we are overriding the read-only view of the prev map here, via | |
2175 // the cast. | |
2176 ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*)p); | |
2177 } | |
2178 | |
2179 void ConcurrentMark::clear(oop p) { | |
2180 assert(p != NULL && p->is_oop(), "expected an oop"); | |
2181 HeapWord* addr = (HeapWord*)p; | |
2182 assert(addr >= _nextMarkBitMap->startWord() && |
2183 addr < _nextMarkBitMap->endWord(), "in a region"); | |
2184 | |
2185 _nextMarkBitMap->clear(addr); | |
2186 } | |
2187 | |
2188 void ConcurrentMark::clearRangeBothMaps(MemRegion mr) { | |
2189 // Note we are overriding the read-only view of the prev map here, via | |
2190 // the cast. | |
2191 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); | |
2192 _nextMarkBitMap->clearRange(mr); | |
2193 } | |
2194 | |
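// Editor's note on the claiming protocol below: a task claims the
// region containing the global finger by CAS-ing the finger from its
// current value to the region's end. On success the task owns the
// region; on failure some other task moved the finger first, so the
// claimer re-reads it and retries with the next region.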
2195 HeapRegion* | |
2196 ConcurrentMark::claim_region(int task_num) { | |
2197 // "checkpoint" the finger | |
2198 HeapWord* finger = _finger; | |
2199 | |
2200 // _heap_end will not change underneath our feet; it only changes at | |
2201 // yield points. | |
2202 while (finger < _heap_end) { | |
2203 tmp_guarantee_CM( _g1h->is_in_g1_reserved(finger), "invariant" ); | |
2204 | |
2205 // is the gap between reading the finger and doing the CAS too long? | |
2206 | |
2207 HeapRegion* curr_region = _g1h->heap_region_containing(finger); | |
2208 HeapWord* bottom = curr_region->bottom(); | |
2209 HeapWord* end = curr_region->end(); | |
2210 HeapWord* limit = curr_region->next_top_at_mark_start(); | |
2211 | |
2212 if (verbose_low()) | |
2213 gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" " | |
2214 "["PTR_FORMAT", "PTR_FORMAT"), " | |
2215 "limit = "PTR_FORMAT, | |
2216 task_num, curr_region, bottom, end, limit); | |
2217 | |
2218 HeapWord* res = | |
2219 (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); | |
2220 if (res == finger) { | |
2221 // we succeeded | |
2222 | |
2223 // notice that _finger == end cannot be guaranteed here since | |
2224 // someone else might have moved the finger even further | |
2225 guarantee( _finger >= end, "the finger should have moved forward" ); | |
2226 | |
2227 if (verbose_low()) | |
2228 gclog_or_tty->print_cr("[%d] we were successful with region = " | |
2229 PTR_FORMAT, task_num, curr_region); | |
2230 | |
2231 if (limit > bottom) { | |
2232 if (verbose_low()) | |
2233 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, " | |
2234 "returning it ", task_num, curr_region); | |
2235 return curr_region; | |
2236 } else { | |
2237 tmp_guarantee_CM( limit == bottom, | |
2238 "the region limit should be at bottom" ); | |
2239 if (verbose_low()) | |
2240 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, " | |
2241 "returning NULL", task_num, curr_region); | |
2242 // we return NULL and the caller should try calling | |
2243 // claim_region() again. | |
2244 return NULL; | |
2245 } | |
2246 } else { | |
2247 guarantee( _finger > finger, "the finger should have moved forward" ); | |
2248 if (verbose_low()) | |
2249 gclog_or_tty->print_cr("[%d] somebody else moved the finger, " | |
2250 "global finger = "PTR_FORMAT", " | |
2251 "our finger = "PTR_FORMAT, | |
2252 task_num, _finger, finger); | |
2253 | |
2254 // read it again | |
2255 finger = _finger; | |
2256 } | |
2257 } | |
2258 | |
2259 return NULL; | |
2260 } | |
2261 | |
2262 void ConcurrentMark::oops_do(OopClosure* cl) { | |
2263 if (_markStack.size() > 0 && verbose_low()) | |
2264 gclog_or_tty->print_cr("[global] scanning the global marking stack, " | |
2265 "size = %d", _markStack.size()); | |
2266 // we first iterate over the contents of the mark stack... | |
2267 _markStack.oops_do(cl); | |
2268 | |
2269 for (int i = 0; i < (int)_max_task_num; ++i) { | |
2270 OopTaskQueue* queue = _task_queues->queue((int)i); | |
2271 | |
2272 if (queue->size() > 0 && verbose_low()) | |
2273 gclog_or_tty->print_cr("[global] scanning task queue of task %d, " | |
2274 "size = %d", i, queue->size()); | |
2275 | |
2276 // ...then over the contents of the all the task queues. | |
2277 queue->oops_do(cl); | |
2278 } | |
2279 | |
2280 // finally, invalidate any entries in the region stack that | |
2281 // point into the collection set | |
2282 if (_regionStack.invalidate_entries_into_cset()) { | |
2283 // otherwise, any gray objects copied during the evacuation pause | |
2284 // might not be visited. | |
2285 guarantee( _should_gray_objects, "invariant" ); | |
2286 } | |
2287 } | |
2288 | |
2289 void ConcurrentMark::clear_marking_state() { | |
2290 _markStack.setEmpty(); | |
2291 _markStack.clear_overflow(); | |
2292 _regionStack.setEmpty(); | |
2293 _regionStack.clear_overflow(); | |
2294 clear_has_overflown(); | |
2295 _finger = _heap_start; | |
2296 | |
2297 for (int i = 0; i < (int)_max_task_num; ++i) { | |
2298 OopTaskQueue* queue = _task_queues->queue(i); | |
2299 queue->set_empty(); | |
2300 } | |
2301 } | |
2302 | |
2303 void ConcurrentMark::print_stats() { | |
2304 if (verbose_stats()) { | |
2305 gclog_or_tty->print_cr("---------------------------------------------------------------------"); | |
2306 for (size_t i = 0; i < _active_tasks; ++i) { | |
2307 _tasks[i]->print_stats(); | |
2308 gclog_or_tty->print_cr("---------------------------------------------------------------------"); | |
2309 } | |
2310 } | |
2311 } | |
2312 | |
2313 class CSMarkOopClosure: public OopClosure { | |
2314 friend class CSMarkBitMapClosure; | |
2315 | |
2316 G1CollectedHeap* _g1h; | |
2317 CMBitMap* _bm; | |
2318 ConcurrentMark* _cm; | |
2319 oop* _ms; | |
2320 jint* _array_ind_stack; | |
2321 int _ms_size; | |
2322 int _ms_ind; | |
2323 int _array_increment; | |
2324 | |
2325 bool push(oop obj, int arr_ind = 0) { | |
2326 if (_ms_ind == _ms_size) { | |
2327 gclog_or_tty->print_cr("Mark stack is full."); | |
2328 return false; | |
2329 } | |
2330 _ms[_ms_ind] = obj; | |
2331 if (obj->is_objArray()) _array_ind_stack[_ms_ind] = arr_ind; | |
2332 _ms_ind++; | |
2333 return true; | |
2334 } | |
2335 | |
2336 oop pop() { | |
2337 if (_ms_ind == 0) return NULL; | |
2338 else { | |
2339 _ms_ind--; | |
2340 return _ms[_ms_ind]; | |
2341 } | |
2342 } | |
2343 | |
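// Editor's note on drain() below: object arrays are scanned in chunks
// of _array_increment elements per pop; if elements remain, the array
// is re-pushed with the index of the next chunk, which bounds the work
// done per stack entry and keeps huge arrays from starving the drain.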
845 | 2344 template <class T> bool drain() { |
342 | 2345 while (_ms_ind > 0) { |
2346 oop obj = pop(); | |
2347 assert(obj != NULL, "Since index was non-zero."); | |
2348 if (obj->is_objArray()) { | |
2349 jint arr_ind = _array_ind_stack[_ms_ind]; | |
2350 objArrayOop aobj = objArrayOop(obj); | |
2351 jint len = aobj->length(); | |
2352 jint next_arr_ind = arr_ind + _array_increment; | |
2353 if (next_arr_ind < len) { | |
2354 push(obj, next_arr_ind); | |
2355 } | |
2356 // Now process this portion of this one. | |
2357 int lim = MIN2(next_arr_ind, len); | |
2358 for (int j = arr_ind; j < lim; j++) { | |
912 | 2359 do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j)); |
342 | 2360 } |
2361 | |
2362 } else { | |
2363 obj->oop_iterate(this); | |
2364 } | |
2365 if (abort()) return false; | |
2366 } | |
2367 return true; | |
2368 } | |
2369 | |
2370 public: | |
2371 CSMarkOopClosure(ConcurrentMark* cm, int ms_size) : | |
2372 _g1h(G1CollectedHeap::heap()), | |
2373 _cm(cm), | |
2374 _bm(cm->nextMarkBitMap()), | |
2375 _ms_size(ms_size), _ms_ind(0), | |
2376 _ms(NEW_C_HEAP_ARRAY(oop, ms_size)), | |
2377 _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)), | |
2378 _array_increment(MAX2(ms_size/8, 16)) | |
2379 {} | |
2380 | |
2381 ~CSMarkOopClosure() { | |
2382 FREE_C_HEAP_ARRAY(oop, _ms); | |
2383 FREE_C_HEAP_ARRAY(jint, _array_ind_stack); | |
2384 } | |
2385 | |
845 | 2386 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
2387 virtual void do_oop( oop* p) { do_oop_work(p); } | |
2388 | |
2389 template <class T> void do_oop_work(T* p) { | |
2390 T heap_oop = oopDesc::load_heap_oop(p); | |
2391 if (oopDesc::is_null(heap_oop)) return; | |
2392 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | |
342 | 2393 if (obj->is_forwarded()) { |
2394 // If the object has already been forwarded, we have to make sure | |
2395 // that it's marked. So follow the forwarding pointer. Note that | |
2396 // this does the right thing for self-forwarding pointers in the | |
2397 // evacuation failure case. | |
2398 obj = obj->forwardee(); | |
2399 } | |
2400 HeapRegion* hr = _g1h->heap_region_containing(obj); | |
2401 if (hr != NULL) { | |
2402 if (hr->in_collection_set()) { | |
2403 if (_g1h->is_obj_ill(obj)) { | |
2404 _bm->mark((HeapWord*)obj); | |
2405 if (!push(obj)) { | |
2406 gclog_or_tty->print_cr("Setting abort in CSMarkOopClosure because push failed."); | |
2407 set_abort(); | |
2408 } | |
2409 } | |
2410 } else { | |
2411 // Outside the collection set; we need to gray it | |
2412 _cm->deal_with_reference(obj); | |
2413 } | |
2414 } | |
2415 } | |
2416 }; | |
2417 | |
2418 class CSMarkBitMapClosure: public BitMapClosure { | |
2419 G1CollectedHeap* _g1h; | |
2420 CMBitMap* _bitMap; | |
2421 ConcurrentMark* _cm; | |
2422 CSMarkOopClosure _oop_cl; | |
2423 public: | |
2424 CSMarkBitMapClosure(ConcurrentMark* cm, int ms_size) : | |
2425 _g1h(G1CollectedHeap::heap()), | |
2426 _bitMap(cm->nextMarkBitMap()), | |
2427 _oop_cl(cm, ms_size) | |
2428 {} | |
2429 | |
2430 ~CSMarkBitMapClosure() {} | |
2431 | |
2432 bool do_bit(size_t offset) { | |
2433 // convert offset into a HeapWord* | |
2434 HeapWord* addr = _bitMap->offsetToHeapWord(offset); | |
2435 assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(), | |
2436 "address out of range"); | |
2437 assert(_bitMap->isMarked(addr), "tautology"); | |
2438 oop obj = oop(addr); | |
2439 if (!obj->is_forwarded()) { | |
2440 if (!_oop_cl.push(obj)) return false; | |
845 | 2441 if (UseCompressedOops) { |
2442 if (!_oop_cl.drain<narrowOop>()) return false; | |
2443 } else { | |
2444 if (!_oop_cl.drain<oop>()) return false; | |
2445 } | |
342 | 2446 } |
2447 // Otherwise... | |
2448 return true; | |
2449 } | |
2450 }; | |
2451 | |
2452 | |
2453 class CompleteMarkingInCSHRClosure: public HeapRegionClosure { | |
2454 CMBitMap* _bm; | |
2455 CSMarkBitMapClosure _bit_cl; | |
2456 enum SomePrivateConstants { | |
2457 MSSize = 1000 | |
2458 }; | |
2459 bool _completed; | |
2460 public: | |
2461 CompleteMarkingInCSHRClosure(ConcurrentMark* cm) : | |
2462 _bm(cm->nextMarkBitMap()), | |
2463 _bit_cl(cm, MSSize), | |
2464 _completed(true) | |
2465 {} | |
2466 | |
2467 ~CompleteMarkingInCSHRClosure() {} | |
2468 | |
2469 bool doHeapRegion(HeapRegion* r) { | |
2470 if (!r->evacuation_failed()) { | |
2471 MemRegion mr = MemRegion(r->bottom(), r->next_top_at_mark_start()); | |
2472 if (!mr.is_empty()) { | |
2473 if (!_bm->iterate(&_bit_cl, mr)) { | |
2474 _completed = false; | |
2475 return true; | |
2476 } | |
2477 } | |
2478 } | |
2479 return false; | |
2480 } | |
2481 | |
2482 bool completed() { return _completed; } | |
2483 }; | |
2484 | |
2485 class ClearMarksInHRClosure: public HeapRegionClosure { | |
2486 CMBitMap* _bm; | |
2487 public: | |
2488 ClearMarksInHRClosure(CMBitMap* bm): _bm(bm) { } | |
2489 | |
2490 bool doHeapRegion(HeapRegion* r) { | |
2491 if (!r->used_region().is_empty() && !r->evacuation_failed()) { | |
2492 MemRegion usedMR = r->used_region(); | |
2493 _bm->clearRange(usedMR); | |
2494 } | |
2495 return false; | |
2496 } | |
2497 }; | |
2498 | |
2499 void ConcurrentMark::complete_marking_in_collection_set() { | |
2500 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
2501 | |
2502 if (!g1h->mark_in_progress()) { | |
2503 g1h->g1_policy()->record_mark_closure_time(0.0); | |
2504 return; | |
2505 } | |
2506 | |
2507 int i = 1; | |
2508 double start = os::elapsedTime(); | |
2509 while (true) { | |
2510 i++; | |
2511 CompleteMarkingInCSHRClosure cmplt(this); | |
2512 g1h->collection_set_iterate(&cmplt); | |
2513 if (cmplt.completed()) break; | |
2514 } | |
2515 double end_time = os::elapsedTime(); | |
2516 double elapsed_time_ms = (end_time - start) * 1000.0; | |
2517 g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms); | |
2518 if (PrintGCDetails) { | |
2519 gclog_or_tty->print_cr("Mark closure took %5.2f ms.", elapsed_time_ms); | |
2520 } | |
2521 | |
2522 ClearMarksInHRClosure clr(nextMarkBitMap()); | |
2523 g1h->collection_set_iterate(&clr); | |
2524 } | |
2525 | |
2526 // The next two methods deal with the following optimisation. Some | |
2527 // objects are gray by being marked and located above the finger. If | |
2528 // they are copied below the finger during an evacuation pause, then | |
2529 // they need to be pushed on the stack. The observation is that, if | |
2530 // there are no regions in the collection set located above the | |
2531 // finger, then the above cannot happen, hence we do not need to | |
2532 // explicitly gray any objects when copying them to below the | |
2533 // finger. The global stack will be scanned to ensure that, if it | |
2534 // points to objects being copied, it will update their | |
2535 // location. There is a tricky situation with the gray objects in | |
2536 // the region stack that are being copied, however. See the comment | |
2537 // in newCSet(). | |
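// Editor's note, a concrete instance of the situation described above:
// suppose the global finger is at region R5 and a marked (gray) object
// in R7 is evacuated into R2. The marking pass has already swept past
// R2, so unless the object is pushed on a stack its new copy would
// never be scanned; the checks in the two methods below decide when
// such pushing is actually required. (R2/R5/R7 are hypothetical.)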
2538 | |
2539 void ConcurrentMark::newCSet() { | |
2540 if (!concurrent_marking_in_progress()) | |
2541 // nothing to do if marking is not in progress | |
2542 return; | |
2543 | |
2544 // find what the lowest finger is among the global and local fingers | |
2545 _min_finger = _finger; | |
2546 for (int i = 0; i < (int)_max_task_num; ++i) { | |
2547 CMTask* task = _tasks[i]; | |
2548 HeapWord* task_finger = task->finger(); | |
2549 if (task_finger != NULL && task_finger < _min_finger) | |
2550 _min_finger = task_finger; | |
2551 } | |
2552 | |
2553 _should_gray_objects = false; | |
2554 | |
2555 // This fixes a very subtle and frustrating bug. It might be the case | |
2556 // that, during an evacuation pause, heap regions that contain | |
2557 // objects that are gray (by being in regions contained in the | |
2558 // region stack) are included in the collection set. Since such gray | |
2559 // objects will be moved, and because it's not easy to redirect | |
2560 // region stack entries to point to a new location (because objects | |
2561 // in one region might be scattered to multiple regions after they | |
2562 // are copied), one option is to ensure that all marked objects | |
2563 // copied during a pause are pushed on the stack. Notice, however, | |
2564 // that this problem can only happen when the region stack is not | |
2565 // empty during an evacuation pause. So, we make the fix a bit less | |
2566 // conservative and ensure that regions are pushed on the stack, | |
2567 // irrespective of whether all collection set regions are below the | |
2568 // finger, if the region stack is not empty. This is expected to be | |
2569 // a rare case, so I don't think it's necessary to be smarter about it. | |
2570 if (!region_stack_empty()) | |
2571 _should_gray_objects = true; | |
2572 } | |
2573 | |
2574 void ConcurrentMark::registerCSetRegion(HeapRegion* hr) { | |
2575 if (!concurrent_marking_in_progress()) | |
2576 return; | |
2577 | |
2578 HeapWord* region_end = hr->end(); | |
2579 if (region_end > _min_finger) | |
2580 _should_gray_objects = true; | |
2581 } | |
2582 | |
2583 // abandon current marking iteration due to a Full GC | |
2584 void ConcurrentMark::abort() { | |
2585 // Clear all marks to force marking thread to do nothing | |
2586 _nextMarkBitMap->clearAll(); | |
2587 // Empty mark stack | |
2588 clear_marking_state(); | |
2589 for (int i = 0; i < (int)_max_task_num; ++i) | |
2590 _tasks[i]->clear_region_fields(); | |
2591 _has_aborted = true; | |
2592 | |
2593 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
2594 satb_mq_set.abandon_partial_marking(); | |
2595 satb_mq_set.set_active_all_threads(false); | |
2596 } | |
2597 | |
2598 static void print_ms_time_info(const char* prefix, const char* name, | |
2599 NumberSeq& ns) { | |
2600 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", | |
2601 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); | |
2602 if (ns.num() > 0) { | |
2603 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", | |
2604 prefix, ns.sd(), ns.maximum()); | |
2605 } | |
2606 } | |
2607 | |
2608 void ConcurrentMark::print_summary_info() { | |
2609 gclog_or_tty->print_cr(" Concurrent marking:"); | |
2610 print_ms_time_info(" ", "init marks", _init_times); | |
2611 print_ms_time_info(" ", "remarks", _remark_times); | |
2612 { | |
2613 print_ms_time_info(" ", "final marks", _remark_mark_times); | |
2614 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); | |
2615 | |
2616 } | |
2617 print_ms_time_info(" ", "cleanups", _cleanup_times); | |
2618 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", | |
2619 _total_counting_time, | |
2620 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / | |
2621 (double)_cleanup_times.num() | |
2622 : 0.0)); | |
2623 if (G1ScrubRemSets) { | |
2624 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", | |
2625 _total_rs_scrub_time, | |
2626 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / | |
2627 (double)_cleanup_times.num() | |
2628 : 0.0)); | |
2629 } | |
2630 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", | |
2631 (_init_times.sum() + _remark_times.sum() + | |
2632 _cleanup_times.sum())/1000.0); | |
2633 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " | |
2634 "(%8.2f s marking, %8.2f s counting).", | |
2635 cmThread()->vtime_accum(), | |
2636 cmThread()->vtime_mark_accum(), | |
2637 cmThread()->vtime_count_accum()); | |
2638 } | |
2639 | |
1019 | 2640 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { |
2641 _parallel_workers->print_worker_threads_on(st); | |
2642 } | |
2643 | |
342 | 2644 // Closures |
2645 // XXX: there seems to be a lot of code duplication here; | |
2646 // should refactor and consolidate the shared code. | |
2647 | |
2648 // The yield-check helpers below let concurrent marking pause | |
2649 // for stop-the-world operations. | |
2650 | |
2651 // We take a break if someone is trying to stop the world. | |
2652 bool ConcurrentMark::do_yield_check(int worker_i) { | |
2653 if (should_yield()) { | |
2654 if (worker_i == 0) | |
2655 _g1h->g1_policy()->record_concurrent_pause(); | |
2656 cmThread()->yield(); | |
2657 if (worker_i == 0) | |
2658 _g1h->g1_policy()->record_concurrent_pause_end(); | |
2659 return true; | |
2660 } else { | |
2661 return false; | |
2662 } | |
2663 } | |
2664 | |
2665 bool ConcurrentMark::should_yield() { | |
2666 return cmThread()->should_yield(); | |
2667 } | |
2668 | |
2669 bool ConcurrentMark::containing_card_is_marked(void* p) { | |
2670 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); | |
2671 return _card_bm.at(offset >> CardTableModRefBS::card_shift); | |
2672 } | |
2673 | |
2674 bool ConcurrentMark::containing_cards_are_marked(void* start, | |
2675 void* last) { | |
2676 return | |
2677 containing_card_is_marked(start) && | |
2678 containing_card_is_marked(last); | |
2679 } | |
2680 | |
2681 #ifndef PRODUCT | |
2682 // for debugging purposes | |
2683 void ConcurrentMark::print_finger() { | |
2684 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, | |
2685 _heap_start, _heap_end, _finger); | |
2686 for (int i = 0; i < (int) _max_task_num; ++i) { | |
2687 gclog_or_tty->print(" %d: "PTR_FORMAT, i, _tasks[i]->finger()); | |
2688 } | |
2689 gclog_or_tty->print_cr(""); | |
2690 } | |
2691 #endif | |
2692 | |
2693 // Closure for iteration over bitmaps | |
2694 class CMBitMapClosure : public BitMapClosure { | |
2695 private: | |
2696 // the bitmap that is being iterated over | |
2697 CMBitMap* _nextMarkBitMap; | |
2698 ConcurrentMark* _cm; | |
2699 CMTask* _task; | |
2700 // true if we're scanning a heap region claimed by the task (so that | |
2701 // we move the finger along), false if we're not, i.e. currently when | |
2702 // scanning a heap region popped from the region stack (so that we | |
2703 // do not move the task finger along; it'd be a mistake if we did so). | |
2704 bool _scanning_heap_region; | |
2705 | |
2706 public: | |
2707 CMBitMapClosure(CMTask *task, | |
2708 ConcurrentMark* cm, | |
2709 CMBitMap* nextMarkBitMap) | |
2710 : _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } | |
2711 | |
2712 void set_scanning_heap_region(bool scanning_heap_region) { | |
2713 _scanning_heap_region = scanning_heap_region; | |
2714 } | |
2715 | |
2716 bool do_bit(size_t offset) { | |
2717 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); | |
2718 tmp_guarantee_CM( _nextMarkBitMap->isMarked(addr), "invariant" ); | |
2719 tmp_guarantee_CM( addr < _cm->finger(), "invariant" ); | |
2720 | |
2721 if (_scanning_heap_region) { | |
2722 statsOnly( _task->increase_objs_found_on_bitmap() ); | |
2723 tmp_guarantee_CM( addr >= _task->finger(), "invariant" ); | |
2724 // We move that task's local finger along. | |
2725 _task->move_finger_to(addr); | |
2726 } else { | |
2727 // We move the task's region finger along. | |
2728 _task->move_region_finger_to(addr); | |
2729 } | |
2730 | |
2731 _task->scan_object(oop(addr)); | |
2732 // we only partially drain the local queue and global stack | |
2733 _task->drain_local_queue(true); | |
2734 _task->drain_global_stack(true); | |
2735 | |
2736 // if the has_aborted flag has been raised, we need to bail out of | |
2737 // the iteration | |
2738 return !_task->has_aborted(); | |
2739 } | |
2740 }; | |
2741 | |
2742 // Closure for iterating over objects, currently only used for | |
2743 // processing SATB buffers. | |
2744 class CMObjectClosure : public ObjectClosure { | |
2745 private: | |
2746 CMTask* _task; | |
2747 | |
2748 public: | |
2749 void do_object(oop obj) { | |
2750 _task->deal_with_reference(obj); | |
2751 } | |
2752 | |
2753 CMObjectClosure(CMTask* task) : _task(task) { } | |
2754 }; | |
2755 | |
2756 // Closure for iterating over object fields | |
2757 class CMOopClosure : public OopClosure { | |
2758 private: | |
2759 G1CollectedHeap* _g1h; | |
2760 ConcurrentMark* _cm; | |
2761 CMTask* _task; | |
2762 | |
2763 public: | |
845 | 2764 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
2765 virtual void do_oop( oop* p) { do_oop_work(p); } |
2766 |
2767 template <class T> void do_oop_work(T* p) { |
342 | 2768 tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant" ); |
845 | 2769 tmp_guarantee_CM( !_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(), "invariant" ); |
2770 |
2771 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 2772 if (_cm->verbose_high()) |
2773 gclog_or_tty->print_cr("[%d] we're looking at location " | |
2774 "*"PTR_FORMAT" = "PTR_FORMAT, | |
2775 _task->task_id(), p, (void*) obj); | |
2776 _task->deal_with_reference(obj); | |
2777 } | |
2778 | |
2779 CMOopClosure(G1CollectedHeap* g1h, | |
2780 ConcurrentMark* cm, | |
2781 CMTask* task) | |
2782 : _g1h(g1h), _cm(cm), _task(task) { } | |
2783 }; | |
2784 | |
2785 void CMTask::setup_for_region(HeapRegion* hr) { | |
2786 tmp_guarantee_CM( hr != NULL && !hr->continuesHumongous(), | |
2787 "claim_region() should have filtered out continues humongous regions" ); | |
2788 | |
2789 if (_cm->verbose_low()) | |
2790 gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT, | |
2791 _task_id, hr); | |
2792 | |
2793 _curr_region = hr; | |
2794 _finger = hr->bottom(); | |
2795 update_region_limit(); | |
2796 } | |
2797 | |
2798 void CMTask::update_region_limit() { | |
2799 HeapRegion* hr = _curr_region; | |
2800 HeapWord* bottom = hr->bottom(); | |
2801 HeapWord* limit = hr->next_top_at_mark_start(); | |
2802 | |
2803 if (limit == bottom) { | |
2804 if (_cm->verbose_low()) | |
2805 gclog_or_tty->print_cr("[%d] found an empty region " | |
2806 "["PTR_FORMAT", "PTR_FORMAT")", | |
2807 _task_id, bottom, limit); | |
2808 // The region was collected underneath our feet. | |
2809 // We set the finger to bottom to ensure that the bitmap | |
2810 // iteration that will follow this will not do anything. | |
2811 // (this is not a condition that holds when we set the region up, | |
2812 // as the region is not supposed to be empty in the first place) | |
2813 _finger = bottom; | |
2814 } else if (limit >= _region_limit) { | |
2815 tmp_guarantee_CM( limit >= _finger, "peace of mind" ); | |
2816 } else { | |
2817 tmp_guarantee_CM( limit < _region_limit, "only way to get here" ); | |
2818 // This can happen under some pretty unusual circumstances. An | |
2819 // evacuation pause empties the region underneath our feet (NTAMS | |
2820 // at bottom). We then do some allocation in the region (NTAMS | |
2821 // stays at bottom), followed by the region being used as a GC | |
2822 // alloc region (NTAMS will move to top() and the objects | |
2823 // originally below it will be grayed). All objects now marked in | |
2824 // the region are explicitly grayed, if below the global finger, | |
2825 // and in fact we do not need to scan anything else. So, we simply |
2826 // set _finger to be limit to ensure that the bitmap iteration | |
2827 // doesn't do anything. | |
2828 _finger = limit; | |
2829 } | |
2830 | |
2831 _region_limit = limit; | |
2832 } | |
2833 | |
2834 void CMTask::giveup_current_region() { | |
2835 tmp_guarantee_CM( _curr_region != NULL, "invariant" ); | |
2836 if (_cm->verbose_low()) | |
2837 gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT, | |
2838 _task_id, _curr_region); | |
2839 clear_region_fields(); | |
2840 } | |
2841 | |
2842 void CMTask::clear_region_fields() { | |
2843 // Values for these three fields that indicate that we're not | |
2844 // holding on to a region. | |
2845 _curr_region = NULL; | |
2846 _finger = NULL; | |
2847 _region_limit = NULL; | |
2848 | |
2849 _region_finger = NULL; | |
2850 } | |
2851 | |
2852 void CMTask::reset(CMBitMap* nextMarkBitMap) { | |
2853 guarantee( nextMarkBitMap != NULL, "invariant" ); | |
2854 | |
2855 if (_cm->verbose_low()) | |
2856 gclog_or_tty->print_cr("[%d] resetting", _task_id); | |
2857 | |
2858 _nextMarkBitMap = nextMarkBitMap; | |
2859 clear_region_fields(); | |
2860 | |
2861 _calls = 0; | |
2862 _elapsed_time_ms = 0.0; | |
2863 _termination_time_ms = 0.0; | |
2864 _termination_start_time_ms = 0.0; | |
2865 | |
2866 #if _MARKING_STATS_ | |
2867 _local_pushes = 0; | |
2868 _local_pops = 0; | |
2869 _local_max_size = 0; | |
2870 _objs_scanned = 0; | |
2871 _global_pushes = 0; | |
2872 _global_pops = 0; | |
2873 _global_max_size = 0; | |
2874 _global_transfers_to = 0; | |
2875 _global_transfers_from = 0; | |
2876 _region_stack_pops = 0; | |
2877 _regions_claimed = 0; | |
2878 _objs_found_on_bitmap = 0; | |
2879 _satb_buffers_processed = 0; | |
2880 _steal_attempts = 0; | |
2881 _steals = 0; | |
2882 _aborted = 0; | |
2883 _aborted_overflow = 0; | |
2884 _aborted_cm_aborted = 0; | |
2885 _aborted_yield = 0; | |
2886 _aborted_timed_out = 0; | |
2887 _aborted_satb = 0; | |
2888 _aborted_termination = 0; | |
2889 #endif // _MARKING_STATS_ | |
2890 } | |
2891 | |
2892 bool CMTask::should_exit_termination() { | |
2893 regular_clock_call(); | |
2894 // This is called when we are in the termination protocol. We should | |
2895 // quit if, for some reason, this task wants to abort or the global | |
2896 // stack is not empty (this means that we can get work from it). | |
2897 return !_cm->mark_stack_empty() || has_aborted(); | |
2898 } | |
2899 | |
2900 // This determines whether the method below will check both the local | |
2901 // and global fingers when determining whether to push on the stack a | |
2902 // gray object (value 1) or whether it will only check the global one | |
2903 // (value 0). The tradeoffs are that the former will be a bit more | |
2904 // accurate and possibly push fewer entries on the stack, but it might also be |
2905 // a little bit slower. | |
2906 | |
2907 #define _CHECK_BOTH_FINGERS_ 1 | |
2908 | |
2909 void CMTask::deal_with_reference(oop obj) { | |
2910 if (_cm->verbose_high()) | |
2911 gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT, | |
2912 _task_id, (void*) obj); | |
2913 | |
2914 ++_refs_reached; | |
2915 | |
2916 HeapWord* objAddr = (HeapWord*) obj; | |
845 | 2917 assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); |
342 | 2918 if (_g1h->is_in_g1_reserved(objAddr)) { |
2919 tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" ); | |
2920 HeapRegion* hr = _g1h->heap_region_containing(obj); | |
2921 if (_g1h->is_obj_ill(obj, hr)) { | |
2922 if (_cm->verbose_high()) | |
2923 gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked", | |
2924 _task_id, (void*) obj); | |
2925 | |
2926 // we need to mark it first | |
2927 if (_nextMarkBitMap->parMark(objAddr)) { | |
2928 // No OrderAccess::store_load() is needed. It is implicit in the |
2929 // CAS done in parMark(objAddr) above | |
2930 HeapWord* global_finger = _cm->finger(); | |
2931 | |
2932 #if _CHECK_BOTH_FINGERS_ | |
2933 // we will check both the local and global fingers | |
2934 | |
2935 if (_finger != NULL && objAddr < _finger) { | |
2936 if (_cm->verbose_high()) | |
2937 gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), " | |
2938 "pushing it", _task_id, _finger); | |
2939 push(obj); | |
2940 } else if (_curr_region != NULL && objAddr < _region_limit) { | |
2941 // do nothing | |
2942 } else if (objAddr < global_finger) { | |
2943 // Notice that the global finger might be moving forward | |
2944 // concurrently. This is not a problem. In the worst case, we | |
2945 // mark the object while it is above the global finger and, by | |
2946 // the time we read the global finger, it has moved forward | |
2947 // past this object. In this case, the object will probably |
2948 // be visited when a task is scanning the region and will also | |
2949 // be pushed on the stack. So, some duplicate work, but no | |
2950 // correctness problems. | |
2951 | |
2952 if (_cm->verbose_high()) | |
2953 gclog_or_tty->print_cr("[%d] below the global finger " | |
2954 "("PTR_FORMAT"), pushing it", | |
2955 _task_id, global_finger); | |
2956 push(obj); | |
2957 } else { | |
2958 // do nothing | |
2959 } | |
2960 #else // _CHECK_BOTH_FINGERS_ | |
2961 // we will only check the global finger | |
2962 | |
2963 if (objAddr < global_finger) { | |
2964 // see long comment above | |
2965 | |
2966 if (_cm->verbose_high()) | |
2967 gclog_or_tty->print_cr("[%d] below the global finger " | |
2968 "("PTR_FORMAT"), pushing it", | |
2969 _task_id, global_finger); | |
2970 push(obj); | |
2971 } | |
2972 #endif // _CHECK_BOTH_FINGERS_ | |
2973 } | |
2974 } | |
2975 } | |
2976 } | |
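To see the shape of the decision above at a glance, here is a compact, hedged restatement of the graying logic under _CHECK_BOTH_FINGERS_ == 1 (needs_push and the zero-for-NULL convention are illustrative, not from the source):

#include <cstdint>

// Sketch only: a freshly marked object is pushed exactly when no bitmap
// scan is already guaranteed to visit it.
static bool needs_push(uintptr_t obj,
                       uintptr_t local_finger,   // 0 when no region is held
                       uintptr_t region_limit,   // 0 when no region is held
                       uintptr_t global_finger) {
  if (local_finger != 0 && obj < local_finger) return true;  // behind our local scan
  if (region_limit != 0 && obj < region_limit) return false; // our own scan will reach it
  return obj < global_finger;                                // behind the global finger
}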
2977 | |
2978 void CMTask::push(oop obj) { | |
2979 HeapWord* objAddr = (HeapWord*) obj; | |
2980 tmp_guarantee_CM( _g1h->is_in_g1_reserved(objAddr), "invariant" ); | |
845 | 2981 tmp_guarantee_CM( !_g1h->heap_region_containing(objAddr)->is_on_free_list(), "invariant" ); |
342 | 2982 tmp_guarantee_CM( !_g1h->is_obj_ill(obj), "invariant" ); |
2983 tmp_guarantee_CM( _nextMarkBitMap->isMarked(objAddr), "invariant" ); | |
2984 | |
2985 if (_cm->verbose_high()) | |
2986 gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj); | |
2987 | |
2988 if (!_task_queue->push(obj)) { | |
2989 // The local task queue looks full. We need to push some entries | |
2990 // to the global stack. | |
2991 | |
2992 if (_cm->verbose_medium()) | |
2993 gclog_or_tty->print_cr("[%d] task queue overflow, " | |
2994 "moving entries to the global stack", | |
2995 _task_id); | |
2996 move_entries_to_global_stack(); | |
2997 | |
2998 // this should succeed since, even if we overflow the global | |
2999 // stack, we should have definitely removed some entries from the | |
3000 // local queue. So, there must be space on it. | |
3001 bool success = _task_queue->push(obj); | |
3002 tmp_guarantee_CM( success, "invariant" ); | |
3003 } | |
3004 | |
3005 statsOnly( int tmp_size = _task_queue->size(); | |
3006 if (tmp_size > _local_max_size) | |
3007 _local_max_size = tmp_size; | |
3008 ++_local_pushes ); | |
3009 } | |
3010 | |
3011 void CMTask::reached_limit() { | |
3012 tmp_guarantee_CM( _words_scanned >= _words_scanned_limit || | |
3013 _refs_reached >= _refs_reached_limit , | |
3014 "shouldn't have been called otherwise" ); | |
3015 regular_clock_call(); | |
3016 } | |
3017 | |
3018 void CMTask::regular_clock_call() { | |
3019 if (has_aborted()) | |
3020 return; | |
3021 | |
3022 // First, we need to recalculate the words scanned and refs reached | |
3023 // limits for the next clock call. | |
3024 recalculate_limits(); | |
3025 | |
3026 // During the regular clock call we do the following | |
3027 | |
3028 // (1) If an overflow has been flagged, then we abort. | |
3029 if (_cm->has_overflown()) { | |
3030 set_has_aborted(); | |
3031 return; | |
3032 } | |
3033 | |
3034 // If we are not concurrent (i.e. we're doing remark) we don't need | |
3035 // to check anything else. The other steps are only needed during | |
3036 // the concurrent marking phase. | |
3037 if (!concurrent()) | |
3038 return; | |
3039 | |
3040 // (2) If marking has been aborted for Full GC, then we also abort. | |
3041 if (_cm->has_aborted()) { | |
3042 set_has_aborted(); | |
3043 statsOnly( ++_aborted_cm_aborted ); | |
3044 return; | |
3045 } | |
3046 | |
3047 double curr_time_ms = os::elapsedVTime() * 1000.0; | |
3048 | |
3049 // (3) If marking stats are enabled, then we update the step history. | |
3050 #if _MARKING_STATS_ | |
3051 if (_words_scanned >= _words_scanned_limit) | |
3052 ++_clock_due_to_scanning; | |
3053 if (_refs_reached >= _refs_reached_limit) | |
3054 ++_clock_due_to_marking; | |
3055 | |
3056 double last_interval_ms = curr_time_ms - _interval_start_time_ms; | |
3057 _interval_start_time_ms = curr_time_ms; | |
3058 _all_clock_intervals_ms.add(last_interval_ms); | |
3059 | |
3060 if (_cm->verbose_medium()) { | |
3061 gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, " | |
3062 "scanned = %d%s, refs reached = %d%s", | |
3063 _task_id, last_interval_ms, | |
3064 _words_scanned, | |
3065 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", | |
3066 _refs_reached, | |
3067 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); | |
3068 } | |
3069 #endif // _MARKING_STATS_ | |
3070 | |
3071 // (4) We check whether we should yield. If we have to, then we abort. | |
3072 if (_cm->should_yield()) { | |
3073 // We should yield. To do this we abort the task. The caller is | |
3074 // responsible for yielding. | |
3075 set_has_aborted(); | |
3076 statsOnly( ++_aborted_yield ); | |
3077 return; | |
3078 } | |
3079 | |
3080 // (5) We check whether we've reached our time quota. If we have, | |
3081 // then we abort. | |
3082 double elapsed_time_ms = curr_time_ms - _start_time_ms; | |
3083 if (elapsed_time_ms > _time_target_ms) { | |
3084 set_has_aborted(); | |
3085 _has_aborted_timed_out = true; | |
3086 statsOnly( ++_aborted_timed_out ); | |
3087 return; | |
3088 } | |
3089 | |
3090 // (6) Finally, we check whether there are enough completed SATB |
3091 // buffers available for processing. If there are, we abort. | |
3092 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
3093 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { | |
3094 if (_cm->verbose_low()) | |
3095 gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers", | |
3096 _task_id); | |
3097 // we do need to process SATB buffers, we'll abort and restart | |
3098 // the marking task to do so | |
3099 set_has_aborted(); | |
3100 statsOnly( ++_aborted_satb ); | |
3101 return; | |
3102 } | |
3103 } | |
3104 | |
3105 void CMTask::recalculate_limits() { | |
3106 _real_words_scanned_limit = _words_scanned + words_scanned_period; | |
3107 _words_scanned_limit = _real_words_scanned_limit; | |
3108 | |
3109 _real_refs_reached_limit = _refs_reached + refs_reached_period; | |
3110 _refs_reached_limit = _real_refs_reached_limit; | |
3111 } | |
3112 | |
3113 void CMTask::decrease_limits() { | |
3114 // This is called when we believe that we're going to do an infrequent | |
3115 // operation which will increase the per-byte scanned cost (i.e. move |
3116 // entries to/from the global stack). It basically tries to decrease the | |
3117 // scanning limit so that the clock is called earlier. | |
3118 | |
3119 if (_cm->verbose_medium()) | |
3120 gclog_or_tty->print_cr("[%d] decreasing limits", _task_id); | |
3121 | |
3122 _words_scanned_limit = _real_words_scanned_limit - | |
3123 3 * words_scanned_period / 4; | |
3124 _refs_reached_limit = _real_refs_reached_limit - | |
3125 3 * refs_reached_period / 4; | |
3126 } | |
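As a worked instance of the arithmetic above (the period value is illustrative): after decrease_limits(), only a quarter of the period's budget remains before the clock fires.

#include <cstddef>

// Sketch of the expression above, with an illustrative period.
static size_t decreased_limit(size_t real_limit, size_t period) {
  return real_limit - 3 * period / 4;
}
// e.g. with period == 12288: decreased_limit(base + 12288, 12288) == base + 3072,
// so regular_clock_call() is reached after at most a quarter period of work.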
3127 | |
3128 void CMTask::move_entries_to_global_stack() { | |
3129 // local array where we'll store the entries that will be popped | |
3130 // from the local queue | |
3131 oop buffer[global_stack_transfer_size]; | |
3132 | |
3133 int n = 0; | |
3134 oop obj; | |
3135 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { | |
3136 buffer[n] = obj; | |
3137 ++n; | |
3138 } | |
3139 | |
3140 if (n > 0) { | |
3141 // we popped at least one entry from the local queue | |
3142 | |
3143 statsOnly( ++_global_transfers_to; _local_pops += n ); | |
3144 | |
3145 if (!_cm->mark_stack_push(buffer, n)) { | |
3146 if (_cm->verbose_low()) | |
3147 gclog_or_tty->print_cr("[%d] aborting due to global stack overflow", _task_id); | |
3148 set_has_aborted(); | |
3149 } else { | |
3150 // the transfer was successful | |
3151 | |
3152 if (_cm->verbose_medium()) | |
3153 gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack", | |
3154 _task_id, n); | |
3155 statsOnly( int tmp_size = _cm->mark_stack_size(); | |
3156 if (tmp_size > _global_max_size) | |
3157 _global_max_size = tmp_size; | |
3158 _global_pushes += n ); | |
3159 } | |
3160 } | |
3161 | |
3162 // this operation was quite expensive, so decrease the limits | |
3163 decrease_limits(); | |
3164 } | |
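The batching pattern above in miniature; a hedged sketch with illustrative types (Queue and Stack stand in for the task queue and the global mark stack). The design point is that synchronization on the shared stack is paid once per batch rather than once per entry:

// Sketch only.
template <typename Queue, typename Stack, typename T, int TransferSize>
void transfer_batch(Queue& local, Stack& global) {
  T buffer[TransferSize];
  int n = 0;
  while (n < TransferSize && local.pop_local(buffer[n])) ++n; // thread-local pops, no lock
  if (n > 0) global.push_batch(buffer, n); // one synchronized operation per batch
}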
3165 | |
3166 void CMTask::get_entries_from_global_stack() { | |
3167 // local array where we'll store the entries that will be popped | |
3168 // from the global stack. | |
3169 oop buffer[global_stack_transfer_size]; | |
3170 int n; | |
3171 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); | |
3172 tmp_guarantee_CM( n <= global_stack_transfer_size, | |
3173 "we should not pop more than the given limit" ); | |
3174 if (n > 0) { | |
3175 // yes, we did actually pop at least one entry | |
3176 | |
3177 statsOnly( ++_global_transfers_from; _global_pops += n ); | |
3178 if (_cm->verbose_medium()) | |
3179 gclog_or_tty->print_cr("[%d] popped %d entries from the global stack", | |
3180 _task_id, n); | |
3181 for (int i = 0; i < n; ++i) { | |
3182 bool success = _task_queue->push(buffer[i]); | |
3183 // We only call this when the local queue is empty or under a | |
3184 // given target limit. So, we do not expect this push to fail. | |
3185 tmp_guarantee_CM( success, "invariant" ); | |
3186 } | |
3187 | |
3188 statsOnly( int tmp_size = _task_queue->size(); | |
3189 if (tmp_size > _local_max_size) | |
3190 _local_max_size = tmp_size; | |
3191 _local_pushes += n ); | |
3192 } | |
3193 | |
3194 // this operation was quite expensive, so decrease the limits | |
3195 decrease_limits(); | |
3196 } | |
3197 | |
3198 void CMTask::drain_local_queue(bool partially) { | |
3199 if (has_aborted()) | |
3200 return; | |
3201 | |
3202 // Decide what the target size is, depending on whether we're going to |
3203 // drain it partially (so that other tasks can steal if they run out | |
3204 // of things to do) or totally (at the very end). | |
3205 size_t target_size; | |
3206 if (partially) | |
3207 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); | |
3208 else | |
3209 target_size = 0; | |
3210 | |
3211 if (_task_queue->size() > target_size) { | |
3212 if (_cm->verbose_high()) | |
3213 gclog_or_tty->print_cr("[%d] draining local queue, target size = %d", | |
3214 _task_id, target_size); | |
3215 | |
3216 oop obj; | |
3217 bool ret = _task_queue->pop_local(obj); | |
3218 while (ret) { | |
3219 statsOnly( ++_local_pops ); | |
3220 | |
3221 if (_cm->verbose_high()) | |
3222 gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id, | |
3223 (void*) obj); | |
3224 | |
3225 tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) obj), | |
3226 "invariant" ); | |
845 | 3227 tmp_guarantee_CM( !_g1h->heap_region_containing(obj)->is_on_free_list(), |
3228 "invariant" ); |
342 | 3229 |
3230 scan_object(obj); | |
3231 | |
3232 if (_task_queue->size() <= target_size || has_aborted()) | |
3233 ret = false; | |
3234 else | |
3235 ret = _task_queue->pop_local(obj); | |
3236 } | |
3237 | |
3238 if (_cm->verbose_high()) | |
3239 gclog_or_tty->print_cr("[%d] drained local queue, size = %d", | |
3240 _task_id, _task_queue->size()); | |
3241 } | |
3242 } | |
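For concreteness, the partial-drain target above works out as follows; 64 is the assumed default of GCDrainStackTargetSize and the queue capacity is illustrative:

#include <algorithm>
#include <cstddef>

// Sketch of the target-size computation above (MIN2 equivalent).
static size_t partial_drain_target(size_t max_elems, size_t drain_target = 64) {
  return std::min(max_elems / 3, drain_target);
}
// e.g. partial_drain_target(16384) == 64: the queue is drained down to
// about 64 entries, leaving work available for other tasks to steal.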
3243 | |
3244 void CMTask::drain_global_stack(bool partially) { | |
3245 if (has_aborted()) | |
3246 return; | |
3247 | |
3248 // We have a policy to drain the local queue before we attempt to | |
3249 // drain the global stack. | |
3250 tmp_guarantee_CM( partially || _task_queue->size() == 0, "invariant" ); | |
3251 | |
3252 // Decide what the target size is, depending on whether we're going to |
3253 // drain it partially (so that other tasks can steal if they run out | |
3254 // of things to do) or totally (at the very end). Notice that, | |
3255 // because we move entries from the global stack in chunks or | |
3256 // because another task might be doing the same, we might in fact | |
3257 // drop below the target. But, this is not a problem. | |
3258 size_t target_size; | |
3259 if (partially) | |
3260 target_size = _cm->partial_mark_stack_size_target(); | |
3261 else | |
3262 target_size = 0; | |
3263 | |
3264 if (_cm->mark_stack_size() > target_size) { | |
3265 if (_cm->verbose_low()) | |
3266 gclog_or_tty->print_cr("[%d] draining global_stack, target size %d", | |
3267 _task_id, target_size); | |
3268 | |
3269 while (!has_aborted() && _cm->mark_stack_size() > target_size) { | |
3270 get_entries_from_global_stack(); | |
3271 drain_local_queue(partially); | |
3272 } | |
3273 | |
3274 if (_cm->verbose_low()) | |
3275 gclog_or_tty->print_cr("[%d] drained global stack, size = %d", | |
3276 _task_id, _cm->mark_stack_size()); | |
3277 } | |
3278 } | |
3279 | |
3280 // The SATB queue code makes several assumptions on whether to call the par |
3281 // or non-par versions of the methods. This is why some of the code is |
3282 // replicated. We should really get rid of the single-threaded version | |
3283 // of the code to simplify things. | |
3284 void CMTask::drain_satb_buffers() { | |
3285 if (has_aborted()) | |
3286 return; | |
3287 | |
3288 // We set this so that the regular clock knows that we're in the | |
3289 // middle of draining buffers and doesn't set the abort flag when it | |
3290 // notices that SATB buffers are available for draining. It'd be | |
3291 // very counterproductive if it did that. :-) |
3292 _draining_satb_buffers = true; | |
3293 | |
3294 CMObjectClosure oc(this); | |
3295 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | |
3296 if (ParallelGCThreads > 0) | |
3297 satb_mq_set.set_par_closure(_task_id, &oc); | |
3298 else | |
3299 satb_mq_set.set_closure(&oc); | |
3300 | |
3301 // This keeps claiming and applying the closure to completed buffers | |
3302 // until we run out of buffers or we need to abort. | |
3303 if (ParallelGCThreads > 0) { | |
3304 while (!has_aborted() && | |
3305 satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) { | |
3306 if (_cm->verbose_medium()) | |
3307 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); | |
3308 statsOnly( ++_satb_buffers_processed ); | |
3309 regular_clock_call(); | |
3310 } | |
3311 } else { | |
3312 while (!has_aborted() && | |
3313 satb_mq_set.apply_closure_to_completed_buffer()) { | |
3314 if (_cm->verbose_medium()) | |
3315 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); | |
3316 statsOnly( ++_satb_buffers_processed ); | |
3317 regular_clock_call(); | |
3318 } | |
3319 } | |
3320 | |
3321 if (!concurrent() && !has_aborted()) { | |
3322 // We should only do this during remark. | |
3323 if (ParallelGCThreads > 0) | |
3324 satb_mq_set.par_iterate_closure_all_threads(_task_id); | |
3325 else | |
3326 satb_mq_set.iterate_closure_all_threads(); | |
3327 } | |
3328 | |
3329 _draining_satb_buffers = false; | |
3330 | |
3331 tmp_guarantee_CM( has_aborted() || | |
3332 concurrent() || | |
3333 satb_mq_set.completed_buffers_num() == 0, "invariant" ); | |
3334 | |
3335 if (ParallelGCThreads > 0) | |
3336 satb_mq_set.set_par_closure(_task_id, NULL); | |
3337 else | |
3338 satb_mq_set.set_closure(NULL); | |
3339 | |
3340 // again, this was a potentially expensive operation, decrease the | |
3341 // limits to get the regular clock call early | |
3342 decrease_limits(); | |
3343 } | |
3344 | |
3345 void CMTask::drain_region_stack(BitMapClosure* bc) { | |
3346 if (has_aborted()) | |
3347 return; | |
3348 | |
3349 tmp_guarantee_CM( _region_finger == NULL, | |
3350 "it should be NULL when we're not scanning a region" ); | |
3351 | |
3352 if (!_cm->region_stack_empty()) { | |
3353 if (_cm->verbose_low()) | |
3354 gclog_or_tty->print_cr("[%d] draining region stack, size = %d", | |
3355 _task_id, _cm->region_stack_size()); | |
3356 | |
3357 MemRegion mr = _cm->region_stack_pop(); | |
3358 // it returns MemRegion() if the pop fails | |
3359 statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); | |
3360 | |
3361 while (mr.start() != NULL) { | |
3362 if (_cm->verbose_medium()) | |
3363 gclog_or_tty->print_cr("[%d] we are scanning region " | |
3364 "["PTR_FORMAT", "PTR_FORMAT")", | |
3365 _task_id, mr.start(), mr.end()); | |
3366 tmp_guarantee_CM( mr.end() <= _cm->finger(), | |
3367 "otherwise the region shouldn't be on the stack" ); | |
3368 assert(!mr.is_empty(), "Only non-empty regions live on the region stack"); | |
3369 if (_nextMarkBitMap->iterate(bc, mr)) { | |
3370 tmp_guarantee_CM( !has_aborted(), | |
3371 "cannot abort the task without aborting the bitmap iteration" ); | |
3372 | |
3373 // We finished iterating over the region without aborting. | |
3374 regular_clock_call(); | |
3375 if (has_aborted()) | |
3376 mr = MemRegion(); | |
3377 else { | |
3378 mr = _cm->region_stack_pop(); | |
3379 // it returns MemRegion() if the pop fails | |
3380 statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); | |
3381 } | |
3382 } else { | |
3383 guarantee( has_aborted(), "currently the only way to do so" ); | |
3384 | |
3385 // The only way to abort the bitmap iteration is to return | |
3386 // false from the do_bit() method. However, inside the | |
3387 // do_bit() method we move the _region_finger to point to the | |
3388 // object currently being looked at. So, if we bail out, we | |
3389 // have definitely set _region_finger to something non-null. | |
3390 guarantee( _region_finger != NULL, "invariant" ); | |
3391 | |
3392 // The iteration was actually aborted. So now _region_finger | |
3393 // points to the address of the object we last scanned. If we | |
3394 // leave it there, when we restart this task, we will rescan | |
3395 // the object. It is easy to avoid this. We move the finger by | |
3396 // enough to point to the next possible object header (the | |
3397 // bitmap knows by how much we need to move it as it knows its | |
3398 // granularity). | |
3399 MemRegion newRegion = | |
3400 MemRegion(_nextMarkBitMap->nextWord(_region_finger), mr.end()); | |
3401 | |
3402 if (!newRegion.is_empty()) { | |
3403 if (_cm->verbose_low()) { | |
3404 gclog_or_tty->print_cr("[%d] pushing unscanned region" | |
3405 "[" PTR_FORMAT "," PTR_FORMAT ") on region stack", | |
3406 _task_id, | |
3407 newRegion.start(), newRegion.end()); | |
3408 } | |
3409 // Now push the part of the region we didn't scan on the | |
3410 // region stack to make sure a task scans it later. | |
3411 _cm->region_stack_push(newRegion); | |
3412 } | |
3413 // break from while | |
3414 mr = MemRegion(); | |
3415 } | |
3416 _region_finger = NULL; | |
3417 } | |
3418 | |
3419 if (_cm->verbose_low()) | |
3420 gclog_or_tty->print_cr("[%d] drained region stack, size = %d", | |
3421 _task_id, _cm->region_stack_size()); | |
3422 } | |
3423 } | |
3424 | |
3425 void CMTask::print_stats() { | |
3426 gclog_or_tty->print_cr("Marking Stats, task = %d, calls = %d", | |
3427 _task_id, _calls); | |
3428 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", | |
3429 _elapsed_time_ms, _termination_time_ms); | |
3430 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", | |
3431 _step_times_ms.num(), _step_times_ms.avg(), | |
3432 _step_times_ms.sd()); | |
3433 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", | |
3434 _step_times_ms.maximum(), _step_times_ms.sum()); | |
3435 | |
3436 #if _MARKING_STATS_ | |
3437 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", | |
3438 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), | |
3439 _all_clock_intervals_ms.sd()); | |
3440 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", | |
3441 _all_clock_intervals_ms.maximum(), | |
3442 _all_clock_intervals_ms.sum()); | |
3443 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", | |
3444 _clock_due_to_scanning, _clock_due_to_marking); | |
3445 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", | |
3446 _objs_scanned, _objs_found_on_bitmap); | |
3447 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", | |
3448 _local_pushes, _local_pops, _local_max_size); | |
3449 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", | |
3450 _global_pushes, _global_pops, _global_max_size); | |
3451 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", | |
3452 _global_transfers_to,_global_transfers_from); | |
3453 gclog_or_tty->print_cr(" Regions: claimed = %d, Region Stack: pops = %d", | |
3454 _regions_claimed, _region_stack_pops); | |
3455 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); | |
3456 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", | |
3457 _steal_attempts, _steals); | |
3458 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); | |
3459 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", | |
3460 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); | |
3461 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", | |
3462 _aborted_timed_out, _aborted_satb, _aborted_termination); | |
3463 #endif // _MARKING_STATS_ | |
3464 } | |
3465 | |
3466 /***************************************************************************** | |
3467 | |
3468 The do_marking_step(time_target_ms) method is the building block | |
3469 of the parallel marking framework. It can be called in parallel | |
3470 with other invocations of do_marking_step() on different tasks | |
3471 (but only one per task, obviously) and concurrently with the | |
3472 mutator threads, or during remark, hence it eliminates the need | |
3473 for two versions of the code. When called during remark, it will | |
3474 pick up from where the task left off during the concurrent marking | |
3475 phase. Interestingly, tasks are also claimable during evacuation | |
3476 pauses, since do_marking_step() ensures that it aborts before |
3477 it needs to yield. | |
3478 | |
3479 The data structures that it uses to do marking work are the |
3480 following: | |
3481 | |
3482 (1) Marking Bitmap. If there are gray objects that appear only | |
3483 on the bitmap (this happens either when dealing with an overflow | |
3484 or when the initial marking phase has simply marked the roots | |
3485 and didn't push them on the stack), then tasks claim heap | |
3486 regions whose bitmap they then scan to find gray objects. A | |
3487 global finger indicates where the end of the last claimed region | |
3488 is. A local finger indicates how far into the region a task has | |
3489 scanned. The two fingers are used to determine how to gray an | |
3490 object (i.e. whether simply marking it is OK, as it will be | |
3491 visited by a task in the future, or whether it needs to be also | |
3492 pushed on a stack). | |
3493 | |
3494 (2) Local Queue. The local queue of the task which is accessed | |
3495 reasonably efficiently by the task. Other tasks can steal from | |
3496 it when they run out of work. Throughout the marking phase, a | |
3497 task attempts to keep its local queue short but not totally | |
3498 empty, so that entries are available for stealing by other | |
3499 tasks. Only when there is no more work will a task totally |
3500 drain its local queue. |
3501 | |
3502 (3) Global Mark Stack. This handles local queue overflow. During | |
3503 marking, only batches of entries are moved between it and the local |
3504 queues, as access to it requires a mutex and finer-grained |
3505 interaction with it might cause contention. If it |
3506 overflows, then the marking phase should restart and iterate | |
3507 over the bitmap to identify gray objects. Throughout the marking | |
3508 phase, tasks attempt to keep the global mark stack at a small | |
3509 length but not totally empty, so that entries are available for | |
3510 popping by other tasks. Only when there is no more work will tasks |
3511 will totally drain the global mark stack. | |
3512 | |
3513 (4) Global Region Stack. Entries on it correspond to areas of | |
3514 the bitmap that need to be scanned since they contain gray | |
3515 objects. Pushes on the region stack only happen during | |
3516 evacuation pauses and typically correspond to areas covered by | |
3517 GC LABs. If it overflows, then the marking phase should restart |
3518 and iterate over the bitmap to identify gray objects. Tasks will | |
3519 try to totally drain the region stack as soon as possible. | |
3520 | |
3521 (5) SATB Buffer Queue. This is where completed SATB buffers are | |
3522 made available. Buffers are regularly removed from this queue | |
3523 and scanned for roots, so that the queue doesn't get too | |
3524 long. During remark, all completed buffers are processed, as | |
3525 well as the filled-in parts of any uncompleted buffers. |
3526 | |
3527 The do_marking_step() method tries to abort when the time target | |
3528 has been reached. There are a few other cases when the | |
3529 do_marking_step() method also aborts: | |
3530 | |
3531 (1) When the marking phase has been aborted (after a Full GC). | |
3532 | |
3533 (2) When a global overflow (either on the global stack or the | |
3534 region stack) has been triggered. Before the task aborts, it | |
3535 will actually sync up with the other tasks to ensure that all | |
3536 the marking data structures (local queues, stacks, fingers etc.) | |
3537 are re-initialised so that when do_marking_step() completes, | |
3538 the marking phase can immediately restart. | |
3539 | |
3540 (3) When enough completed SATB buffers are available. The | |
3541 do_marking_step() method only tries to drain SATB buffers right | |
3542 at the beginning. So, if enough buffers are available, the | |
3543 marking step aborts and the SATB buffers are processed at | |
3544 the beginning of the next invocation. | |
3545 | |
3546 (4) To yield. When we have to yield, we abort and yield |
3547 right at the end of do_marking_step(). This saves us from a lot | |
3548 of hassle as, by yielding we might allow a Full GC. If this | |
3549 happens then objects will be compacted underneath our feet, the | |
3550 heap might shrink, etc. We save checking for this by just | |
3551 aborting and doing the yield right at the end. | |
3552 | |
3553 From the above it follows that the do_marking_step() method should | |
3554 be called in a loop (or, otherwise, regularly) until it completes. | |
3555 | |
3556 If a marking step completes without its has_aborted() flag being | |
3557 true, it means it has completed the current marking phase (and | |
3558 also all other marking tasks have done so and have all synced up). | |
3559 | |
3560 A method called regular_clock_call() is invoked "regularly" (at |
3561 sub-ms intervals) throughout marking. It is this clock method that |
3562 checks all the abort conditions which were mentioned above and | |
3563 decides when the task should abort. A work-based scheme is used to | |
3564 trigger this clock method: when the number of object words the | |
3565 marking phase has scanned or the number of references the marking | |
3566 phase has visited reaches a given limit. Additional invocations of |
3567 the clock method have been planted in a few other strategic places |
3568 too. The initial reason for the clock method was to avoid calling | |
3569 vtime too regularly, as it is quite expensive. So, once it was in | |
3570 place, it was natural to piggy-back all the other conditions on it | |
3571 too and not constantly check them throughout the code. | |
3572 | |
3573 *****************************************************************************/ | |
3574 | |
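Before the implementation itself, a minimal sketch of the calling pattern the comment prescribes; the 10ms target and the loop shape are illustrative, the real callers being the concurrent marking thread and the remark-time parallel task:

// Sketch only:
//   do {
//     task->do_marking_step(10.0 /* target ms, illustrative */);
//     // between steps the caller yields, or takes part in the overflow
//     // restart protocol, depending on the abort reason
//   } while (task->has_aborted());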
3575 void CMTask::do_marking_step(double time_target_ms) { | |
3576 guarantee( time_target_ms >= 1.0, "minimum granularity is 1ms" ); | |
3577 guarantee( concurrent() == _cm->concurrent(), "they should be the same" ); | |
3578 | |
3579 guarantee( concurrent() || _cm->region_stack_empty(), | |
3580 "the region stack should have been cleared before remark" ); | |
3581 guarantee( _region_finger == NULL, | |
3582 "this should be non-null only when a region is being scanned" ); | |
3583 | |
3584 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); | |
3585 guarantee( _task_queues != NULL, "invariant" ); | |
3586 guarantee( _task_queue != NULL, "invariant" ); | |
3587 guarantee( _task_queues->queue(_task_id) == _task_queue, "invariant" ); | |
3588 | |
3589 guarantee( !_claimed, | |
3590 "only one thread should claim this task at any one time" ); | |
3591 | |
3592 // OK, this doesn't safeguard against all possible scenarios, as it is |
3593 // possible for two threads to set the _claimed flag at the same | |
3594 // time. But it is only for debugging purposes anyway and it will | |
3595 // catch most problems. | |
3596 _claimed = true; | |
3597 | |
3598 _start_time_ms = os::elapsedVTime() * 1000.0; | |
3599 statsOnly( _interval_start_time_ms = _start_time_ms ); | |
3600 | |
3601 double diff_prediction_ms = | |
3602 g1_policy->get_new_prediction(&_marking_step_diffs_ms); | |
3603 _time_target_ms = time_target_ms - diff_prediction_ms; | |
3604 | |
3605 // set up the variables that are used in the work-based scheme to | |
3606 // call the regular clock method | |
3607 _words_scanned = 0; | |
3608 _refs_reached = 0; | |
3609 recalculate_limits(); | |
3610 | |
3611 // clear all flags | |
3612 clear_has_aborted(); | |
3613 _has_aborted_timed_out = false; | |
3614 _draining_satb_buffers = false; | |
3615 | |
3616 ++_calls; | |
3617 | |
3618 if (_cm->verbose_low()) | |
3619 gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, " | |
3620 "target = %1.2lfms >>>>>>>>>>", | |
3621 _task_id, _calls, _time_target_ms); | |
3622 | |
3623 // Set up the bitmap and oop closures. Anything that uses them is | |
3624 // eventually called from this method, so it is OK to allocate these | |
3625 // statically. | |
3626 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); | |
3627 CMOopClosure oop_closure(_g1h, _cm, this); | |
3628 set_oop_closure(&oop_closure); | |
3629 | |
3630 if (_cm->has_overflown()) { | |
3631 // This can happen if the region stack or the mark stack overflows | |
3632 // during a GC pause and this task, after a yield point, | |
3633 // restarts. We have to abort as we need to get into the overflow | |
3634 // protocol which happens right at the end of this task. | |
3635 set_has_aborted(); | |
3636 } | |
3637 | |
3638 // First drain any available SATB buffers. After this, we will not | |
3639 // look at SATB buffers before the next invocation of this method. | |
3640 // If enough completed SATB buffers are queued up, the regular clock | |
3641 // will abort this task so that it restarts. | |
3642 drain_satb_buffers(); | |
3643 // ...then partially drain the local queue and the global stack | |
3644 drain_local_queue(true); | |
3645 drain_global_stack(true); | |
3646 | |
3647 // Then totally drain the region stack. We will not look at | |
3648 // it again before the next invocation of this method. Entries on | |
3649 // the region stack are only added during evacuation pauses, for | |
3650 // which we have to yield. When we do, we abort the task anyway so | |
3651 // it will look at the region stack again when it restarts. | |
3652 bitmap_closure.set_scanning_heap_region(false); | |
3653 drain_region_stack(&bitmap_closure); | |
3654 // ...then partially drain the local queue and the global stack | |
3655 drain_local_queue(true); | |
3656 drain_global_stack(true); | |
3657 | |
3658 do { | |
3659 if (!has_aborted() && _curr_region != NULL) { | |
3660 // This means that we're already holding on to a region. | |
3661 tmp_guarantee_CM( _finger != NULL, | |
3662 "if region is not NULL, then the finger " | |
3663 "should not be NULL either" ); | |
3664 | |
3665 // We might have restarted this task after an evacuation pause | |
3666 // which might have evacuated the region we're holding on to | |
3667 // underneath our feet. Let's read its limit again to make sure | |
3668 // that we do not iterate over a region of the heap that | |
3669 // contains garbage (update_region_limit() will also move | |
3670 // _finger to the start of the region if it is found empty). | |
3671 update_region_limit(); | |
3672 // We will start from _finger not from the start of the region, | |
3673 // as we might be restarting this task after aborting half-way | |
3674 // through scanning this region. In this case, _finger points to | |
3675 // the address where we last found a marked object. If this is a | |
3676 // fresh region, _finger points to start(). | |
3677 MemRegion mr = MemRegion(_finger, _region_limit); | |
3678 | |
3679 if (_cm->verbose_low()) | |
3680 gclog_or_tty->print_cr("[%d] we're scanning part " | |
3681 "["PTR_FORMAT", "PTR_FORMAT") " | |
3682 "of region "PTR_FORMAT, | |
3683 _task_id, _finger, _region_limit, _curr_region); | |
3684 | |
3685 // Let's iterate over the bitmap of the part of the | |
3686 // region that is left. | |
3687 bitmap_closure.set_scanning_heap_region(true); | |
3688 if (mr.is_empty() || | |
3689 _nextMarkBitMap->iterate(&bitmap_closure, mr)) { | |
3690 // We successfully completed iterating over the region. Now, | |
3691 // let's give up the region. | |
3692 giveup_current_region(); | |
3693 regular_clock_call(); | |
3694 } else { | |
3695 guarantee( has_aborted(), "currently the only way to do so" ); | |
3696 // The only way to abort the bitmap iteration is to return | |
3697 // false from the do_bit() method. However, inside the | |
3698 // do_bit() method we move the _finger to point to the | |
3699 // object currently being looked at. So, if we bail out, we | |
3700 // have definitely set _finger to something non-null. | |
3701 guarantee( _finger != NULL, "invariant" ); | |
3702 | |
3703 // Region iteration was actually aborted. So now _finger | |
3704 // points to the address of the object we last scanned. If we | |
3705 // leave it there, when we restart this task, we will rescan | |
3706 // the object. It is easy to avoid this. We move the finger by | |
3707 // enough to point to the next possible object header (the | |
3708 // bitmap knows by how much we need to move it as it knows its | |
3709 // granularity). | |
3710 move_finger_to(_nextMarkBitMap->nextWord(_finger)); | |
3711 } | |
3712 } | |
3713 // At this point we have either completed iterating over the | |
3714 // region we were holding on to, or we have aborted. | |
3715 | |
3716 // We then partially drain the local queue and the global stack. | |
3717 // (Do we really need this?) | |
3718 drain_local_queue(true); | |
3719 drain_global_stack(true); | |
3720 | |
3721 // Read the note on the claim_region() method on why it might | |
3722 // return NULL with potentially more regions available for | |
3723 // claiming and why we have to check out_of_regions() to determine | |
3724 // whether we're done or not. | |
3725 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { | |
3726 // We are going to try to claim a new region. We should have | |
3727 // given up on the previous one. | |
3728 tmp_guarantee_CM( _curr_region == NULL && | |
3729 _finger == NULL && | |
3730 _region_limit == NULL, "invariant" ); | |
3731 if (_cm->verbose_low()) | |
3732 gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id); | |
3733 HeapRegion* claimed_region = _cm->claim_region(_task_id); | |
3734 if (claimed_region != NULL) { | |
3735 // Yes, we managed to claim one | |
3736 statsOnly( ++_regions_claimed ); | |
3737 | |
3738 if (_cm->verbose_low()) | |
3739 gclog_or_tty->print_cr("[%d] we successfully claimed " | |
3740 "region "PTR_FORMAT, | |
3741 _task_id, claimed_region); | |
3742 | |
3743 setup_for_region(claimed_region); | |
3744 tmp_guarantee_CM( _curr_region == claimed_region, "invariant" ); | |
3745 } | |
3746 // It is important to call the regular clock here. It might take | |
3747 // a while to claim a region if, for example, we hit a large | |
3748 // block of empty regions. So we need to call the regular clock | |
3749 // method once round the loop to make sure it's called | |
3750 // frequently enough. | |
3751 regular_clock_call(); | |
3752 } | |
3753 | |
3754 if (!has_aborted() && _curr_region == NULL) { | |
3755 tmp_guarantee_CM( _cm->out_of_regions(), | |
3756 "at this point we should be out of regions" ); | |
3757 } | |
3758 } while ( _curr_region != NULL && !has_aborted()); | |
3759 | |
3760 if (!has_aborted()) { | |
3761 // We cannot check whether the global stack is empty, since other | |
343 | 3762 // tasks might be pushing objects to it concurrently. We also cannot |
3763 // check if the region stack is empty because if a thread is aborting |
3764 // it can push a partially done region back. |
3765 tmp_guarantee_CM( _cm->out_of_regions(), |
342 | 3766 "at this point we should be out of regions" ); |
3767 | |
3768 if (_cm->verbose_low()) | |
3769 gclog_or_tty->print_cr("[%d] all regions claimed", _task_id); | |
3770 | |
3771 // Try to reduce the number of available SATB buffers so that | |
3772 // remark has less work to do. | |
3773 drain_satb_buffers(); | |
3774 } | |
3775 | |
3776 // Since we've done everything else, we can now totally drain the | |
3777 // local queue and global stack. | |
3778 drain_local_queue(false); | |
3779 drain_global_stack(false); | |
3780 | |
3781 // Attempt at work stealing from other task's queues. | |
3782 if (!has_aborted()) { | |
3783 // We have not aborted. This means that we have finished all that | |
3784 // we could. Let's try to do some stealing... | |
3785 | |
3786 // We cannot check whether the global stack is empty, since other | |
343 | 3787 // tasks might be pushing objects to it concurrently. We also cannot |
3788 // check if the region stack is empty because if a thread is aborting |
3789 // it can push a partially done region back. |
342 | 3790 guarantee( _cm->out_of_regions() && |
3791 _task_queue->size() == 0, "only way to reach here" ); | |
3792 | |
3793 if (_cm->verbose_low()) | |
3794 gclog_or_tty->print_cr("[%d] starting to steal", _task_id); | |
3795 | |
3796 while (!has_aborted()) { | |
3797 oop obj; | |
3798 statsOnly( ++_steal_attempts ); | |
3799 | |
3800 if (_cm->try_stealing(_task_id, &_hash_seed, obj)) { | |
3801 if (_cm->verbose_medium()) | |
3802 gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully", | |
3803 _task_id, (void*) obj); | |
3804 | |
3805 statsOnly( ++_steals ); | |
3806 | |
3807 tmp_guarantee_CM( _nextMarkBitMap->isMarked((HeapWord*) obj), | |
3808 "any stolen object should be marked" ); | |
3809 scan_object(obj); | |
3810 | |
3811 // And since we're towards the end, let's totally drain the | |
3812 // local queue and global stack. | |
3813 drain_local_queue(false); | |
3814 drain_global_stack(false); | |
3815 } else { | |
3816 break; | |
3817 } | |
3818 } | |
3819 } | |
3820 | |
3821 // We still haven't aborted. Now, let's try to get into the | |
3822 // termination protocol. | |
3823 if (!has_aborted()) { | |
3824 // We cannot check whether the global stack is empty, since other | |
343 | 3825 // tasks might be concurrently pushing objects on it. We also cannot |
3826 // check if the region stack is empty because if a thread is aborting |
3827 // it can push a partially done region back. |
342 | 3828 guarantee( _cm->out_of_regions() && |
3829 _task_queue->size() == 0, "only way to reach here" ); | |
3830 | |
3831 if (_cm->verbose_low()) | |
3832 gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id); | |
3833 | |
3834 _termination_start_time_ms = os::elapsedVTime() * 1000.0; | |
3835 // The CMTask class also extends the TerminatorTerminator class, | |
3836 // hence its should_exit_termination() method will also decide | |
3837 // whether to exit the termination protocol or not. | |
3838 bool finished = _cm->terminator()->offer_termination(this); | |
3839 double termination_end_time_ms = os::elapsedVTime() * 1000.0; | |
3840 _termination_time_ms += | |
3841 termination_end_time_ms - _termination_start_time_ms; | |
3842 | |
3843 if (finished) { | |
3844 // We're all done. | |
3845 | |
3846 if (_task_id == 0) { | |
3847 // let's allow task 0 to do this | |
3848 if (concurrent()) { | |
3849 guarantee( _cm->concurrent_marking_in_progress(), "invariant" ); | |
3850 // we need to set this to false before the next | |
3851 // safepoint. This way we ensure that the marking phase | |
3852 // doesn't observe any more heap expansions. | |
3853 _cm->clear_concurrent_marking_in_progress(); | |
3854 } | |
3855 } | |
3856 | |
3857 // We can now guarantee that the global stack is empty, since | |
3858 // all other tasks have finished. | |
3859 guarantee( _cm->out_of_regions() && | |
3860 _cm->region_stack_empty() && | |
3861 _cm->mark_stack_empty() && | |
3862 _task_queue->size() == 0 && | |
3863 !_cm->has_overflown() && | |
3864 !_cm->mark_stack_overflow() && | |
3865 !_cm->region_stack_overflow(), | |
3866 "only way to reach here" ); | |
3867 | |
3868 if (_cm->verbose_low()) | |
3869 gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id); | |
3870 } else { | |
3871 // Apparently there's more work to do. Let's abort this task. The |
3872 // caller will restart it and we can hopefully find more things to do. |
3873 | |
3874 if (_cm->verbose_low()) | |
3875 gclog_or_tty->print_cr("[%d] apparently there is more work to do", _task_id); | |
3876 | |
3877 set_has_aborted(); | |
3878 statsOnly( ++_aborted_termination ); | |
3879 } | |
3880 } | |
3881 | |
3882 // Mainly for debugging purposes to make sure that a pointer to the | |
3883 // closure which was statically allocated in this frame doesn't | |
3884 // escape it by accident. | |
3885 set_oop_closure(NULL); | |
3886 double end_time_ms = os::elapsedVTime() * 1000.0; | |
3887 double elapsed_time_ms = end_time_ms - _start_time_ms; | |
3888 // Update the step history. | |
3889 _step_times_ms.add(elapsed_time_ms); | |
3890 | |
3891 if (has_aborted()) { | |
3892 // The task was aborted for some reason. | |
3893 | |
3894 statsOnly( ++_aborted ); | |
3895 | |
3896 if (_has_aborted_timed_out) { | |
3897 double diff_ms = elapsed_time_ms - _time_target_ms; | |
3898 // Keep statistics of how well we did with respect to hitting | |
3899 // our target only if we actually timed out (if we aborted for | |
3900 // other reasons, then the results might get skewed). | |
3901 _marking_step_diffs_ms.add(diff_ms); | |
3902 } | |
3903 | |
3904 if (_cm->has_overflown()) { | |
3905 // This is the interesting one. We aborted because a global | |
3906 // overflow was raised. This means we have to restart the | |
3907 // marking phase and start iterating over regions. However, in | |
3908 // order to do this we have to make sure that all tasks stop | |
3909 // what they are doing and re-initialise in a safe manner. We | |
3910 // will achieve this with the use of two barrier sync points. | |
3911 | |
3912 if (_cm->verbose_low()) | |
3913 gclog_or_tty->print_cr("[%d] detected overflow", _task_id); | |
3914 | |
3915 _cm->enter_first_sync_barrier(_task_id); | |
3916 // When we exit this sync barrier we know that all tasks have | |
3917 // stopped doing marking work. So, it's now safe to | |
3918 // re-initialise our data structures. At the end of this method, | |
3919 // task 0 will clear the global data structures. | |
3920 | |
3921 statsOnly( ++_aborted_overflow ); | |
3922 | |
3923 // We clear the local state of this task... | |
3924 clear_region_fields(); | |
3925 | |
3926 // ...and enter the second barrier. | |
3927 _cm->enter_second_sync_barrier(_task_id); | |
3928 // At this point everything has been re-initialised and we're |
3929 // ready to restart. | |
3930 } | |
3931 | |
3932 if (_cm->verbose_low()) { | |
3933 gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, " | |
3934 "elapsed = %1.2lfms <<<<<<<<<<", | |
3935 _task_id, _time_target_ms, elapsed_time_ms); | |
3936 if (_cm->has_aborted()) | |
3937 gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========", | |
3938 _task_id); | |
3939 } | |
3940 } else { | |
3941 if (_cm->verbose_low()) | |
3942 gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, " | |
3943 "elapsed = %1.2lfms <<<<<<<<<<", | |
3944 _task_id, _time_target_ms, elapsed_time_ms); | |
3945 } | |
3946 | |
3947 _claimed = false; | |
3948 } | |
3949 | |
3950 CMTask::CMTask(int task_id, | |
3951 ConcurrentMark* cm, | |
3952 CMTaskQueue* task_queue, | |
3953 CMTaskQueueSet* task_queues) | |
3954 : _g1h(G1CollectedHeap::heap()), | |
3955 _task_id(task_id), _cm(cm), | |
3956 _claimed(false), | |
3957 _nextMarkBitMap(NULL), _hash_seed(17), | |
3958 _task_queue(task_queue), | |
3959 _task_queues(task_queues), | |
3960 _oop_closure(NULL) { | |
3961 guarantee( task_queue != NULL, "invariant" ); | |
3962 guarantee( task_queues != NULL, "invariant" ); | |
3963 | |
3964 statsOnly( _clock_due_to_scanning = 0; | |
3965 _clock_due_to_marking = 0 ); | |
3966 | |
3967 _marking_step_diffs_ms.add(0.5); | |
3968 } |