comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 7397:442f942757c0
8000244: G1: Ergonomically set MarkStackSize and use virtual space for global marking stack
Summary: Set MarkStackSize ergonomically, based on the number of parallel marking threads, with a reasonable minimum. If marking has to be restarted due to an overflow, expand the marking stack, up to a reasonable maximum. Allocate the underlying space for the marking stack from virtual memory.
Reviewed-by: jmasa, brutisso
author | johnc
date | Mon, 01 Oct 2012 09:28:13 -0700
parents | 4202510ee0fe
children | d275c3dc73e6
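The ergonomic rule the summary describes appears in the new code at lines 635-647 of the right-hand revision below. As a rough standalone sketch, with hypothetical names standing in for the HotSpot flags and macros, it reduces to a clamp:

```cpp
#include <algorithm>
#include <cstddef>

// Hypothetical sketch of the ergonomic sizing rule: scale the global mark
// stack with the number of parallel marking threads, but never below the
// configured MarkStackSize or above MarkStackSizeMax.
size_t ergonomic_mark_stack_size(size_t mark_stack_size,      // MarkStackSize
                                 size_t mark_stack_size_max,  // MarkStackSizeMax
                                 size_t marking_threads,
                                 size_t task_queue_size) {    // TASKQUEUE_SIZE
  size_t scaled = marking_threads * task_queue_size;
  return std::min(mark_stack_size_max, std::max(mark_stack_size, scaled));
}
```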
7208:eade6b2e4782 (old) | 7397:442f942757c0 (new)
44 #include "runtime/java.hpp" | 44 #include "runtime/java.hpp" |
45 #include "services/memTracker.hpp" | 45 #include "services/memTracker.hpp" |
46 | 46 |
47 // Concurrent marking bit map wrapper | 47 // Concurrent marking bit map wrapper |
48 | 48 |
49 CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) : | 49 CMBitMapRO::CMBitMapRO(int shifter) : |
50 _bm((uintptr_t*)NULL,0), | 50 _bm(), |
51 _shifter(shifter) { | 51 _shifter(shifter) { |
52 _bmStartWord = (HeapWord*)(rs.base()); | 52 _bmStartWord = 0; |
53 _bmWordSize = rs.size()/HeapWordSize; // rs.size() is in bytes | 53 _bmWordSize = 0; |
54 ReservedSpace brs(ReservedSpace::allocation_align_size_up( | |
55 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); | |
56 | |
57 MemTracker::record_virtual_memory_type((address)brs.base(), mtGC); | |
58 | |
59 guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map"); | |
60 // For now we'll just commit all of the bit map up fromt. | |
61 // Later on we'll try to be more parsimonious with swap. | |
62 guarantee(_virtual_space.initialize(brs, brs.size()), | |
63 "couldn't reseve backing store for concurrent marking bit map"); | |
64 assert(_virtual_space.committed_size() == brs.size(), | |
65 "didn't reserve backing store for all of concurrent marking bit map?"); | |
66 _bm.set_map((uintptr_t*)_virtual_space.low()); | |
67 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= | |
68 _bmWordSize, "inconsistency in bit map sizing"); | |
69 _bm.set_size(_bmWordSize >> _shifter); | |
70 } | 54 } |
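The constructor change above sets the pattern for the whole changeset: the old constructor reserved memory itself and aborted the VM (`guarantee`) on failure, while the new constructor is trivial and the fallible work moves into a `bool`-returning `allocate()` so the caller can degrade gracefully. A minimal sketch of the two-phase pattern, with a hypothetical class:

```cpp
#include <cstddef>
#include <new>

// Two-phase initialization: the constructor cannot fail; all fallible
// resource acquisition is deferred to allocate(), which reports failure
// instead of aborting the process.
class TwoPhaseBitMap {
  unsigned char* _bits;
  size_t         _size_in_bytes;
 public:
  TwoPhaseBitMap() : _bits(NULL), _size_in_bytes(0) {}

  bool allocate(size_t size_in_bytes) {
    _bits = new (std::nothrow) unsigned char[size_in_bytes]();
    if (_bits == NULL) {
      return false;  // the caller decides how to degrade
    }
    _size_in_bytes = size_in_bytes;
    return true;
  }
};
```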
71 | 55 |
72 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr, | 56 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr, |
73 HeapWord* limit) const { | 57 HeapWord* limit) const { |
74 // First we must round addr *up* to a possible object boundary. | 58 // First we must round addr *up* to a possible object boundary. |
106 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check"); | 90 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check"); |
107 return (int) (diff >> _shifter); | 91 return (int) (diff >> _shifter); |
108 } | 92 } |
109 | 93 |
110 #ifndef PRODUCT | 94 #ifndef PRODUCT |
111 bool CMBitMapRO::covers(ReservedSpace rs) const { | 95 bool CMBitMapRO::covers(ReservedSpace heap_rs) const { |
112 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); | 96 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); |
113 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize, | 97 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize, |
114 "size inconsistency"); | 98 "size inconsistency"); |
115 return _bmStartWord == (HeapWord*)(rs.base()) && | 99 return _bmStartWord == (HeapWord*)(heap_rs.base()) && |
116 _bmWordSize == rs.size()>>LogHeapWordSize; | 100 _bmWordSize == heap_rs.size()>>LogHeapWordSize; |
117 } | 101 } |
118 #endif | 102 #endif |
103 | |
104 bool CMBitMap::allocate(ReservedSpace heap_rs) { | |
105 _bmStartWord = (HeapWord*)(heap_rs.base()); | |
106 _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes | |
107 ReservedSpace brs(ReservedSpace::allocation_align_size_up( | |
108 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); | |
109 if (!brs.is_reserved()) { | |
110 warning("ConcurrentMark marking bit map allocation failure"); | |
111 return false; | |
112 } | |
113 MemTracker::record_virtual_memory_type((address)brs.base(), mtGC); | |
114 // For now we'll just commit all of the bit map up front. | |
115 // Later on we'll try to be more parsimonious with swap. | |
116 if (!_virtual_space.initialize(brs, brs.size())) { | |
117 warning("ConcurrentMark marking bit map backing store failure"); | |
118 return false; | |
119 } | |
120 assert(_virtual_space.committed_size() == brs.size(), | |
121 "didn't reserve backing store for all of concurrent marking bit map?"); | |
122 _bm.set_map((uintptr_t*)_virtual_space.low()); | |
123 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= | |
124 _bmWordSize, "inconsistency in bit map sizing"); | |
125 _bm.set_size(_bmWordSize >> _shifter); | |
126 return true; | |
127 } | |
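The sizing expression at new lines 107-108 is worth unpacking: the bitmap needs one bit per 2^`_shifter` heap words, so the byte count is `(_bmWordSize >> (_shifter + LogBitsPerByte)) + 1`. A worked example under assumed parameters (8-byte `HeapWord`s, `_shifter == 0`, i.e. one mark bit per word, and `LogBitsPerByte == 3`):

```cpp
#include <cstddef>
#include <cstdio>

// Worked example of the bit map sizing above; the heap size and parameter
// values are assumptions for illustration.
int main() {
  const size_t heap_bytes = 1024UL * 1024 * 1024;  // assume a 1 GB heap
  const size_t word_size  = 8;                     // HeapWordSize
  const int    shifter    = 0;                     // one mark bit per word
  const int    log_bits   = 3;                     // LogBitsPerByte

  size_t bm_word_size = heap_bytes / word_size;               // 2^27 words
  size_t bm_bytes = (bm_word_size >> (shifter + log_bits)) + 1;

  // Prints 16777217 (~16 MB): each marking bitmap costs ~heap_size / 64,
  // and G1 keeps two of them (prev and next).
  printf("bitmap bytes: %zu\n", bm_bytes);
  return 0;
}
```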
119 | 128 |
120 void CMBitMap::clearAll() { | 129 void CMBitMap::clearAll() { |
121 _bm.clear(); | 130 _bm.clear(); |
122 return; | 131 return; |
123 } | 132 } |
161 , _drain_in_progress(false) | 170 , _drain_in_progress(false) |
162 , _drain_in_progress_yields(false) | 171 , _drain_in_progress_yields(false) |
163 #endif | 172 #endif |
164 {} | 173 {} |
165 | 174 |
166 void CMMarkStack::allocate(size_t size) { | 175 bool CMMarkStack::allocate(size_t capacity) { |
167 _base = NEW_C_HEAP_ARRAY(oop, size, mtGC); | 176 // allocate a stack of the requisite depth |
168 if (_base == NULL) { | 177 ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop))); |
169 vm_exit_during_initialization("Failed to allocate CM region mark stack"); | 178 if (!rs.is_reserved()) { |
170 } | 179 warning("ConcurrentMark MarkStack allocation failure"); |
171 _index = 0; | 180 return false; |
172 _capacity = (jint) size; | 181 } |
182 MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); | |
183 if (!_virtual_space.initialize(rs, rs.size())) { | |
184 warning("ConcurrentMark MarkStack backing store failure"); | |
185 // Release the virtual memory reserved for the marking stack | |
186 rs.release(); | |
187 return false; | |
188 } | |
189 assert(_virtual_space.committed_size() == rs.size(), | |
190 "Didn't reserve backing store for all of ConcurrentMark stack?"); | |
191 _base = (oop*) _virtual_space.low(); | |
192 setEmpty(); | |
193 _capacity = (jint) capacity; | |
173 _saved_index = -1; | 194 _saved_index = -1; |
174 NOT_PRODUCT(_max_depth = 0); | 195 NOT_PRODUCT(_max_depth = 0); |
196 return true; | |
197 } | |
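`CMMarkStack::allocate` follows HotSpot's reserve-then-commit split: `ReservedSpace` claims an address range, `VirtualSpace::initialize` commits it, the range is tagged `mtGC` for Native Memory Tracking, and the reservation is released again if the commit fails. As a hedged illustration of the same idea using POSIX calls rather than HotSpot's internal classes:

```cpp
#include <sys/mman.h>
#include <cstddef>

// Illustration only: reserve-then-commit with mmap/mprotect, standing in
// for ReservedSpace (reserve) and VirtualSpace (commit).
void* reserve_and_commit(size_t bytes) {
  // Reserve: claim address space without any accessible backing pages.
  void* base = mmap(NULL, bytes, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED) {
    return NULL;                 // reservation failure
  }
  // Commit: make the pages readable and writable.
  if (mprotect(base, bytes, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, bytes);         // mirrors rs.release() on commit failure
    return NULL;                 // backing-store failure
  }
  return base;
}
```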
198 | |
199 void CMMarkStack::expand() { | |
200 // Called, during remark, if we've overflown the marking stack during marking. | |
201 assert(isEmpty(), "stack should been emptied while handling overflow"); | |
202 assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted"); | |
203 // Clear expansion flag | |
204 _should_expand = false; | |
205 if (_capacity == (jint) MarkStackSizeMax) { | |
206 if (PrintGCDetails && Verbose) { | |
207 gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit"); | |
208 } | |
209 return; | |
210 } | |
211 // Double capacity if possible | |
212 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax); | |
213 // Do not give up existing stack until we have managed to | |
214 // get the double capacity that we desired. | |
215 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity * | |
216 sizeof(oop))); | |
217 if (rs.is_reserved()) { | |
218 // Release the backing store associated with old stack | |
219 _virtual_space.release(); | |
220 // Reinitialize virtual space for new stack | |
221 if (!_virtual_space.initialize(rs, rs.size())) { | |
222 fatal("Not enough swap for expanded marking stack capacity"); | |
223 } | |
224 _base = (oop*)(_virtual_space.low()); | |
225 _index = 0; | |
226 _capacity = new_capacity; | |
227 } else { | |
228 if (PrintGCDetails && Verbose) { | |
229 // Failed to double capacity, continue; | |
230 gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from " | |
231 SIZE_FORMAT"K to " SIZE_FORMAT"K", | |
232 _capacity / K, new_capacity / K); | |
233 } | |
234 } | |
235 } | |
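The expansion policy above is doubling with a cap, and it deliberately keeps the old backing store until the new reservation has succeeded ("Do not give up existing stack..."): if the larger reservation fails, marking continues, benignly, with the old capacity. A sketch of that ordering, with hypothetical free-standing types:

```cpp
#include <algorithm>
#include <cstddef>
#include <new>

// Doubling-with-a-cap expansion: only discard the old backing store once
// the new, larger one has actually been obtained.
struct Stack {
  void*  base;
  size_t capacity;

  bool try_expand(size_t capacity_max) {
    if (capacity == capacity_max) {
      return false;                          // benign: already at the cap
    }
    size_t new_capacity = std::min(capacity * 2, capacity_max);
    void* new_base = ::operator new(new_capacity, std::nothrow);
    if (new_base == NULL) {
      return false;                          // benign: keep the old stack
    }
    ::operator delete(base);                 // only now give up the old store
    base = new_base;
    capacity = new_capacity;
    return true;
  }
};
```

Note that no contents are copied on expansion: per the assert at the top of `expand()`, the stack is only expanded after it has been emptied during overflow handling.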
236 | |
237 void CMMarkStack::set_should_expand() { | |
238 // If we're resetting the marking state because of an | |
239 // marking stack overflow, record that we should, if | |
240 // possible, expand the stack. | |
241 _should_expand = _cm->has_overflown(); | |
175 } | 242 } |
176 | 243 |
177 CMMarkStack::~CMMarkStack() { | 244 CMMarkStack::~CMMarkStack() { |
178 if (_base != NULL) { | 245 if (_base != NULL) { |
179 FREE_C_HEAP_ARRAY(oop, _base, mtGC); | 246 _base = NULL; |
247 _virtual_space.release(); | |
180 } | 248 } |
181 } | 249 } |
182 | 250 |
183 void CMMarkStack::par_push(oop ptr) { | 251 void CMMarkStack::par_push(oop ptr) { |
184 while (true) { | 252 while (true) { |
215 return; | 283 return; |
216 } | 284 } |
217 jint res = Atomic::cmpxchg(next_index, &_index, index); | 285 jint res = Atomic::cmpxchg(next_index, &_index, index); |
218 if (res == index) { | 286 if (res == index) { |
219 for (int i = 0; i < n; i++) { | 287 for (int i = 0; i < n; i++) { |
220 int ind = index + i; | 288 int ind = index + i; |
221 assert(ind < _capacity, "By overflow test above."); | 289 assert(ind < _capacity, "By overflow test above."); |
222 _base[ind] = ptr_arr[i]; | 290 _base[ind] = ptr_arr[i]; |
223 } | 291 } |
224 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); | 292 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); |
225 return; | 293 return; |
226 } | 294 } |
227 // Otherwise, we need to try again. | 295 // Otherwise, we need to try again. |
228 } | 296 } |
229 } | 297 } |
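The loop above claims a block of `n` slots by CAS-advancing `_index` and retries if another pusher won the race; once a range is claimed, no other thread can write into it. A hedged re-creation in free-standing form, using `std::atomic` in place of HotSpot's `Atomic::cmpxchg` (the real code records overflow in a flag rather than returning a status):

```cpp
#include <atomic>

// CAS-claim pattern: advance the top index first, then fill the claimed
// slot range. Hypothetical free-standing form of the hunk above.
template <typename T>
bool par_push_n(T* base, std::atomic<int>& top, int capacity,
                const T* src, int n) {
  while (true) {
    int index = top.load();
    int next_index = index + n;
    if (next_index > capacity) {
      return false;                        // overflow: the caller handles it
    }
    // Claim [index, index + n); on success no other pusher can claim it.
    if (top.compare_exchange_weak(index, next_index)) {
      for (int i = 0; i < n; i++) {
        base[index + i] = src[i];
      }
      return true;
    }
    // CAS failed: another thread moved the index, so retry.
  }
}
```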
230 | |
231 | 298 |
232 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) { | 299 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) { |
233 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | 300 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
234 jint start = _index; | 301 jint start = _index; |
235 jint next_index = start + n; | 302 jint next_index = start + n; |
242 for (int i = 0; i < n; i++) { | 309 for (int i = 0; i < n; i++) { |
243 int ind = start + i; | 310 int ind = start + i; |
244 assert(ind < _capacity, "By overflow test above."); | 311 assert(ind < _capacity, "By overflow test above."); |
245 _base[ind] = ptr_arr[i]; | 312 _base[ind] = ptr_arr[i]; |
246 } | 313 } |
247 } | 314 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); |
248 | 315 } |
249 | 316 |
250 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { | 317 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { |
251 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | 318 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
252 jint index = _index; | 319 jint index = _index; |
253 if (index == 0) { | 320 if (index == 0) { |
254 *n = 0; | 321 *n = 0; |
255 return false; | 322 return false; |
256 } else { | 323 } else { |
257 int k = MIN2(max, index); | 324 int k = MIN2(max, index); |
258 jint new_ind = index - k; | 325 jint new_ind = index - k; |
259 for (int j = 0; j < k; j++) { | 326 for (int j = 0; j < k; j++) { |
260 ptr_arr[j] = _base[new_ind + j]; | 327 ptr_arr[j] = _base[new_ind + j]; |
261 } | 328 } |
262 _index = new_ind; | 329 _index = new_ind; |
263 *n = k; | 330 *n = k; |
402 | 469 |
403 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) { | 470 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) { |
404 return MAX2((n_par_threads + 2) / 4, 1U); | 471 return MAX2((n_par_threads + 2) / 4, 1U); |
405 } | 472 } |
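`scale_parallel_threads` sizes concurrent marking at roughly a quarter of the parallel GC threads, rounded, with a floor of one:

```cpp
#include <algorithm>

// The scaling rule above, restated with sample values:
//   1 -> 1, 4 -> 1, 8 -> 2, 13 -> 3, 16 -> 4
unsigned scale_parallel_threads(unsigned n_par_threads) {
  return std::max((n_par_threads + 2) / 4, 1U);
}
```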
406 | 473 |
407 ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) : | 474 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) : |
408 _markBitMap1(rs, MinObjAlignment - 1), | 475 _g1h(g1h), |
409 _markBitMap2(rs, MinObjAlignment - 1), | 476 _markBitMap1(MinObjAlignment - 1), |
477 _markBitMap2(MinObjAlignment - 1), | |
410 | 478 |
411 _parallel_marking_threads(0), | 479 _parallel_marking_threads(0), |
412 _max_parallel_marking_threads(0), | 480 _max_parallel_marking_threads(0), |
413 _sleep_factor(0.0), | 481 _sleep_factor(0.0), |
414 _marking_task_overhead(1.0), | 482 _marking_task_overhead(1.0), |
415 _cleanup_sleep_factor(0.0), | 483 _cleanup_sleep_factor(0.0), |
416 _cleanup_task_overhead(1.0), | 484 _cleanup_task_overhead(1.0), |
417 _cleanup_list("Cleanup List"), | 485 _cleanup_list("Cleanup List"), |
418 _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/), | 486 _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/), |
419 _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >> | 487 _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >> |
420 CardTableModRefBS::card_shift, | 488 CardTableModRefBS::card_shift, |
421 false /* in_resource_area*/), | 489 false /* in_resource_area*/), |
422 | 490 |
423 _prevMarkBitMap(&_markBitMap1), | 491 _prevMarkBitMap(&_markBitMap1), |
424 _nextMarkBitMap(&_markBitMap2), | 492 _nextMarkBitMap(&_markBitMap2), |
425 | 493 |
426 _markStack(this), | 494 _markStack(this), |
447 _total_rs_scrub_time(0.0), | 515 _total_rs_scrub_time(0.0), |
448 | 516 |
449 _parallel_workers(NULL), | 517 _parallel_workers(NULL), |
450 | 518 |
451 _count_card_bitmaps(NULL), | 519 _count_card_bitmaps(NULL), |
452 _count_marked_bytes(NULL) { | 520 _count_marked_bytes(NULL), |
521 _completed_initialization(false) { | |
453 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel; | 522 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel; |
454 if (verbose_level < no_verbose) { | 523 if (verbose_level < no_verbose) { |
455 verbose_level = no_verbose; | 524 verbose_level = no_verbose; |
456 } | 525 } |
457 if (verbose_level > high_verbose) { | 526 if (verbose_level > high_verbose) { |
462 if (verbose_low()) { | 531 if (verbose_low()) { |
463 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", " | 532 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", " |
464 "heap end = "PTR_FORMAT, _heap_start, _heap_end); | 533 "heap end = "PTR_FORMAT, _heap_start, _heap_end); |
465 } | 534 } |
466 | 535 |
467 _markStack.allocate(MarkStackSize); | 536 if (!_markBitMap1.allocate(heap_rs)) { |
537 warning("Failed to allocate first CM bit map"); | |
538 return; | |
539 } | |
540 if (!_markBitMap2.allocate(heap_rs)) { | |
541 warning("Failed to allocate second CM bit map"); | |
542 return; | |
543 } | |
468 | 544 |
469 // Create & start a ConcurrentMark thread. | 545 // Create & start a ConcurrentMark thread. |
470 _cmThread = new ConcurrentMarkThread(this); | 546 _cmThread = new ConcurrentMarkThread(this); |
471 assert(cmThread() != NULL, "CM Thread should have been created"); | 547 assert(cmThread() != NULL, "CM Thread should have been created"); |
472 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); | 548 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); |
473 | 549 |
474 _g1h = G1CollectedHeap::heap(); | |
475 assert(CGC_lock != NULL, "Where's the CGC_lock?"); | 550 assert(CGC_lock != NULL, "Where's the CGC_lock?"); |
476 assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency"); | 551 assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency"); |
477 assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency"); | 552 assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency"); |
478 | 553 |
479 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); | 554 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); |
480 satb_qs.set_buffer_size(G1SATBBufferSize); | 555 satb_qs.set_buffer_size(G1SATBBufferSize); |
481 | 556 |
482 _root_regions.init(_g1h, this); | 557 _root_regions.init(_g1h, this); |
483 | 558 |
484 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC); | |
485 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC); | |
486 | |
487 _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC); | |
488 _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC); | |
489 | |
490 BitMap::idx_t card_bm_size = _card_bm.size(); | |
491 | |
492 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail | |
493 _active_tasks = _max_worker_id; | |
494 for (uint i = 0; i < _max_worker_id; ++i) { | |
495 CMTaskQueue* task_queue = new CMTaskQueue(); | |
496 task_queue->initialize(); | |
497 _task_queues->register_queue(i, task_queue); | |
498 | |
499 _count_card_bitmaps[i] = BitMap(card_bm_size, false); | |
500 _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions, mtGC); | |
501 | |
502 _tasks[i] = new CMTask(i, this, | |
503 _count_marked_bytes[i], | |
504 &_count_card_bitmaps[i], | |
505 task_queue, _task_queues); | |
506 | |
507 _accum_task_vtime[i] = 0.0; | |
508 } | |
509 | |
510 // Calculate the card number for the bottom of the heap. Used | |
511 // in biasing indexes into the accounting card bitmaps. | |
512 _heap_bottom_card_num = | |
513 intptr_t(uintptr_t(_g1h->reserved_region().start()) >> | |
514 CardTableModRefBS::card_shift); | |
515 | |
516 // Clear all the liveness counting data | |
517 clear_all_count_data(); | |
518 | |
519 if (ConcGCThreads > ParallelGCThreads) { | 559 if (ConcGCThreads > ParallelGCThreads) { |
520 vm_exit_during_initialization("Can't have more ConcGCThreads " | 560 warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") " |
521 "than ParallelGCThreads."); | 561 "than ParallelGCThreads (" UINT32_FORMAT ").", |
562 ConcGCThreads, ParallelGCThreads); | |
563 return; | |
522 } | 564 } |
523 if (ParallelGCThreads == 0) { | 565 if (ParallelGCThreads == 0) { |
524 // if we are not running with any parallel GC threads we will not | 566 // if we are not running with any parallel GC threads we will not |
525 // spawn any marking threads either | 567 // spawn any marking threads either |
526 _parallel_marking_threads = 0; | 568 _parallel_marking_threads = 0; |
588 } else { | 630 } else { |
589 _parallel_workers->initialize_workers(); | 631 _parallel_workers->initialize_workers(); |
590 } | 632 } |
591 } | 633 } |
592 | 634 |
635 if (FLAG_IS_DEFAULT(MarkStackSize)) { | |
636 uintx mark_stack_size = | |
637 MIN2(MarkStackSizeMax, | |
638 MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE))); | |
639 // Verify that the calculated value for MarkStackSize is in range. | |
640 // It would be nice to use the private utility routine from Arguments. | |
641 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) { | |
642 warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): " | |
643 "must be between " UINTX_FORMAT " and " UINTX_FORMAT, | |
644 mark_stack_size, 1, MarkStackSizeMax); | |
645 return; | |
646 } | |
647 FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size); | |
648 } else { | |
649 // Verify MarkStackSize is in range. | |
650 if (FLAG_IS_CMDLINE(MarkStackSize)) { | |
651 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) { | |
652 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { | |
653 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): " | |
654 "must be between " UINTX_FORMAT " and " UINTX_FORMAT, | |
655 MarkStackSize, 1, MarkStackSizeMax); | |
656 return; | |
657 } | |
658 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) { | |
659 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { | |
660 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")" | |
661 " or for MarkStackSizeMax (" UINTX_FORMAT ")", | |
662 MarkStackSize, MarkStackSizeMax); | |
663 return; | |
664 } | |
665 } | |
666 } | |
667 } | |
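The branching above encodes a small decision table over which flags were set on the command line. A hedged restatement (hypothetical free-standing form; the real code warns and aborts initialization rather than returning a bool):

```cpp
#include <cstddef>

// Which check applies depends on how MarkStackSize / MarkStackSizeMax
// were set:
//   size default, max default -> compute ergonomically, then clamp-check
//   size cmdline, max default -> require 1 <= size <= default max
//   size cmdline, max cmdline -> require 1 <= size <= given max
bool mark_stack_size_valid(size_t size, size_t size_max) {
  return size >= 1 && size <= size_max;
}
```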
668 | |
669 if (!_markStack.allocate(MarkStackSize)) { | |
670 warning("Failed to allocate CM marking stack"); | |
671 return; | |
672 } | |
673 | |
674 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC); | |
675 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC); | |
676 | |
677 _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC); | |
678 _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC); | |
679 | |
680 BitMap::idx_t card_bm_size = _card_bm.size(); | |
681 | |
682 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail | |
683 _active_tasks = _max_worker_id; | |
684 | |
685 size_t max_regions = (size_t) _g1h->max_regions(); | |
686 for (uint i = 0; i < _max_worker_id; ++i) { | |
687 CMTaskQueue* task_queue = new CMTaskQueue(); | |
688 task_queue->initialize(); | |
689 _task_queues->register_queue(i, task_queue); | |
690 | |
691 _count_card_bitmaps[i] = BitMap(card_bm_size, false); | |
692 _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC); | |
693 | |
694 _tasks[i] = new CMTask(i, this, | |
695 _count_marked_bytes[i], | |
696 &_count_card_bitmaps[i], | |
697 task_queue, _task_queues); | |
698 | |
699 _accum_task_vtime[i] = 0.0; | |
700 } | |
701 | |
702 // Calculate the card number for the bottom of the heap. Used | |
703 // in biasing indexes into the accounting card bitmaps. | |
704 _heap_bottom_card_num = | |
705 intptr_t(uintptr_t(_g1h->reserved_region().start()) >> | |
706 CardTableModRefBS::card_shift); | |
707 | |
708 // Clear all the liveness counting data | |
709 clear_all_count_data(); | |
710 | |
593 // so that the call below can read a sensible value | 711 // so that the call below can read a sensible value |
594 _heap_start = (HeapWord*) rs.base(); | 712 _heap_start = (HeapWord*) heap_rs.base(); |
595 set_non_marking_state(); | 713 set_non_marking_state(); |
714 _completed_initialization = true; | |
596 } | 715 } |
597 | 716 |
598 void ConcurrentMark::update_g1_committed(bool force) { | 717 void ConcurrentMark::update_g1_committed(bool force) { |
599 // If concurrent marking is not in progress, then we do not need to | 718 // If concurrent marking is not in progress, then we do not need to |
600 // update _heap_end. | 719 // update _heap_end. |
1161 Universe::heap()->prepare_for_verify(); | 1280 Universe::heap()->prepare_for_verify(); |
1162 Universe::verify(/* silent */ false, | 1281 Universe::verify(/* silent */ false, |
1163 /* option */ VerifyOption_G1UseNextMarking); | 1282 /* option */ VerifyOption_G1UseNextMarking); |
1164 } | 1283 } |
1165 assert(!restart_for_overflow(), "sanity"); | 1284 assert(!restart_for_overflow(), "sanity"); |
1285 } | |
1286 | |
1287 // Expand the marking stack, if we have to and if we can. | |
1288 if (_markStack.should_expand()) { | |
1289 _markStack.expand(); | |
1166 } | 1290 } |
1167 | 1291 |
1168 // Reset the marking state if marking completed | 1292 // Reset the marking state if marking completed |
1169 if (!restart_for_overflow()) { | 1293 if (!restart_for_overflow()) { |
1170 set_non_marking_state(); | 1294 set_non_marking_state(); |
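The new lines 1287-1290 hook expansion into the remark path. A distillation of the control flow, with stub types (hypothetical, not the real interfaces):

```cpp
// If remark overflowed the stack, marking restarts; expansion runs between
// attempts, so the retry proceeds with a larger stack.
struct MarkStack {
  bool should_expand() { return false; }   // stub
  void expand() {}                         // stub
};

void finish_remark(MarkStack& stack, bool restart_for_overflow) {
  if (stack.should_expand()) {
    stack.expand();              // double capacity, up to MarkStackSizeMax
  }
  if (!restart_for_overflow) {
    // marking completed: reset to the non-marking state
  }
  // otherwise the caller restarts marking with the (possibly) larger stack
}
```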
2783 _markStack.oops_do(&cl); | 2907 _markStack.oops_do(&cl); |
2784 | 2908 |
2785 // Verify entries on the task queues | 2909 // Verify entries on the task queues |
2786 for (uint i = 0; i < _max_worker_id; i += 1) { | 2910 for (uint i = 0; i < _max_worker_id; i += 1) { |
2787 cl.set_phase(VerifyNoCSetOopsQueues, i); | 2911 cl.set_phase(VerifyNoCSetOopsQueues, i); |
2788 OopTaskQueue* queue = _task_queues->queue(i); | 2912 CMTaskQueue* queue = _task_queues->queue(i); |
2789 queue->oops_do(&cl); | 2913 queue->oops_do(&cl); |
2790 } | 2914 } |
2791 } | 2915 } |
2792 | 2916 |
2793 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); | 2917 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); |
2838 } | 2962 } |
2839 } | 2963 } |
2840 #endif // PRODUCT | 2964 #endif // PRODUCT |
2841 | 2965 |
2842 void ConcurrentMark::clear_marking_state(bool clear_overflow) { | 2966 void ConcurrentMark::clear_marking_state(bool clear_overflow) { |
2843 _markStack.setEmpty(); | 2967 _markStack.set_should_expand(); |
2844 _markStack.clear_overflow(); | 2968 _markStack.setEmpty(); // Also clears the _markStack overflow flag |
2845 if (clear_overflow) { | 2969 if (clear_overflow) { |
2846 clear_has_overflown(); | 2970 clear_has_overflown(); |
2847 } else { | 2971 } else { |
2848 assert(has_overflown(), "pre-condition"); | 2972 assert(has_overflown(), "pre-condition"); |
2849 } | 2973 } |
2850 _finger = _heap_start; | 2974 _finger = _heap_start; |
2851 | 2975 |
2852 for (uint i = 0; i < _max_worker_id; ++i) { | 2976 for (uint i = 0; i < _max_worker_id; ++i) { |
2853 OopTaskQueue* queue = _task_queues->queue(i); | 2977 CMTaskQueue* queue = _task_queues->queue(i); |
2854 queue->set_empty(); | 2978 queue->set_empty(); |
2855 } | 2979 } |
2856 } | 2980 } |
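Ordering matters in `clear_marking_state`: the expand request is latched from the overflow flag before `setEmpty()` and the conditional `clear_has_overflown()` wipe the overflow state. A condensed model, with hypothetical fields:

```cpp
// If the latch came after the clear, the signal to grow the stack for the
// next marking attempt would be lost.
struct MarkingState {
  bool overflown;
  bool should_expand;

  void clear_marking_state(bool clear_overflow) {
    should_expand = overflown;   // latch first (set_should_expand)
    if (clear_overflow) {
      overflown = false;         // then clear (clear_has_overflown)
    }
  }
};
```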
2857 | 2981 |
2858 // Aggregate the counting data that was constructed concurrently | 2982 // Aggregate the counting data that was constructed concurrently |