src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 1023:11d4857fe5e1
(comparison against parent 1022:4c3458a31e17)

6888619: G1: too many guarantees in concurrent marking
Summary: Change more guarantees in concurrent marking into asserts.
Reviewed-by: apetrusenko, iveresov

author:   tonyp
date:     Wed, 07 Oct 2009 10:09:57 -0400
parents:  4c3458a31e17
children: dfdaf65c3423
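The whole point of this change is the difference between HotSpot's two checking macros: guarantee() is compiled into every build and aborts the VM when its condition fails, while assert() exists only in debug builds (tmp_guarantee_CM was a temporary marking-local alias being retired here). Demoting hot-path checks to assert() removes their cost from product builds. A minimal sketch of the pattern, with hypothetical macro names rather than HotSpot's actual debug.hpp:

    #include <cstdio>
    #include <cstdlib>

    // Checked in every build, like HotSpot's guarantee().
    #define my_guarantee(cond, msg)                                       \
      do {                                                                \
        if (!(cond)) {                                                    \
          std::fprintf(stderr, "guarantee(%s) failed: %s\n", #cond, msg); \
          std::abort();                                                   \
        }                                                                 \
      } while (0)

    #ifdef ASSERT   // debug builds only, like HotSpot's assert()
      #define my_assert(cond, msg) my_guarantee(cond, msg)
    #else           // product builds: the check vanishes entirely
      #define my_assert(cond, msg) ((void) 0)
    #endif

    int main() {
      int ind = 3, capacity = 4;
      my_assert(ind < capacity, "By overflow test above.");  // free in product
      my_guarantee(capacity > 0, "peace of mind");           // always checked
      return 0;
    }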
@@ -235,11 +235,11 @@
   }
   // Otherwise.
   _index = next_index;
   for (int i = 0; i < n; i++) {
     int ind = start + i;
-    guarantee(ind < _capacity, "By overflow test above.");
+    assert(ind < _capacity, "By overflow test above.");
     _base[ind] = ptr_arr[i];
   }
 }
 
 
@@ -308,16 +308,16 @@
     jint next_index = index-1;
     jint res = Atomic::cmpxchg(next_index, &_index, index);
     if (res == index) {
       MemRegion mr = _base[next_index];
       if (mr.start() != NULL) {
-        tmp_guarantee_CM( mr.end() != NULL, "invariant" );
-        tmp_guarantee_CM( mr.word_size() > 0, "invariant" );
+        assert(mr.end() != NULL, "invariant");
+        assert(mr.word_size() > 0, "invariant");
         return mr;
       } else {
         // that entry was invalidated... let's skip it
-        tmp_guarantee_CM( mr.end() == NULL, "invariant" );
+        assert(mr.end() == NULL, "invariant");
       }
     }
     // Otherwise, we need to try again.
   }
 }
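The hunk above is the region stack's lock-free pop: read _index, compute next_index, and publish it with Atomic::cmpxchg, retrying whenever another thread got there first. A self-contained sketch of the same index-CAS protocol using std::atomic; it is safe among concurrent poppers and among concurrent pushers (each CAS winner owns its slot), but, like the original, it is not a general stack that lets pushes and pops race freely:

    #include <atomic>

    template <typename T, int Capacity>
    struct CasIndexStack {
      T _base[Capacity];
      std::atomic<int> _index{0};   // number of valid entries

      // Lock-free among concurrent pushers: reserve a slot, then fill it.
      bool push(const T& elem) {
        while (true) {
          int index = _index.load();
          if (index >= Capacity) return false;            // overflow
          if (_index.compare_exchange_weak(index, index + 1)) {
            _base[index] = elem;                          // we own this slot
            return true;
          }
          // Otherwise, we need to try again.
        }
      }

      // Lock-free among concurrent poppers, mirroring the hunk above.
      bool pop(T* out) {
        while (true) {
          int index = _index.load();
          if (index == 0) return false;                   // empty
          int next_index = index - 1;
          if (_index.compare_exchange_weak(index, next_index)) {
            *out = _base[next_index];                     // we own this slot
            return true;
          }
          // Otherwise, we need to try again.
        }
      }
    };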
@@ -326,22 +326,22 @@
   bool result = false;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   for (int i = 0; i < _oops_do_bound; ++i) {
     MemRegion mr = _base[i];
     if (mr.start() != NULL) {
-      tmp_guarantee_CM( mr.end() != NULL, "invariant");
-      tmp_guarantee_CM( mr.word_size() > 0, "invariant" );
+      assert(mr.end() != NULL, "invariant");
+      assert(mr.word_size() > 0, "invariant");
       HeapRegion* hr = g1h->heap_region_containing(mr.start());
-      tmp_guarantee_CM( hr != NULL, "invariant" );
+      assert(hr != NULL, "invariant");
       if (hr->in_collection_set()) {
         // The region points into the collection set
         _base[i] = MemRegion();
         result = true;
       }
     } else {
       // that entry was invalidated... let's skip it
-      tmp_guarantee_CM( mr.end() == NULL, "invariant" );
+      assert(mr.end() == NULL, "invariant");
     }
   }
   return result;
 }
 
@@ -540,11 +540,11 @@
   gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
   gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
   gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
 #endif
 
-  guarantee( parallel_marking_threads() > 0, "peace of mind" );
+  guarantee(parallel_marking_threads() > 0, "peace of mind");
   _parallel_workers = new WorkGang("G1 Parallel Marking Threads",
                                    (int) parallel_marking_threads(), false, true);
   if (_parallel_workers == NULL)
     vm_exit_during_initialization("Failed necessary allocation.");
 }
@@ -567,12 +567,11 @@
   // not have this problem.
   if (!concurrent_marking_in_progress() && !force)
     return;
 
   MemRegion committed = _g1h->g1_committed();
-  tmp_guarantee_CM( committed.start() == _heap_start,
-                    "start shouldn't change" );
+  assert(committed.start() == _heap_start, "start shouldn't change");
   HeapWord* new_end = committed.end();
   if (new_end > _heap_end) {
     // The heap has been expanded.
 
     _heap_end = new_end;
@@ -590,13 +589,14 @@
   // inactive.
   MemRegion committed = _g1h->g1_committed();
   _heap_start = committed.start();
   _heap_end = committed.end();
 
-  guarantee( _heap_start != NULL &&
-             _heap_end != NULL &&
-             _heap_start < _heap_end, "heap bounds should look ok" );
+  // Separated the asserts so that we know which one fires.
+  assert(_heap_start != NULL, "heap bounds should look ok");
+  assert(_heap_end != NULL, "heap bounds should look ok");
+  assert(_heap_start < _heap_end, "heap bounds should look ok");
 
   // reset all the marking data structures and any necessary flags
   clear_marking_state();
 
   if (verbose_low())
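The comment introduced here ("Separated the asserts so that we know which one fires") names a pattern this changeset applies repeatedly: a failed assert over a conjunction reports only that the whole condition broke, while one assert per clause pinpoints the offender by message and line number. A tiny illustration with the standard <cassert> rather than HotSpot's macro:

    #include <cassert>
    #include <cstddef>

    void check_heap_bounds(const char* heap_start, const char* heap_end) {
      // Compound form: a failure report can't say which clause broke.
      //   assert(heap_start != NULL && heap_end != NULL && heap_start < heap_end);

      // Separated form: the reported line identifies the broken clause.
      assert(heap_start != NULL && "heap bounds should look ok");
      assert(heap_end != NULL && "heap bounds should look ok");
      assert(heap_start < heap_end && "heap bounds should look ok");
    }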
@@ -612,11 +612,11 @@
     // pause with initial mark piggy-backed
     set_concurrent_marking_in_progress();
   }
 }
 
 void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) {
-  guarantee( active_tasks <= _max_task_num, "we should not have more" );
+  assert(active_tasks <= _max_task_num, "we should not have more");
 
   _active_tasks = active_tasks;
   // Need to update the three data structures below according to the
   // number of active threads for this phase.
   _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
@@ -632,12 +632,12 @@
     set_concurrent_marking_in_progress();
   } else {
     // We currently assume that the concurrent flag has been set to
     // false before we start remark. At this point we should also be
     // in a STW phase.
-    guarantee( !concurrent_marking_in_progress(), "invariant" );
-    guarantee( _finger == _heap_end, "only way to get here" );
+    assert(!concurrent_marking_in_progress(), "invariant");
+    assert(_finger == _heap_end, "only way to get here");
     update_g1_committed(true);
   }
 }
 
 void ConcurrentMark::set_non_marking_state() {
@@ -931,12 +931,12 @@
   // We can't really check against _heap_start and _heap_end, since it
   // is possible during an evacuation pause with piggy-backed
   // initial-mark that the committed space is expanded during the
   // pause without CM observing this change. So the assertions below
   // is a bit conservative; but better than nothing.
-  tmp_guarantee_CM( _g1h->g1_committed().contains(addr),
-                    "address should be within the heap bounds" );
+  assert(_g1h->g1_committed().contains(addr),
+         "address should be within the heap bounds");
 
   if (!_nextMarkBitMap->isMarked(addr))
     _nextMarkBitMap->parMark(addr);
 }
 
@@ -958,16 +958,19 @@
                            PTR_FORMAT, mr.start(), mr.end(), finger);
 
   if (mr.start() < finger) {
     // The finger is always heap region aligned and it is not possible
     // for mr to span heap regions.
-    tmp_guarantee_CM( mr.end() <= finger, "invariant" );
+    assert(mr.end() <= finger, "invariant");
 
-    tmp_guarantee_CM( mr.start() <= mr.end() &&
-                      _heap_start <= mr.start() &&
-                      mr.end() <= _heap_end,
-                      "region boundaries should fall within the committed space" );
+    // Separated the asserts so that we know which one fires.
+    assert(mr.start() <= mr.end(),
+           "region boundaries should fall within the committed space");
+    assert(_heap_start <= mr.start(),
+           "region boundaries should fall within the committed space");
+    assert(mr.end() <= _heap_end,
+           "region boundaries should fall within the committed space");
     if (verbose_low())
       gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") "
                              "below the finger, pushing it",
                              mr.start(), mr.end());
 
@@ -1012,18 +1015,18 @@
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmt;
 
 public:
   void work(int worker_i) {
-    guarantee( Thread::current()->is_ConcurrentGC_thread(),
-               "this should only be done by a conc GC thread" );
+    assert(Thread::current()->is_ConcurrentGC_thread(),
+           "this should only be done by a conc GC thread");
 
     double start_vtime = os::elapsedVTime();
 
     ConcurrentGCThread::stsJoin();
 
-    guarantee( (size_t)worker_i < _cm->active_tasks(), "invariant" );
+    assert((size_t) worker_i < _cm->active_tasks(), "invariant");
     CMTask* the_task = _cm->task(worker_i);
     the_task->record_start_time();
     if (!_cm->has_aborted()) {
       do {
         double start_vtime_sec = os::elapsedVTime();
@@ -1057,11 +1060,11 @@
                                elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
 #endif
       } while (!_cm->has_aborted() && the_task->has_aborted());
     }
     the_task->record_end_time();
-    guarantee( !the_task->has_aborted() || _cm->has_aborted(), "invariant" );
+    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
 
     ConcurrentGCThread::stsLeave();
 
     double end_vtime = os::elapsedVTime();
     _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
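Note that the worker above brackets its work with os::elapsedVTime(), which is per-thread CPU ("virtual") time rather than wall-clock time, so time spent descheduled or sleeping between marking steps does not inflate the accumulated task time. A rough POSIX equivalent of that measurement, assuming clock_gettime support; this is not the HotSpot implementation:

    #include <time.h>

    // CPU seconds consumed by the calling thread so far.
    static double elapsed_vtime_sec() {
      struct timespec ts;
      clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
      return (double) ts.tv_sec + (double) ts.tv_nsec / 1e9;
    }

    void timed_worker_body() {
      double start_vtime = elapsed_vtime_sec();
      // ... the marking-step loop would run here ...
      double consumed = elapsed_vtime_sec() - start_vtime;
      (void) consumed;  // would be accumulated per worker, as above
    }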
@@ -1180,12 +1183,11 @@
   bool _final;
 
   void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
     for (intptr_t i = start_card_num; i <= last_card_num; i++) {
 #if CARD_BM_TEST_MODE
-      guarantee(_card_bm->at(i - _bottom_card_num),
-                "Should already be set.");
+      guarantee(_card_bm->at(i - _bottom_card_num), "Should already be set.");
 #else
       _card_bm->par_at_put(i - _bottom_card_num, 1);
 #endif
     }
   }
@@ -1440,11 +1442,11 @@
     } else {
       _g1h->heap_region_iterate(&calccl);
     }
     assert(calccl.complete(), "Shouldn't have yielded!");
 
-    guarantee( (size_t)i < _n_workers, "invariant" );
+    assert((size_t) i < _n_workers, "invariant");
     _live_bytes[i] = calccl.tot_live();
     _used_bytes[i] = calccl.tot_used();
   }
   size_t live_bytes() {
     size_t live_bytes = 0;
@@ -1772,18 +1774,18 @@
       while (hd != NULL) {
         // Now finish up the other stuff.
         hd->rem_set()->clear();
         HeapRegion* next_hd = hd->next_from_unclean_list();
         (void)list->pop();
-        guarantee(list->hd() == next_hd, "how not?");
+        assert(list->hd() == next_hd, "how not?");
         _g1h->put_region_on_unclean_list(hd);
         if (!hd->isHumongous()) {
           // Add this to the _free_regions count by 1.
           _g1h->finish_free_region_work(0, 0, 1, NULL);
         }
         hd = list->hd();
-        guarantee(hd == next_hd, "how not?");
+        assert(hd == next_hd, "how not?");
       }
     }
   }
 
 
@@ -1929,13 +1931,10 @@
     // immediately.
     int n_workers = g1h->workers()->total_workers();
     g1h->set_par_threads(n_workers);
     g1h->workers()->run_task(&remarkTask);
     g1h->set_par_threads(0);
-
-    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-    guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" );
   } else {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
     int active_workers = 1;
     set_phase(active_workers, false);
@@ -1943,14 +1942,13 @@
     CMRemarkTask remarkTask(this);
     // We will start all available threads, even if we decide that the
     // active_workers will be fewer. The extra ones will just bail out
     // immediately.
     remarkTask.work(0);
-
-    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-    guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" );
-  }
+  }
+  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
 
   print_stats();
 
   if (!restart_for_overflow())
     set_non_marking_state();
@@ -1987,11 +1985,11 @@
 
   if (!_g1h->is_in_g1_reserved(obj))
     str = "outside G1 reserved";
   else {
     HeapRegion* hr = _g1h->heap_region_containing(obj);
-    guarantee( hr != NULL, "invariant" );
+    guarantee(hr != NULL, "invariant");
     if (hr->obj_allocated_since_prev_marking(obj)) {
       str = "over TAMS";
       if (_bitmap->isMarked((HeapWord*) obj))
         str2 = " AND MARKED";
     } else if (_bitmap->isMarked((HeapWord*) obj))
@@ -2123,11 +2121,11 @@
 
 
   HeapWord* objAddr = (HeapWord*) obj;
   assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
-    tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
+    assert(obj != NULL, "is_in_g1_reserved should ensure this");
     HeapRegion* hr = _g1h->heap_region_containing(obj);
     if (_g1h->is_obj_ill(obj, hr)) {
       if (verbose_high())
         gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered "
                                "marked", (void*) obj);
@@ -2165,11 +2163,11 @@
   // no need to check whether we should do this, as this is only
   // called during an evacuation pause
   satb_mq_set.iterate_closure_all_threads();
 
   satb_mq_set.set_closure(NULL);
-  guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" );
+  assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
 }
 
 void ConcurrentMark::markPrev(oop p) {
   // Note we are overriding the read-only view of the prev map here, via
   // the cast.
@@ -2198,11 +2196,11 @@
   HeapWord* finger = _finger;
 
   // _heap_end will not change underneath our feet; it only changes at
   // yield points.
   while (finger < _heap_end) {
-    tmp_guarantee_CM( _g1h->is_in_g1_reserved(finger), "invariant" );
+    assert(_g1h->is_in_g1_reserved(finger), "invariant");
 
     // is the gap between reading the finger and doing the CAS too long?
 
     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
     HeapWord* bottom = curr_region->bottom();
@@ -2220,11 +2218,11 @@
     if (res == finger) {
       // we succeeded
 
       // notice that _finger == end cannot be guaranteed here since,
       // someone else might have moved the finger even further
-      guarantee( _finger >= end, "the finger should have moved forward" );
+      assert(_finger >= end, "the finger should have moved forward");
 
       if (verbose_low())
         gclog_or_tty->print_cr("[%d] we were successful with region = "
                                PTR_FORMAT, task_num, curr_region);
 
@@ -2232,21 +2230,21 @@
         if (verbose_low())
           gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, "
                                  "returning it ", task_num, curr_region);
         return curr_region;
       } else {
-        tmp_guarantee_CM( limit == bottom,
-                          "the region limit should be at bottom" );
+        assert(limit == bottom,
+               "the region limit should be at bottom");
         if (verbose_low())
           gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, "
                                  "returning NULL", task_num, curr_region);
         // we return NULL and the caller should try calling
         // claim_region() again.
         return NULL;
       }
     } else {
-      guarantee( _finger > finger, "the finger should have moved forward" );
+      assert(_finger > finger, "the finger should have moved forward");
       if (verbose_low())
         gclog_or_tty->print_cr("[%d] somebody else moved the finger, "
                                "global finger = "PTR_FORMAT", "
                                "our finger = "PTR_FORMAT,
                                task_num, _finger, finger);
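For context, claim_region() parcels out the heap through a single global finger: each task reads the finger, computes the end of the region containing it, and tries to CAS the finger forward; the winner owns the region, and losers observe a moved finger and retry, which is exactly what the two asserts above ("the finger should have moved forward") encode. A simplified sketch of the protocol with fixed-size, aligned regions (assumed size; not the G1 code):

    #include <atomic>
    #include <cstdint>

    const uintptr_t kRegionBytes = 1024 * 1024;  // assumed 1 MB regions

    struct RegionClaimer {
      std::atomic<uintptr_t> _finger;  // always region-aligned
      uintptr_t _heap_end;

      // Returns the claimed region's start, or 0 when out of regions.
      uintptr_t claim_region() {
        uintptr_t finger = _finger.load();
        while (finger < _heap_end) {
          uintptr_t end = finger + kRegionBytes;
          if (_finger.compare_exchange_strong(finger, end)) {
            // we succeeded; note that _finger >= end now, since other
            // tasks may already have moved it even further
            return finger;
          }
          // somebody else moved the finger: the failed CAS reloaded
          // 'finger' with the current value, so just retry
        }
        return 0;  // out of regions
      }
    };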
@@ -2280,11 +2278,11 @@
   // finally, invalidate any entries that in the region stack that
   // point into the collection set
   if (_regionStack.invalidate_entries_into_cset()) {
     // otherwise, any gray objects copied during the evacuation pause
     // might not be visited.
-    guarantee( _should_gray_objects, "invariant" );
+    assert(_should_gray_objects, "invariant");
   }
 }
 
 void ConcurrentMark::clear_marking_state() {
   _markStack.setEmpty();
@@ -2713,16 +2711,16 @@
     _scanning_heap_region = scanning_heap_region;
   }
 
   bool do_bit(size_t offset) {
     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
-    tmp_guarantee_CM( _nextMarkBitMap->isMarked(addr), "invariant" );
-    tmp_guarantee_CM( addr < _cm->finger(), "invariant" );
+    assert(_nextMarkBitMap->isMarked(addr), "invariant");
+    assert( addr < _cm->finger(), "invariant");
 
     if (_scanning_heap_region) {
       statsOnly( _task->increase_objs_found_on_bitmap() );
-      tmp_guarantee_CM( addr >= _task->finger(), "invariant" );
+      assert(addr >= _task->finger(), "invariant");
       // We move that task's local finger along.
       _task->move_finger_to(addr);
     } else {
       // We move the task's region finger along.
       _task->move_region_finger_to(addr);
@@ -2763,12 +2761,13 @@
 public:
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop( oop* p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T* p) {
-    tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant" );
-    tmp_guarantee_CM( !_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(), "invariant" );
+    assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
+    assert(!_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(),
+           "invariant");
 
     oop obj = oopDesc::load_decode_heap_oop(p);
     if (_cm->verbose_high())
       gclog_or_tty->print_cr("[%d] we're looking at location "
                              "*"PTR_FORMAT" = "PTR_FORMAT,
@@ -2781,12 +2780,15 @@
                                CMTask* task)
     : _g1h(g1h), _cm(cm), _task(task) { }
 };
 
 void CMTask::setup_for_region(HeapRegion* hr) {
-  tmp_guarantee_CM( hr != NULL && !hr->continuesHumongous(),
-      "claim_region() should have filtered out continues humongous regions" );
+  // Separated the asserts so that we know which one fires.
+  assert(hr != NULL,
+         "claim_region() should have filtered out continues humongous regions");
+  assert(!hr->continuesHumongous(),
+         "claim_region() should have filtered out continues humongous regions");
 
   if (_cm->verbose_low())
     gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT,
                            _task_id, hr);
 
@@ -2810,13 +2812,13 @@
     // iteration that will follow this will not do anything.
     // (this is not a condition that holds when we set the region up,
     // as the region is not supposed to be empty in the first place)
     _finger = bottom;
   } else if (limit >= _region_limit) {
-    tmp_guarantee_CM( limit >= _finger, "peace of mind" );
+    assert(limit >= _finger, "peace of mind");
   } else {
-    tmp_guarantee_CM( limit < _region_limit, "only way to get here" );
+    assert(limit < _region_limit, "only way to get here");
     // This can happen under some pretty unusual circumstances. An
     // evacuation pause empties the region underneath our feet (NTAMS
     // at bottom). We then do some allocation in the region (NTAMS
     // stays at bottom), followed by the region being used as a GC
     // alloc region (NTAMS will move to top() and the objects
@@ -2830,11 +2832,11 @@
 
   _region_limit = limit;
 }
 
 void CMTask::giveup_current_region() {
-  tmp_guarantee_CM( _curr_region != NULL, "invariant" );
+  assert(_curr_region != NULL, "invariant");
   if (_cm->verbose_low())
     gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT,
                            _task_id, _curr_region);
   clear_region_fields();
 }
@@ -2848,11 +2850,11 @@
 
   _region_finger = NULL;
 }
 
 void CMTask::reset(CMBitMap* nextMarkBitMap) {
-  guarantee( nextMarkBitMap != NULL, "invariant" );
+  guarantee(nextMarkBitMap != NULL, "invariant");
 
   if (_cm->verbose_low())
     gclog_or_tty->print_cr("[%d] resetting", _task_id);
 
   _nextMarkBitMap = nextMarkBitMap;
@@ -2914,11 +2916,11 @@
   ++_refs_reached;
 
   HeapWord* objAddr = (HeapWord*) obj;
   assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
-    tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
+    assert(obj != NULL, "is_in_g1_reserved should ensure this");
     HeapRegion* hr = _g1h->heap_region_containing(obj);
     if (_g1h->is_obj_ill(obj, hr)) {
       if (_cm->verbose_high())
         gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked",
                                _task_id, (void*) obj);
@@ -2975,14 +2977,15 @@
   }
 }
 
 void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
-  tmp_guarantee_CM( _g1h->is_in_g1_reserved(objAddr), "invariant" );
-  tmp_guarantee_CM( !_g1h->heap_region_containing(objAddr)->is_on_free_list(), "invariant" );
-  tmp_guarantee_CM( !_g1h->is_obj_ill(obj), "invariant" );
-  tmp_guarantee_CM( _nextMarkBitMap->isMarked(objAddr), "invariant" );
+  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
+  assert(!_g1h->heap_region_containing(objAddr)->is_on_free_list(),
+         "invariant");
+  assert(!_g1h->is_obj_ill(obj), "invariant");
+  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
 
   if (_cm->verbose_high())
     gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
 
   if (!_task_queue->push(obj)) {
@@ -2997,23 +3000,23 @@
 
     // this should succeed since, even if we overflow the global
     // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
     bool success = _task_queue->push(obj);
-    tmp_guarantee_CM( success, "invariant" );
+    assert(success, "invariant");
   }
 
   statsOnly( int tmp_size = _task_queue->size();
              if (tmp_size > _local_max_size)
                _local_max_size = tmp_size;
              ++_local_pushes );
 }
 
 void CMTask::reached_limit() {
-  tmp_guarantee_CM( _words_scanned >= _words_scanned_limit ||
-                    _refs_reached >= _refs_reached_limit ,
-                    "shouldn't have been called otherwise" );
+  assert(_words_scanned >= _words_scanned_limit ||
+         _refs_reached >= _refs_reached_limit ,
+         "shouldn't have been called otherwise");
   regular_clock_call();
 }
 
 void CMTask::regular_clock_call() {
   if (has_aborted())
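CMTask::push and the comment above it describe the two-level queueing discipline: entries go to the task-private queue first, and only when that overflows does the task move some local entries to the shared global stack, after which the local push must succeed. A sketch of that policy over assumed simple container types (the real code uses HotSpot's lock-free task queues, not a mutex):

    #include <cassert>
    #include <deque>
    #include <mutex>
    #include <vector>

    struct TwoLevelQueue {
      static const size_t kLocalCapacity = 16;
      static const size_t kChunk = 8;   // entries spilled per overflow

      std::deque<void*> _local;         // task-private, no locking
      std::vector<void*>* _global;      // shared overflow stack
      std::mutex* _global_lock;

      void push(void* obj) {
        if (_local.size() == kLocalCapacity) {
          // Local queue full: move a chunk to the global stack.
          std::lock_guard<std::mutex> guard(*_global_lock);
          for (size_t i = 0; i < kChunk; i++) {
            _global->push_back(_local.front());
            _local.pop_front();
          }
        }
        // This should succeed: we definitely freed local slots above.
        _local.push_back(obj);
        assert(_local.size() <= kLocalCapacity);
      }
    };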
@@ -3167,12 +3170,12 @@
     // local array where we'll store the entries that will be popped
     // from the global stack.
     oop buffer[global_stack_transfer_size];
     int n;
     _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
-    tmp_guarantee_CM( n <= global_stack_transfer_size,
-                      "we should not pop more than the given limit" );
+    assert(n <= global_stack_transfer_size,
+           "we should not pop more than the given limit");
     if (n > 0) {
       // yes, we did actually pop at least one entry
 
       statsOnly( ++_global_transfers_from; _global_pops += n );
       if (_cm->verbose_medium())
@@ -3180,11 +3183,11 @@
                                _task_id, n);
       for (int i = 0; i < n; ++i) {
         bool success = _task_queue->push(buffer[i]);
         // We only call this when the local queue is empty or under a
         // given target limit. So, we do not expect this push to fail.
-        tmp_guarantee_CM( success, "invariant" );
+        assert(success, "invariant");
       }
 
       statsOnly( int tmp_size = _task_queue->size();
                  if (tmp_size > _local_max_size)
                    _local_max_size = tmp_size;
@@ -3220,14 +3223,13 @@
 
     if (_cm->verbose_high())
       gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id,
                              (void*) obj);
 
-    tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) obj),
-                      "invariant" );
-    tmp_guarantee_CM( !_g1h->heap_region_containing(obj)->is_on_free_list(),
-                      "invariant" );
+    assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
+    assert(!_g1h->heap_region_containing(obj)->is_on_free_list(),
+           "invariant");
 
     scan_object(obj);
 
     if (_task_queue->size() <= target_size || has_aborted())
       ret = false;
@@ -3245,11 +3247,11 @@
   if (has_aborted())
     return;
 
   // We have a policy to drain the local queue before we attempt to
   // drain the global stack.
-  tmp_guarantee_CM( partially || _task_queue->size() == 0, "invariant" );
+  assert(partially || _task_queue->size() == 0, "invariant");
 
   // Decide what the target size is, depending whether we're going to
   // drain it partially (so that other tasks can steal if they run out
   // of things to do) or totally (at the very end). Notice that,
   // because we move entries from the global stack in chunks or
@@ -3326,13 +3328,13 @@
     satb_mq_set.iterate_closure_all_threads();
   }
 
   _draining_satb_buffers = false;
 
-  tmp_guarantee_CM( has_aborted() ||
-                    concurrent() ||
-                    satb_mq_set.completed_buffers_num() == 0, "invariant" );
+  assert(has_aborted() ||
+         concurrent() ||
+         satb_mq_set.completed_buffers_num() == 0, "invariant");
 
   if (ParallelGCThreads > 0)
     satb_mq_set.set_par_closure(_task_id, NULL);
   else
     satb_mq_set.set_closure(NULL);
@@ -3344,12 +3346,12 @@
 
 void CMTask::drain_region_stack(BitMapClosure* bc) {
   if (has_aborted())
     return;
 
-  tmp_guarantee_CM( _region_finger == NULL,
-                    "it should be NULL when we're not scanning a region" );
+  assert(_region_finger == NULL,
+         "it should be NULL when we're not scanning a region");
 
   if (!_cm->region_stack_empty()) {
     if (_cm->verbose_low())
       gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
                              _task_id, _cm->region_stack_size());
@@ -3361,16 +3363,16 @@
     while (mr.start() != NULL) {
       if (_cm->verbose_medium())
         gclog_or_tty->print_cr("[%d] we are scanning region "
                                "["PTR_FORMAT", "PTR_FORMAT")",
                                _task_id, mr.start(), mr.end());
-      tmp_guarantee_CM( mr.end() <= _cm->finger(),
-                        "otherwise the region shouldn't be on the stack" );
+      assert(mr.end() <= _cm->finger(),
+             "otherwise the region shouldn't be on the stack");
       assert(!mr.is_empty(), "Only non-empty regions live on the region stack");
       if (_nextMarkBitMap->iterate(bc, mr)) {
-        tmp_guarantee_CM( !has_aborted(),
-          "cannot abort the task without aborting the bitmap iteration" );
+        assert(!has_aborted(),
+               "cannot abort the task without aborting the bitmap iteration");
 
         // We finished iterating over the region without aborting.
         regular_clock_call();
         if (has_aborted())
           mr = MemRegion();
@@ -3378,18 +3380,18 @@
           mr = _cm->region_stack_pop();
           // it returns MemRegion() if the pop fails
           statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
         }
       } else {
-        guarantee( has_aborted(), "currently the only way to do so" );
+        assert(has_aborted(), "currently the only way to do so");
 
         // The only way to abort the bitmap iteration is to return
         // false from the do_bit() method. However, inside the
        // do_bit() method we move the _region_finger to point to the
         // object currently being looked at. So, if we bail out, we
         // have definitely set _region_finger to something non-null.
-        guarantee( _region_finger != NULL, "invariant" );
+        assert(_region_finger != NULL, "invariant");
 
         // The iteration was actually aborted. So now _region_finger
         // points to the address of the object we last scanned. If we
         // leave it there, when we restart this task, we will rescan
         // the object. It is easy to avoid this. We move the finger by
@@ -3571,25 +3573,25 @@
     too and not constantly check them throughout the code.
 
  *****************************************************************************/
 
 void CMTask::do_marking_step(double time_target_ms) {
-  guarantee( time_target_ms >= 1.0, "minimum granularity is 1ms" );
-  guarantee( concurrent() == _cm->concurrent(), "they should be the same" );
+  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
+  assert(concurrent() == _cm->concurrent(), "they should be the same");
 
-  guarantee( concurrent() || _cm->region_stack_empty(),
-             "the region stack should have been cleared before remark" );
-  guarantee( _region_finger == NULL,
-             "this should be non-null only when a region is being scanned" );
+  assert(concurrent() || _cm->region_stack_empty(),
+         "the region stack should have been cleared before remark");
+  assert(_region_finger == NULL,
+         "this should be non-null only when a region is being scanned");
 
   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
-  guarantee( _task_queues != NULL, "invariant" );
-  guarantee( _task_queue != NULL, "invariant" );
-  guarantee( _task_queues->queue(_task_id) == _task_queue, "invariant" );
+  assert(_task_queues != NULL, "invariant");
+  assert(_task_queue != NULL, "invariant");
+  assert(_task_queues->queue(_task_id) == _task_queue, "invariant");
 
-  guarantee( !_claimed,
-             "only one thread should claim this task at any one time" );
+  assert(!_claimed,
+         "only one thread should claim this task at any one time");
 
   // OK, this doesn't safeguard again all possible scenarios, as it is
   // possible for two threads to set the _claimed flag at the same
   // time. But it is only for debugging purposes anyway and it will
   // catch most problems.
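The _claimed check above is, as the code comment concedes, a racy best-effort debugging aid: two threads could both read false and set the flag together. If a watertight claim were ever wanted, an atomic exchange would let exactly one claimant win; a sketch of that alternative (not what this changeset does):

    #include <atomic>
    #include <cassert>

    struct ClaimableTask {
      std::atomic<bool> _claimed{false};

      void do_marking_step() {
        // exchange() returns the old value: exactly one caller sees false.
        bool already_claimed = _claimed.exchange(true);
        assert(!already_claimed &&
               "only one thread should claim this task at any one time");

        // ... marking work would run here ...

        _claimed.store(false);  // release the claim for the next round
      }
    };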
@@ -3656,13 +3658,12 @@
   drain_global_stack(true);
 
   do {
     if (!has_aborted() && _curr_region != NULL) {
       // This means that we're already holding on to a region.
-      tmp_guarantee_CM( _finger != NULL,
-                        "if region is not NULL, then the finger "
-                        "should not be NULL either" );
+      assert(_finger != NULL, "if region is not NULL, then the finger "
+             "should not be NULL either");
 
       // We might have restarted this task after an evacuation pause
       // which might have evacuated the region we're holding on to
       // underneath our feet. Let's read its limit again to make sure
       // that we do not iterate over a region of the heap that
@@ -3690,17 +3691,17 @@
         // We successfully completed iterating over the region. Now,
         // let's give up the region.
         giveup_current_region();
         regular_clock_call();
       } else {
-        guarantee( has_aborted(), "currently the only way to do so" );
+        assert(has_aborted(), "currently the only way to do so");
         // The only way to abort the bitmap iteration is to return
         // false from the do_bit() method. However, inside the
         // do_bit() method we move the _finger to point to the
         // object currently being looked at. So, if we bail out, we
         // have definitely set _finger to something non-null.
-        guarantee( _finger != NULL, "invariant" );
+        assert(_finger != NULL, "invariant");
 
         // Region iteration was actually aborted. So now _finger
         // points to the address of the object we last scanned. If we
         // leave it there, when we restart this task, we will rescan
         // the object. It is easy to avoid this. We move the finger by
@@ -3723,13 +3724,14 @@
     // claiming and why we have to check out_of_regions() to determine
     // whether we're done or not.
     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
       // We are going to try to claim a new region. We should have
       // given up on the previous one.
-      tmp_guarantee_CM( _curr_region == NULL &&
-                        _finger == NULL &&
-                        _region_limit == NULL, "invariant" );
+      // Separated the asserts so that we know which one fires.
+      assert(_curr_region == NULL, "invariant");
+      assert(_finger == NULL, "invariant");
+      assert(_region_limit == NULL, "invariant");
       if (_cm->verbose_low())
         gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id);
       HeapRegion* claimed_region = _cm->claim_region(_task_id);
       if (claimed_region != NULL) {
         // Yes, we managed to claim one
@@ -3739,33 +3741,33 @@
           gclog_or_tty->print_cr("[%d] we successfully claimed "
                                  "region "PTR_FORMAT,
                                  _task_id, claimed_region);
 
         setup_for_region(claimed_region);
-        tmp_guarantee_CM( _curr_region == claimed_region, "invariant" );
+        assert(_curr_region == claimed_region, "invariant");
       }
       // It is important to call the regular clock here. It might take
       // a while to claim a region if, for example, we hit a large
       // block of empty regions. So we need to call the regular clock
       // method once round the loop to make sure it's called
       // frequently enough.
       regular_clock_call();
     }
 
     if (!has_aborted() && _curr_region == NULL) {
-      tmp_guarantee_CM( _cm->out_of_regions(),
-                        "at this point we should be out of regions" );
+      assert(_cm->out_of_regions(),
+             "at this point we should be out of regions");
     }
   } while ( _curr_region != NULL && !has_aborted());
 
   if (!has_aborted()) {
     // We cannot check whether the global stack is empty, since other
     // tasks might be pushing objects to it concurrently. We also cannot
     // check if the region stack is empty because if a thread is aborting
     // it can push a partially done region back.
-    tmp_guarantee_CM( _cm->out_of_regions(),
-                      "at this point we should be out of regions" );
+    assert(_cm->out_of_regions(),
+           "at this point we should be out of regions");
 
     if (_cm->verbose_low())
       gclog_or_tty->print_cr("[%d] all regions claimed", _task_id);
 
     // Try to reduce the number of available SATB buffers so that
@@ -3785,12 +3787,12 @@
 
     // We cannot check whether the global stack is empty, since other
     // tasks might be pushing objects to it concurrently. We also cannot
     // check if the region stack is empty because if a thread is aborting
     // it can push a partially done region back.
-    guarantee( _cm->out_of_regions() &&
-               _task_queue->size() == 0, "only way to reach here" );
+    assert(_cm->out_of_regions() && _task_queue->size() == 0,
+           "only way to reach here");
 
     if (_cm->verbose_low())
       gclog_or_tty->print_cr("[%d] starting to steal", _task_id);
 
     while (!has_aborted()) {
@@ -3802,12 +3804,12 @@
           gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully",
                                  _task_id, (void*) obj);
 
         statsOnly( ++_steals );
 
-        tmp_guarantee_CM( _nextMarkBitMap->isMarked((HeapWord*) obj),
-                          "any stolen object should be marked" );
+        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
+               "any stolen object should be marked");
         scan_object(obj);
 
         // And since we're towards the end, let's totally drain the
         // local queue and global stack.
         drain_local_queue(false);
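Once a task has no regions left and its own queues are drained, the loop above turns to stealing from other tasks' queues before entering the termination protocol. A lock-based sketch of such a steal loop over an assumed fixed set of per-task queues (HotSpot's real task queues are lock-free deques with randomized victim selection):

    #include <array>
    #include <deque>
    #include <mutex>

    const int kNumTasks = 4;  // assumed number of marking tasks

    struct TaskQueue {
      std::deque<void*> entries;
      std::mutex lock;
    };

    std::array<TaskQueue, kNumTasks> queues;

    // Try every other task's queue once; true and *out on success.
    bool steal(int my_id, void** out) {
      for (int i = 0; i < kNumTasks; i++) {
        if (i == my_id) continue;
        std::lock_guard<std::mutex> guard(queues[i].lock);
        if (!queues[i].entries.empty()) {
          *out = queues[i].entries.front();  // thieves take the oldest entry
          queues[i].entries.pop_front();     // the owner works at the back
          return true;
        }
      }
      return false;  // nothing to steal; fall through to termination
    }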
@@ -3823,12 +3825,13 @@
   if (!has_aborted()) {
     // We cannot check whether the global stack is empty, since other
     // tasks might be concurrently pushing objects on it. We also cannot
     // check if the region stack is empty because if a thread is aborting
     // it can push a partially done region back.
-    guarantee( _cm->out_of_regions() &&
-               _task_queue->size() == 0, "only way to reach here" );
+    // Separated the asserts so that we know which one fires.
+    assert(_cm->out_of_regions(), "only way to reach here");
+    assert(_task_queue->size() == 0, "only way to reach here");
 
     if (_cm->verbose_low())
       gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id);
 
     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
@@ -3844,28 +3847,29 @@
       // We're all done.
 
       if (_task_id == 0) {
         // let's allow task 0 to do this
         if (concurrent()) {
-          guarantee( _cm->concurrent_marking_in_progress(), "invariant" );
+          assert(_cm->concurrent_marking_in_progress(), "invariant");
           // we need to set this to false before the next
           // safepoint. This way we ensure that the marking phase
           // doesn't observe any more heap expansions.
           _cm->clear_concurrent_marking_in_progress();
         }
       }
 
       // We can now guarantee that the global stack is empty, since
-      // all other tasks have finished.
-      guarantee( _cm->out_of_regions() &&
-                 _cm->region_stack_empty() &&
-                 _cm->mark_stack_empty() &&
-                 _task_queue->size() == 0 &&
-                 !_cm->has_overflown() &&
-                 !_cm->mark_stack_overflow() &&
-                 !_cm->region_stack_overflow(),
-                 "only way to reach here" );
+      // all other tasks have finished. We separated the guarantees so
+      // that, if a condition is false, we can immediately find out
+      // which one.
+      guarantee(_cm->out_of_regions(), "only way to reach here");
+      guarantee(_cm->region_stack_empty(), "only way to reach here");
+      guarantee(_cm->mark_stack_empty(), "only way to reach here");
+      guarantee(_task_queue->size() == 0, "only way to reach here");
+      guarantee(!_cm->has_overflown(), "only way to reach here");
+      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
+      guarantee(!_cm->region_stack_overflow(), "only way to reach here");
 
       if (_cm->verbose_low())
         gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id);
     } else {
       // Apparently there's more work to do. Let's abort this task. It
@@ -3956,12 +3960,12 @@
     _claimed(false),
     _nextMarkBitMap(NULL), _hash_seed(17),
     _task_queue(task_queue),
     _task_queues(task_queues),
     _oop_closure(NULL) {
-  guarantee( task_queue != NULL, "invariant" );
-  guarantee( task_queues != NULL, "invariant" );
+  guarantee(task_queue != NULL, "invariant");
+  guarantee(task_queues != NULL, "invariant");
 
   statsOnly( _clock_due_to_scanning = 0;
              _clock_due_to_marking = 0 );
 
   _marking_step_diffs_ms.add(0.5);