comparison src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @ 1359:23b1b27ac76c

6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
Summary: Make sure that two marking cycles do not overlap, i.e., a new one can
only start after the concurrent marking thread finishes all its work. In the fix
I piggy-back a couple of minor extra fixes: some general code reformatting for
consistency (only around the code I modified), the removal of a field
(G1CollectorPolicy::_should_initiate_conc_mark) which doesn't seem to be used at
all (it's only set but never read), as well as moving the "is GC locker active"
test earlier into the G1 pause / Full GC and using a more appropriate method for it.
Reviewed-by: johnc, jmasa, jcoomes, ysr
author tonyp
date Tue, 06 Apr 2010 10:59:45 -0400
parents 56507bcd639e
children 7666957bc44d
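
Note: the patch replaces the single _should_initiate_conc_mark flag with a
two-flag protocol: one flag records that a marking cycle has been *requested*,
the other that the current pause actually is an initial-mark pause. As a
reading aid, here is a minimal sketch of the fields and accessors the hunks
below rely on; the real declarations live in g1CollectorPolicy.hpp, which this
comparison does not show, so treat the shape of the class as an assumption.

// Reading aid only -- an assumed shape for the declarations in
// g1CollectorPolicy.hpp (not part of this comparison).
class G1CollectorPolicy /* ... */ {
protected:
  // Request flag: a pause noticed that occupancy crossed the initiating
  // threshold; a later pause may act on the request or postpone it.
  bool _initiate_conc_mark_if_possible;
  // Set only for the duration of an initial-mark pause.
  bool _during_initial_mark_pause;

public:
  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()            { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()        { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause()      { _during_initial_mark_pause = false; }

  void decide_on_conc_mark_initiation();      // new in this change
};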
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  1358:72f725c5a7be
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  1359:23b1b27ac76c
@@ -176,12 +176,12 @@
 
   // G1PausesBtwnConcMark defaults to -1
   // so the hack is to do the cast QQQ FIXME
   _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
   _n_marks_since_last_pause(0),
-  _conc_mark_initiated(false),
-  _should_initiate_conc_mark(false),
+  _initiate_conc_mark_if_possible(false),
+  _during_initial_mark_pause(false),
   _should_revert_to_full_young_gcs(false),
   _last_full_young_gc(false),
 
   _prev_collection_pause_used_at_end_bytes(0),
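
The QQQ FIXME in the context above is about signed-to-unsigned wrap-around:
G1PausesBtwnConcMark defaults to -1, and casting that to size_t yields the
maximum value, effectively "no fixed pause count between marking cycles". A
standalone illustration (the stand-in type is an assumption, since the VM
flag's exact declaration isn't visible in this file):

#include <cstddef>
#include <cstdio>

int main() {
  // Stand-in for the G1PausesBtwnConcMark VM flag; its -1 default is
  // what forces the cast in the initializer list above.
  long pauses_btwn_conc_mark = -1;
  // The cast wraps -1 to the largest size_t, i.e. effectively unbounded.
  size_t wrapped = (size_t)pauses_btwn_conc_mark;
  printf("%zu\n", wrapped);  // prints 18446744073709551615 on LP64
  return 0;
}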
@@ -791,11 +791,11 @@
                _young_list_target_length - _young_list_so_prefix_length,
                _young_list_so_prefix_length,
                elapsed_time_ms,
                calculations,
                full_young_gcs() ? "full" : "partial",
-               should_initiate_conc_mark() ? " i-m" : "",
+               during_initial_mark_pause() ? " i-m" : "",
                _in_marking_window,
                _in_marking_window_im);
 #endif // TRACE_CALC_YOUNG_CONFIG
 
   if (_young_list_target_length < _young_list_min_length) {
@@ -1038,11 +1038,12 @@
   // transitions and make sure we start with fully young GCs after the
   // Full GC.
   set_full_young_gcs(true);
   _last_full_young_gc = false;
   _should_revert_to_full_young_gcs = false;
-  _should_initiate_conc_mark = false;
+  clear_initiate_conc_mark_if_possible();
+  clear_during_initial_mark_pause();
   _known_garbage_bytes = 0;
   _known_garbage_ratio = 0.0;
   _in_marking_window = false;
   _in_marking_window_im = false;
 
@@ -1184,11 +1185,12 @@
 }
 
 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                   mark_init_elapsed_time_ms) {
   _during_marking = true;
-  _should_initiate_conc_mark = false;
+  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
+  clear_during_initial_mark_pause();
   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }
 
 void G1CollectorPolicy::record_concurrent_mark_init_end() {
   double end_time_sec = os::elapsedTime();
@@ -1255,11 +1257,10 @@
       os::active_processor_count()) {
     considerConcMarkCost = 0.0;
   }
   _n_pauses_at_mark_end = _n_pauses;
   _n_marks_since_last_pause++;
-  _conc_mark_initiated = false;
 }
 
 void
 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   if (in_young_gc_mode()) {
@@ -1451,21 +1452,28 @@
     // do that for any other surv rate groups too
   }
 #endif // PRODUCT
 
   if (in_young_gc_mode()) {
-    last_pause_included_initial_mark = _should_initiate_conc_mark;
+    last_pause_included_initial_mark = during_initial_mark_pause();
     if (last_pause_included_initial_mark)
       record_concurrent_mark_init_end_pre(0.0);
 
     size_t min_used_targ =
       (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 
-    if (cur_used_bytes > min_used_targ) {
-      if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) {
-      } else if (!_g1->mark_in_progress() && !_last_full_young_gc) {
-        _should_initiate_conc_mark = true;
+
+    if (!_g1->mark_in_progress() && !_last_full_young_gc) {
+      assert(!last_pause_included_initial_mark, "invariant");
+      if (cur_used_bytes > min_used_targ &&
+          cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
+        assert(!during_initial_mark_pause(), "we should not see this here");
+
+        // Note: this might have already been set, if during the last
+        // pause we decided to start a cycle but at the beginning of
+        // this pause we decided to postpone it. That's OK.
+        set_initiate_conc_mark_if_possible();
       }
     }
 
     _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
   }
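
To make the new trigger condition concrete, here is the threshold arithmetic
with hypothetical numbers (a 1 GB heap; 45 is the era's default for
InitiatingHeapOccupancyPercent, quoted from memory):

#include <cstddef>
#include <cstdio>

int main() {
  // Mirrors: min_used_targ = (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent
  size_t capacity           = (size_t)1024 * 1024 * 1024;  // hypothetical 1 GB heap
  size_t initiating_percent = 45;                          // InitiatingHeapOccupancyPercent
  size_t min_used_targ      = (capacity / 100) * initiating_percent;
  printf("min_used_targ = %zu bytes (~%zu MB)\n",
         min_used_targ, min_used_targ / (1024 * 1024));    // ~460 MB
  // A pause now *requests* a cycle (set_initiate_conc_mark_if_possible)
  // only if cur_used_bytes exceeds this AND occupancy grew since the
  // end of the previous pause; actually starting the cycle is deferred
  // to decide_on_conc_mark_initiation() at the start of a later pause.
  return 0;
}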
@@ -1752,11 +1760,11 @@
   }
   double cur_efficiency = (double) freed_bytes / proc_ms;
 
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
-  if (_should_initiate_conc_mark) {
+  if (during_initial_mark_pause()) {
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }
 
   if (in_young_gc_mode()) {
@@ -2171,11 +2179,17 @@
   // I don't think we need to do this when in young GC mode since
   // marking will be initiated next time we hit the soft limit anyway...
   if (predicted_time_ms > _expensive_region_limit_ms) {
     if (!in_young_gc_mode()) {
       set_full_young_gcs(true);
-      _should_initiate_conc_mark = true;
+      // We might want to do something different here. However,
+      // right now we don't support the non-generational G1 mode
+      // (and in fact we are planning to remove the associated code,
+      // see CR 6814390). So, let's leave it as is and this will be
+      // removed some time in the future
+      ShouldNotReachHere();
+      set_during_initial_mark_pause();
     } else
       // no point in doing another partial one
       _should_revert_to_full_young_gcs = true;
   }
 }
@@ -2695,10 +2709,54 @@
     return true;
   }
 #endif
 
 void
+G1CollectorPolicy::decide_on_conc_mark_initiation() {
+  // We are about to decide on whether this pause will be an
+  // initial-mark pause.
+
+  // First, during_initial_mark_pause() should not be already set. We
+  // will set it here if we have to. However, it should be cleared by
+  // the end of the pause (it's only set for the duration of an
+  // initial-mark pause).
+  assert(!during_initial_mark_pause(), "pre-condition");
+
+  if (initiate_conc_mark_if_possible()) {
+    // We had noticed on a previous pause that the heap occupancy has
+    // gone over the initiating threshold and we should start a
+    // concurrent marking cycle. So we might initiate one.
+
+    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+    if (!during_cycle) {
+      // The concurrent marking thread is not "during a cycle", i.e.,
+      // it has completed the last one. So we can go ahead and
+      // initiate a new cycle.
+
+      set_during_initial_mark_pause();
+
+      // And we can now clear initiate_conc_mark_if_possible() as
+      // we've already acted on it.
+      clear_initiate_conc_mark_if_possible();
+    } else {
+      // The concurrent marking thread is still finishing up the
+      // previous cycle. If we start one right now the two cycles
+      // overlap. In particular, the concurrent marking thread might
+      // be in the process of clearing the next marking bitmap (which
+      // we will use for the next cycle if we start one). Starting a
+      // cycle now will be bad given that parts of the marking
+      // information might get cleared by the marking thread. And we
+      // cannot wait for the marking thread to finish the cycle as it
+      // periodically yields while clearing the next marking bitmap
+      // and, if it's in a yield point, it's waiting for us to
+      // finish. So, at this point we will not start a cycle and we'll
+      // let the concurrent marking thread complete the last one.
+    }
+  }
+}
+
+void
 G1CollectorPolicy_BestRegionsFirst::
 record_collection_pause_start(double start_time_sec, size_t start_used) {
   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
 }
 
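
The new decide_on_conc_mark_initiation() only pays off if it runs at the start
of every evacuation pause, before the pause is set up as initial-mark or not.
The call site is in g1CollectedHeap.cpp, which is not part of this file's
comparison, so the following is an assumed sketch of the flow; it also reflects
the summary's note about moving the "is GC locker active" test earlier and
using the more appropriate GC_locker::check_active_before_gc().

// Assumed flow of the caller in g1CollectedHeap.cpp (not shown in this
// comparison); names other than decide_on_conc_mark_initiation,
// during_initial_mark_pause and check_active_before_gc are sketched.
void G1CollectedHeap::do_collection_pause_at_safepoint() {
  // Bail out up front if the GC locker is active (per the summary,
  // this test was moved earlier and now uses this method).
  if (GC_locker::check_active_before_gc()) {
    return;
  }

  G1CollectorPolicy* policy = g1_policy();

  // If a cycle was requested but the concurrent mark thread is still
  // finishing the previous one, the request simply stays pending and
  // is re-examined here on the next pause -- cycles never overlap.
  policy->decide_on_conc_mark_initiation();

  bool initial_mark_pause = policy->during_initial_mark_pause();
  // ... choose the collection set, evacuate, and if initial_mark_pause
  // also do the initial-mark work ...
}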