comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 7450:d275c3dc73e6

8004816: G1: Kitchensink failures after marking stack changes. Summary: Reset the marking state, including the mark stack overflow flag, in the event of a marking stack overflow during serial reference processing. Reviewed-by: jmasa
author johnc
date Thu, 03 Jan 2013 16:28:22 -0800
parents 442f942757c0
children 4700e77d44c1
comparison
equal deleted inserted replaced
7449:37f7535e5f18 7450:d275c3dc73e6
1 /* 1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
190 "Didn't reserve backing store for all of ConcurrentMark stack?"); 190 "Didn't reserve backing store for all of ConcurrentMark stack?");
191 _base = (oop*) _virtual_space.low(); 191 _base = (oop*) _virtual_space.low();
192 setEmpty(); 192 setEmpty();
193 _capacity = (jint) capacity; 193 _capacity = (jint) capacity;
194 _saved_index = -1; 194 _saved_index = -1;
195 _should_expand = false;
195 NOT_PRODUCT(_max_depth = 0); 196 NOT_PRODUCT(_max_depth = 0);
196 return true; 197 return true;
197 } 198 }
198 199
199 void CMMarkStack::expand() { 200 void CMMarkStack::expand() {
745 // Separated the asserts so that we know which one fires. 746 // Separated the asserts so that we know which one fires.
746 assert(_heap_start != NULL, "heap bounds should look ok"); 747 assert(_heap_start != NULL, "heap bounds should look ok");
747 assert(_heap_end != NULL, "heap bounds should look ok"); 748 assert(_heap_end != NULL, "heap bounds should look ok");
748 assert(_heap_start < _heap_end, "heap bounds should look ok"); 749 assert(_heap_start < _heap_end, "heap bounds should look ok");
749 750
750 // reset all the marking data structures and any necessary flags 751 // Reset all the marking data structures and any necessary flags
751 clear_marking_state(); 752 reset_marking_state();
752 753
753 if (verbose_low()) { 754 if (verbose_low()) {
754 gclog_or_tty->print_cr("[global] resetting"); 755 gclog_or_tty->print_cr("[global] resetting");
755 } 756 }
756 757
762 } 763 }
763 764
764 // we need this to make sure that the flag is on during the evac 765 // we need this to make sure that the flag is on during the evac
765 // pause with initial mark piggy-backed 766 // pause with initial mark piggy-backed
766 set_concurrent_marking_in_progress(); 767 set_concurrent_marking_in_progress();
768 }
769
770
771 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
772 _markStack.set_should_expand();
773 _markStack.setEmpty(); // Also clears the _markStack overflow flag
774 if (clear_overflow) {
775 clear_has_overflown();
776 } else {
777 assert(has_overflown(), "pre-condition");
778 }
779 _finger = _heap_start;
780
781 for (uint i = 0; i < _max_worker_id; ++i) {
782 CMTaskQueue* queue = _task_queues->queue(i);
783 queue->set_empty();
784 }
767 } 785 }
768 786
769 void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) { 787 void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
770 assert(active_tasks <= _max_worker_id, "we should not have more"); 788 assert(active_tasks <= _max_worker_id, "we should not have more");
771 789
794 } 812 }
795 813
796 void ConcurrentMark::set_non_marking_state() { 814 void ConcurrentMark::set_non_marking_state() {
797 // We set the global marking state to some default values when we're 815 // We set the global marking state to some default values when we're
798 // not doing marking. 816 // not doing marking.
799 clear_marking_state(); 817 reset_marking_state();
800 _active_tasks = 0; 818 _active_tasks = 0;
801 clear_concurrent_marking_in_progress(); 819 clear_concurrent_marking_in_progress();
802 } 820 }
803 821
804 ConcurrentMark::~ConcurrentMark() { 822 ConcurrentMark::~ConcurrentMark() {
961 // task 0 is responsible for clearing the global data structures 979 // task 0 is responsible for clearing the global data structures
962 // We should be here because of an overflow. During STW we should 980 // We should be here because of an overflow. During STW we should
963 // not clear the overflow flag since we rely on it being true when 981 // not clear the overflow flag since we rely on it being true when
964 // we exit this method to abort the pause and restart concurent 982 // we exit this method to abort the pause and restart concurent
965 // marking. 983 // marking.
966 clear_marking_state(concurrent() /* clear_overflow */); 984 reset_marking_state(concurrent() /* clear_overflow */);
967 force_overflow()->update(); 985 force_overflow()->update();
968 986
969 if (G1Log::fine()) { 987 if (G1Log::fine()) {
970 gclog_or_tty->date_stamp(PrintGCDateStamps); 988 gclog_or_tty->date_stamp(PrintGCDateStamps);
971 gclog_or_tty->stamp(PrintGCTimeStamps); 989 gclog_or_tty->stamp(PrintGCTimeStamps);
1255 weakRefsWork(clear_all_soft_refs); 1273 weakRefsWork(clear_all_soft_refs);
1256 1274
1257 if (has_overflown()) { 1275 if (has_overflown()) {
1258 // Oops. We overflowed. Restart concurrent marking. 1276 // Oops. We overflowed. Restart concurrent marking.
1259 _restart_for_overflow = true; 1277 _restart_for_overflow = true;
1260 // Clear the flag. We do not need it any more. 1278 // Clear the marking state because we will be restarting
1261 clear_has_overflown(); 1279 // marking due to overflowing the global mark stack.
1280 reset_marking_state();
1262 if (G1TraceMarkStackOverflow) { 1281 if (G1TraceMarkStackOverflow) {
1263 gclog_or_tty->print_cr("\nRemark led to restart for overflow."); 1282 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1264 } 1283 }
1265 } else { 1284 } else {
1266 // Aggregate the per-task counting data that we have accumulated 1285 // Aggregate the per-task counting data that we have accumulated
1280 Universe::heap()->prepare_for_verify(); 1299 Universe::heap()->prepare_for_verify();
1281 Universe::verify(/* silent */ false, 1300 Universe::verify(/* silent */ false,
1282 /* option */ VerifyOption_G1UseNextMarking); 1301 /* option */ VerifyOption_G1UseNextMarking);
1283 } 1302 }
1284 assert(!restart_for_overflow(), "sanity"); 1303 assert(!restart_for_overflow(), "sanity");
1304 // Completely reset the marking state since marking completed
1305 set_non_marking_state();
1285 } 1306 }
1286 1307
1287 // Expand the marking stack, if we have to and if we can. 1308 // Expand the marking stack, if we have to and if we can.
1288 if (_markStack.should_expand()) { 1309 if (_markStack.should_expand()) {
1289 _markStack.expand(); 1310 _markStack.expand();
1290 }
1291
1292 // Reset the marking state if marking completed
1293 if (!restart_for_overflow()) {
1294 set_non_marking_state();
1295 } 1311 }
1296 1312
1297 #if VERIFY_OBJS_PROCESSED 1313 #if VERIFY_OBJS_PROCESSED
1298 _scan_obj_cl.objs_processed = 0; 1314 _scan_obj_cl.objs_processed = 0;
1299 ThreadLocalObjQueue::objs_enqueued = 0; 1315 ThreadLocalObjQueue::objs_enqueued = 0;
2961 } 2977 }
2962 } 2978 }
2963 } 2979 }
2964 #endif // PRODUCT 2980 #endif // PRODUCT
2965 2981
2966 void ConcurrentMark::clear_marking_state(bool clear_overflow) {
2967 _markStack.set_should_expand();
2968 _markStack.setEmpty(); // Also clears the _markStack overflow flag
2969 if (clear_overflow) {
2970 clear_has_overflown();
2971 } else {
2972 assert(has_overflown(), "pre-condition");
2973 }
2974 _finger = _heap_start;
2975
2976 for (uint i = 0; i < _max_worker_id; ++i) {
2977 CMTaskQueue* queue = _task_queues->queue(i);
2978 queue->set_empty();
2979 }
2980 }
2981
2982 // Aggregate the counting data that was constructed concurrently 2982 // Aggregate the counting data that was constructed concurrently
2983 // with marking. 2983 // with marking.
2984 class AggregateCountDataHRClosure: public HeapRegionClosure { 2984 class AggregateCountDataHRClosure: public HeapRegionClosure {
2985 G1CollectedHeap* _g1h; 2985 G1CollectedHeap* _g1h;
2986 ConcurrentMark* _cm; 2986 ConcurrentMark* _cm;
3183 // Clear all marks to force marking thread to do nothing 3183 // Clear all marks to force marking thread to do nothing
3184 _nextMarkBitMap->clearAll(); 3184 _nextMarkBitMap->clearAll();
3185 // Clear the liveness counting data 3185 // Clear the liveness counting data
3186 clear_all_count_data(); 3186 clear_all_count_data();
3187 // Empty mark stack 3187 // Empty mark stack
3188 clear_marking_state(); 3188 reset_marking_state();
3189 for (uint i = 0; i < _max_worker_id; ++i) { 3189 for (uint i = 0; i < _max_worker_id; ++i) {
3190 _tasks[i]->clear_region_fields(); 3190 _tasks[i]->clear_region_fields();
3191 } 3191 }
3192 _has_aborted = true; 3192 _has_aborted = true;
3193 3193