comparison src/share/vm/gc_implementation/g1/vm_operations_g1.cpp @ 6120:37552638d24a

7172388: G1: _total_full_collections should not be incremented for concurrent cycles
Reviewed-by: azeemj, jmasa
author brutisso
date Tue, 05 Jun 2012 22:30:24 +0200
parents 1096fc5a52eb
children f2110083203d
comparison
equal deleted inserted replaced
6119:a297b0e14605 6120:37552638d24a
62 GCCause::Cause gc_cause) 62 GCCause::Cause gc_cause)
63 : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause), 63 : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
64 _should_initiate_conc_mark(should_initiate_conc_mark), 64 _should_initiate_conc_mark(should_initiate_conc_mark),
65 _target_pause_time_ms(target_pause_time_ms), 65 _target_pause_time_ms(target_pause_time_ms),
66 _should_retry_gc(false), 66 _should_retry_gc(false),
67 _full_collections_completed_before(0) { 67 _old_marking_cycles_completed_before(0) {
68 guarantee(target_pause_time_ms > 0.0, 68 guarantee(target_pause_time_ms > 0.0,
69 err_msg("target_pause_time_ms = %1.6lf should be positive", 69 err_msg("target_pause_time_ms = %1.6lf should be positive",
70 target_pause_time_ms)); 70 target_pause_time_ms));
71 guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause, 71 guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
72 "we can only request an allocation if the GC cause is for " 72 "we can only request an allocation if the GC cause is for "
110 } 110 }
111 } 111 }
112 112
113 GCCauseSetter x(g1h, _gc_cause); 113 GCCauseSetter x(g1h, _gc_cause);
114 if (_should_initiate_conc_mark) { 114 if (_should_initiate_conc_mark) {
115 // It's safer to read full_collections_completed() here, given 115 // It's safer to read old_marking_cycles_completed() here, given
116 // that noone else will be updating it concurrently. Since we'll 116 // that noone else will be updating it concurrently. Since we'll
117 // only need it if we're initiating a marking cycle, no point in 117 // only need it if we're initiating a marking cycle, no point in
118 // setting it earlier. 118 // setting it earlier.
119 _full_collections_completed_before = g1h->full_collections_completed(); 119 _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();
120 120
121 // At this point we are supposed to start a concurrent cycle. We 121 // At this point we are supposed to start a concurrent cycle. We
122 // will do so if one is not already in progress. 122 // will do so if one is not already in progress.
123 bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause); 123 bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
124 124
179 assert(ExplicitGCInvokesConcurrent, 179 assert(ExplicitGCInvokesConcurrent,
180 "the only way to be here is if ExplicitGCInvokesConcurrent is set"); 180 "the only way to be here is if ExplicitGCInvokesConcurrent is set");
181 181
182 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 182 G1CollectedHeap* g1h = G1CollectedHeap::heap();
183 183
184 // In the doit() method we saved g1h->full_collections_completed() 184 // In the doit() method we saved g1h->old_marking_cycles_completed()
185 // in the _full_collections_completed_before field. We have to 185 // in the _old_marking_cycles_completed_before field. We have to
186 // wait until we observe that g1h->full_collections_completed() 186 // wait until we observe that g1h->old_marking_cycles_completed()
187 // has increased by at least one. This can happen if a) we started 187 // has increased by at least one. This can happen if a) we started
188 // a cycle and it completes, b) a cycle already in progress 188 // a cycle and it completes, b) a cycle already in progress
189 // completes, or c) a Full GC happens. 189 // completes, or c) a Full GC happens.
190 190
191 // If the condition has already been reached, there's no point in 191 // If the condition has already been reached, there's no point in
192 // actually taking the lock and doing the wait. 192 // actually taking the lock and doing the wait.
193 if (g1h->full_collections_completed() <= 193 if (g1h->old_marking_cycles_completed() <=
194 _full_collections_completed_before) { 194 _old_marking_cycles_completed_before) {
195 // The following is largely copied from CMS 195 // The following is largely copied from CMS
196 196
197 Thread* thr = Thread::current(); 197 Thread* thr = Thread::current();
198 assert(thr->is_Java_thread(), "invariant"); 198 assert(thr->is_Java_thread(), "invariant");
199 JavaThread* jt = (JavaThread*)thr; 199 JavaThread* jt = (JavaThread*)thr;
200 ThreadToNativeFromVM native(jt); 200 ThreadToNativeFromVM native(jt);
201 201
202 MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); 202 MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
203 while (g1h->full_collections_completed() <= 203 while (g1h->old_marking_cycles_completed() <=
204 _full_collections_completed_before) { 204 _old_marking_cycles_completed_before) {
205 FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag); 205 FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
206 } 206 }
207 } 207 }
208 } 208 }
209 } 209 }