comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 4910:caa4652b4414

7129892: G1: explicit marking cycle initiation might fail to initiate a marking cycle
Summary: If we try to schedule an initial-mark GC in order to explicitly start a concurrent marking cycle and it gets pre-empted by another GC, we should retry the attempt as long as it is appropriate for the GC cause.
Reviewed-by: brutisso, johnc
author tonyp
date Tue, 14 Feb 2012 08:21:08 -0500
parents 379b22e03c32
children a9647476d1a4
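
The summary above describes the fix in prose: if the initial-mark pause that was meant to kick off a concurrent marking cycle gets pre-empted by another GC, the request should be retried unless a Full GC has happened in the meantime. The following stand-alone sketch models that retry loop; Heap and schedule_initial_mark_pause() are illustrative stand-ins, not HotSpot code, and the real logic is in the G1CollectedHeap::collect() hunk further down.

    #include <cstdio>

    // Illustrative stand-in for the heap state the retry loop consults.
    struct Heap {
      unsigned int total_full_collections = 0;  // bumped by every Full GC
      int preemptions_left = 1;                 // first attempt gets pre-empted
    };

    // Stand-in for scheduling VM_G1IncCollectionPause with
    // should_initiate_conc_mark = true; returns pause_succeeded().
    static bool schedule_initial_mark_pause(Heap& heap) {
      if (heap.preemptions_left > 0) {
        heap.preemptions_left--;
        return false;                           // another GC got in first
      }
      return true;
    }

    int main() {
      Heap heap;
      bool retry_gc;
      do {
        retry_gc = false;
        unsigned int full_gc_count_before = heap.total_full_collections;

        if (!schedule_initial_mark_pause(heap)) {
          // Pre-empted. Retry only if no Full GC ran in the meantime; a
          // Full GC collected the whole heap, so a new cycle is pointless.
          if (full_gc_count_before == heap.total_full_collections) {
            retry_gc = true;
            std::puts("initial-mark pause pre-empted, retrying");
          }
        } else {
          std::puts("initial-mark pause scheduled");
        }
      } while (retry_gc);
      return 0;
    }
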
--- 4909:95f6641e38e0
+++ 4910:caa4652b4414
@@ -956,11 +956,11 @@
           }
         }
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }

     if (should_try_gc) {
@@ -974,11 +974,11 @@
       if (succeeded) {
         // If we get here we successfully scheduled a collection which
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
       GC_locker::stall_until_clear();
     }
@@ -1029,11 +1029,12 @@
   // Humongous objects can exhaust the heap quickly, so we should check if we
   // need to start a marking cycle at each humongous object allocation. We do
   // the check before we do the actual allocation. The reason for doing it
   // before the allocation is that we avoid having to keep track of the newly
   // allocated memory while we do a GC.
-  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
+  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
+                                           word_size)) {
     collect(GCCause::_g1_humongous_allocation);
   }

   // We will loop until a) we manage to successfully perform the
   // allocation or b) we successfully schedule a collection which
@@ -1057,11 +1058,11 @@

       if (GC_locker::is_active_and_needs_gc()) {
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }

     if (should_try_gc) {
@@ -1079,11 +1080,11 @@
       if (succeeded) {
         // If we get here we successfully scheduled a collection which
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
       GC_locker::stall_until_clear();
     }
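
The slow-path hunks above only drop the now-redundant SharedHeap::heap()-> qualifier, but the pattern they touch is worth spelling out: the allocating thread samples total_collections() while it still holds the Heap_lock, and that count is later compared against the current one so a queued GC request can be skipped if another collection has already run. Below is a minimal model of that handshake with stand-in types (Heap and GcOperation are illustrative, not the HotSpot classes).

    #include <cstdio>
    #include <mutex>

    // Stand-in heap: a collection counter guarded by a lock, mirroring
    // total_collections() read under the Heap_lock in the hunks above.
    struct Heap {
      std::mutex heap_lock;
      unsigned int total_collections = 0;
    };

    // Stand-in GC request: skips itself if a collection already happened
    // since the caller sampled the count.
    struct GcOperation {
      unsigned int gc_count_before;
      bool run(Heap& heap) {
        std::lock_guard<std::mutex> x(heap.heap_lock);
        if (gc_count_before != heap.total_collections) {
          std::puts("another GC already ran; skipping");
          return false;                // request is stale, nothing to do
        }
        heap.total_collections++;      // "perform" the collection
        return true;
      }
    };

    int main() {
      Heap heap;
      unsigned int gc_count_before;
      {
        std::lock_guard<std::mutex> x(heap.heap_lock);
        // Read the GC count while still holding the Heap_lock.
        gc_count_before = heap.total_collections;
      }
      GcOperation op{gc_count_before};
      op.run(heap);   // succeeds here; a racing GC would have bumped the count
      return 0;
    }
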
@@ -2309,14 +2310,16 @@
   }
   return hr->free();
 }

 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  return
-    ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-     cause == GCCause::_g1_humongous_allocation);
+  switch (cause) {
+    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
+    case GCCause::_g1_humongous_allocation: return true;
+    default:                                return false;
+  }
 }

 #ifndef PRODUCT
 void G1CollectedHeap::allocate_dummy_regions() {
   // Let's fill up most of the region
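
The switch-based rewrite above also makes it easy to see which causes are allowed to start a concurrent cycle. A toy, self-contained version of the same mapping follows, with the real -XX:+GCLockerInvokesConcurrent and -XX:+ExplicitGCInvokesConcurrent flags replaced by plain booleans (everything else here is illustrative, not HotSpot code).

    #include <cstdio>

    // Plain-boolean stand-ins for the GCLockerInvokesConcurrent and
    // ExplicitGCInvokesConcurrent command-line flags the real code consults.
    static bool GCLockerInvokesConcurrent   = false;
    static bool ExplicitGCInvokesConcurrent = true;

    enum class Cause { gc_locker, java_lang_system_gc,
                       g1_humongous_allocation, allocation_failure };

    // Mirrors the new switch: which causes should start a concurrent
    // marking cycle (via an initial-mark pause) rather than a Full GC?
    static bool should_do_concurrent_full_gc(Cause cause) {
      switch (cause) {
        case Cause::gc_locker:               return GCLockerInvokesConcurrent;
        case Cause::java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
        case Cause::g1_humongous_allocation: return true;
        default:                             return false;
      }
    }

    int main() {
      std::printf("System.gc()          -> concurrent cycle? %d\n",
                  should_do_concurrent_full_gc(Cause::java_lang_system_gc));
      std::printf("humongous allocation -> concurrent cycle? %d\n",
                  should_do_concurrent_full_gc(Cause::g1_humongous_allocation));
      return 0;
    }
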
@@ -2406,51 +2409,70 @@
       ShouldNotReachHere(); // Unexpected use of this function
   }
 }

 void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-
-  unsigned int gc_count_before;
-  unsigned int full_gc_count_before;
-  {
-    MutexLocker ml(Heap_lock);
-
-    // Read the GC count while holding the Heap_lock
-    gc_count_before = SharedHeap::heap()->total_collections();
-    full_gc_count_before = SharedHeap::heap()->total_full_collections();
-  }
-
-  if (should_do_concurrent_full_gc(cause)) {
-    // Schedule an initial-mark evacuation pause that will start a
-    // concurrent cycle. We're setting word_size to 0 which means that
-    // we are not requesting a post-GC allocation.
-    VM_G1IncCollectionPause op(gc_count_before,
-                               0, /* word_size */
-                               true, /* should_initiate_conc_mark */
-                               g1_policy()->max_pause_time_ms(),
-                               cause);
-    VMThread::execute(&op);
-  } else {
-    if (cause == GCCause::_gc_locker
-        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
-
-      // Schedule a standard evacuation pause. We're setting word_size
-      // to 0 which means that we are not requesting a post-GC allocation.
-      VM_G1IncCollectionPause op(gc_count_before,
-                                 0, /* word_size */
-                                 false, /* should_initiate_conc_mark */
-                                 g1_policy()->max_pause_time_ms(),
-                                 cause);
-      VMThread::execute(&op);
-    } else {
-      // Schedule a Full GC.
-      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
-      VMThread::execute(&op);
-    }
-  }
+  assert_heap_not_locked();
+
+  unsigned int gc_count_before;
+  unsigned int full_gc_count_before;
+  bool retry_gc;
+
+  do {
+    retry_gc = false;
+
+    {
+      MutexLocker ml(Heap_lock);
+
+      // Read the GC count while holding the Heap_lock
+      gc_count_before = total_collections();
+      full_gc_count_before = total_full_collections();
+    }
+
+    if (should_do_concurrent_full_gc(cause)) {
+      // Schedule an initial-mark evacuation pause that will start a
+      // concurrent cycle. We're setting word_size to 0 which means that
+      // we are not requesting a post-GC allocation.
+      VM_G1IncCollectionPause op(gc_count_before,
+                                 0, /* word_size */
+                                 true, /* should_initiate_conc_mark */
+                                 g1_policy()->max_pause_time_ms(),
+                                 cause);
+      VMThread::execute(&op);
+      if (!op.pause_succeeded()) {
+        // Another GC got scheduled and prevented us from scheduling
+        // the initial-mark GC. It's unlikely that the GC that
+        // pre-empted us was also an initial-mark GC. So, we'll retry
+        // the initial-mark GC.
+
+        if (full_gc_count_before == total_full_collections()) {
+          retry_gc = true;
+        } else {
+          // A Full GC happened while we were trying to schedule the
+          // initial-mark GC. No point in starting a new cycle given
+          // that the whole heap was collected anyway.
+        }
+      }
+    } else {
+      if (cause == GCCause::_gc_locker
+          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+        // Schedule a standard evacuation pause. We're setting word_size
+        // to 0 which means that we are not requesting a post-GC allocation.
+        VM_G1IncCollectionPause op(gc_count_before,
+                                   0, /* word_size */
+                                   false, /* should_initiate_conc_mark */
+                                   g1_policy()->max_pause_time_ms(),
+                                   cause);
+        VMThread::execute(&op);
+      } else {
+        // Schedule a Full GC.
+        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
+        VMThread::execute(&op);
+      }
+    }
+  } while (retry_gc);
 }

 bool G1CollectedHeap::is_in(const void* p) const {
   if (_g1_committed.contains(p)) {
     // Given that we know that p is in the committed space,
@@ -3147,16 +3169,16 @@

     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);

     // We apply the relevant closures to all the oops in the
     // system dictionary, the string table and the code cache.
-    const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;

     process_strong_roots(true,  // activate StrongRootsScope
                          true,  // we set "collecting perm gen" to true,
                                 // so we don't reset the dirty cards in the perm gen.
-                         SharedHeap::ScanningOption(so),  // roots scanning options
+                         ScanningOption(so),  // roots scanning options
                          &rootsCl,
                          &blobsCl,
                          &rootsCl);

     // If we're verifying after the marking phase of a Full GC then we can't
@@ -4732,11 +4754,11 @@
 // This method is run in a GC worker.

 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
-                        SharedHeap::ScanningOption so,
+                        ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
                         int worker_i) {
