comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 6106:1d478c993020

7143858: G1: Back to back young GCs with the second GC having a minimally sized eden
Summary: Before the last thread to leave a JNI critical region was able to schedule a GCLocker Initiated GC, another thread attempting an allocation could see that the GCLocker was no longer active and successfully schedule a GC of its own. Stall allocating threads until the GCLocker Initiated GC is performed and then retry the allocation.
Reviewed-by: brutisso, huntch
author johnc
date Tue, 29 May 2012 10:18:02 -0700
parents 9d679effd28c
children bbc900c2482a
comparison: 6105:9a344d88dc22 vs 6106:1d478c993020
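The change is identical in the two allocation paths shown below (the slow path and the humongous-object path): while still holding the Heap_lock, an allocating thread now distinguishes three GCLocker states instead of two. The following standalone sketch illustrates only that three-way decision; it is not HotSpot code, and the stub booleans and the should_try_gc_decision() helper are hypothetical stand-ins for GC_locker::is_active_and_needs_gc() and GC_locker::needs_gc().

#include <cstdio>

// Hypothetical stand-ins for the GCLocker state queries; in HotSpot these
// would be GC_locker::is_active_and_needs_gc() and GC_locker::needs_gc().
static bool locker_active_and_needs_gc = false; // a thread is still inside a JNI critical region
static bool locker_needs_gc            = true;  // GCLocker-initiated GC scheduled but not yet performed

static bool is_active_and_needs_gc() { return locker_active_and_needs_gc; }
static bool needs_gc()               { return locker_needs_gc; }

// The decision made while (conceptually) holding the Heap_lock.
static bool should_try_gc_decision() {
  if (is_active_and_needs_gc()) {
    // A critical region is still active; we cannot collect now.
    return false;
  } else if (needs_gc()) {
    // The GCLocker is no longer active but its GC has not happened yet.
    // Before the fix the allocating thread would schedule its own young GC
    // here, producing the back-to-back GCs from the bug report; after the
    // fix it backs off, stalls, and retries the allocation.
    return false;
  } else {
    // No pending GCLocker work: read the GC count and try a collection.
    return true;
  }
}

int main() {
  std::printf("should_try_gc = %s\n", should_try_gc_decision() ? "true" : "false");
  return 0;
}

With the pending-GC flag set, as in the scenario from the summary above, the sketch returns false: the thread stalls via GC_locker::stall_until_clear() (as in the patch below) and retries its allocation after the GCLocker Initiated GC has run, instead of scheduling a second young GC against a minimally sized eden.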
@@ -950,13 +950,22 @@
             return result;
           }
         }
         should_try_gc = false;
       } else {
-        // Read the GC count while still holding the Heap_lock.
-        gc_count_before = total_collections();
-        should_try_gc = true;
+        // The GCLocker may not be active but the GCLocker initiated
+        // GC may not yet have been performed (GCLocker::needs_gc()
+        // returns true). In this case we do not try this GC and
+        // wait until the GCLocker initiated GC is performed, and
+        // then retry the allocation.
+        if (GC_locker::needs_gc()) {
+          should_try_gc = false;
+        } else {
+          // Read the GC count while still holding the Heap_lock.
+          gc_count_before = total_collections();
+          should_try_gc = true;
+        }
       }
     }
 
     if (should_try_gc) {
       bool succeeded;
@@ -973,10 +982,13 @@
         MutexLockerEx x(Heap_lock);
         *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
+      // The GCLocker is either active or the GCLocker initiated
+      // GC has not yet been performed. Stall until it is and
+      // then retry the allocation.
       GC_locker::stall_until_clear();
     }
 
     // We can reach here if we were unsuccessul in scheduling a
     // collection (because another thread beat us to it) or if we were
@@ -1052,13 +1064,22 @@
       }
 
       if (GC_locker::is_active_and_needs_gc()) {
         should_try_gc = false;
       } else {
-        // Read the GC count while still holding the Heap_lock.
-        gc_count_before = total_collections();
-        should_try_gc = true;
+        // The GCLocker may not be active but the GCLocker initiated
+        // GC may not yet have been performed (GCLocker::needs_gc()
+        // returns true). In this case we do not try this GC and
+        // wait until the GCLocker initiated GC is performed, and
+        // then retry the allocation.
+        if (GC_locker::needs_gc()) {
+          should_try_gc = false;
+        } else {
+          // Read the GC count while still holding the Heap_lock.
+          gc_count_before = total_collections();
+          should_try_gc = true;
+        }
       }
     }
 
     if (should_try_gc) {
       // If we failed to allocate the humongous object, we should try to
@@ -1079,10 +1100,13 @@
         MutexLockerEx x(Heap_lock);
         *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
+      // The GCLocker is either active or the GCLocker initiated
+      // GC has not yet been performed. Stall until it is and
+      // then retry the allocation.
       GC_locker::stall_until_clear();
     }
 
     // We can reach here if we were unsuccessul in scheduling a
     // collection (because another thread beat us to it) or if we were