comparison: src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 8853:2e093b564241

7014552: gc/lock/jni/jnilockXXX works too slow on 1-processor machine
Summary: Keep a counter of how many times we were stalled by the GC locker; add a diagnostic flag which sets the limit.
Reviewed-by: brutisso, ehelin, johnc

author:   mgerdin
date:     Thu, 28 Mar 2013 10:27:28 +0100
parents:  ad747ee9d0b1
children: 24ef5fb05e0f
comparing 8827:42e370795a39 with 8853:2e093b564241
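
Before the first hunk, the shape of the whole change in one place: each allocation path now counts how many times it has been stalled by the GC locker and gives up once a limit is crossed, instead of stalling indefinitely. A minimal standalone sketch of that pattern follows; try_allocate, stall_until_gc_locker_clear and kRetryLimit are illustrative stubs, not HotSpot names, and the limit value 2 is arbitrary (the real default lives with the flag definition, not in this file).

#include <cstddef>

// Stand-ins for the real allocator and for GC_locker::stall_until_clear().
static void* try_allocate(size_t /*size*/)  { return NULL; }  // stub: always fails
static void  stall_until_gc_locker_clear()  {}                // stub: no-op

static const int kRetryLimit = 2;  // plays the role of GCLockerRetryAllocationCount

void* allocate_with_bounded_stall(size_t size) {
  for (int retries = 0; /* we'll return */; ) {
    void* result = try_allocate(size);
    if (result != NULL) {
      return result;               // allocation satisfied
    }
    if (retries > kRetryLimit) {
      return NULL;                 // budget spent: stop stalling, let the caller fail
    }
    stall_until_gc_locker_clear(); // wait out the JNI critical section
    retries += 1;                  // each stall counts against the limit
  }
}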
@@ -852,27 +852,28 @@
 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 
   unsigned int dummy_gc_count_before;
-  return attempt_allocation(word_size, &dummy_gc_count_before);
+  int dummy_gclocker_retry_count = 0;
+  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 }
 
 HeapWord*
 G1CollectedHeap::mem_allocate(size_t word_size,
                               bool* gc_overhead_limit_was_exceeded) {
   assert_heap_not_locked_and_not_at_safepoint();
 
   // Loop until the allocation is satisfied, or unsatisfied after GC.
-  for (int try_count = 1; /* we'll return */; try_count += 1) {
+  for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
     unsigned int gc_count_before;
 
     HeapWord* result = NULL;
     if (!isHumongous(word_size)) {
-      result = attempt_allocation(word_size, &gc_count_before);
+      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
     } else {
-      result = attempt_allocation_humongous(word_size, &gc_count_before);
+      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
     }
     if (result != NULL) {
       return result;
     }
 
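
A detail worth noticing in the hunk above: gclocker_retry_count is declared in the for-initializer, so it accumulates across the iterations of one mem_allocate call but starts at zero again for the next allocation request. A reduced, runnable model of that scoping (all names here are illustrative, not HotSpot's):

#include <cstdio>

// Stand-in for attempt_allocation(): pretend every attempt stalls once
// and the third attempt finally succeeds.
static int attempt(int* gclocker_retry_count) {
  *gclocker_retry_count += 1;
  return (*gclocker_retry_count >= 3) ? 42 : 0;
}

static int mem_allocate_model() {
  // Both counters live for exactly one call of this function.
  for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    int result = attempt(&gclocker_retry_count);
    if (result != 0) {
      printf("satisfied on try %d after %d stalls\n", try_count, gclocker_retry_count);
      return result;
    }
  }
}

int main() {
  mem_allocate_model();  // prints: satisfied on try 3 after 3 stalls
  return 0;
}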
@@ -892,10 +893,13 @@
         // this for non-humongous allocations, though.
         dirty_young_block(result, word_size);
       }
       return result;
     } else {
+      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
+        return NULL;
+      }
       assert(op.result() == NULL,
              "the result should be NULL if the VM op did not succeed");
     }
 
     // Give a warning if we seem to be looping forever.
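
The early return added above is what makes the previously unbounded loop terminate: once the budget is spent, mem_allocate hands back NULL instead of scheduling yet another collection, and the layer above converts that into an allocation failure (ultimately an OutOfMemoryError at the Java level). A caller-side model of that contract, with hypothetical names:

#include <cstddef>
#include <stdexcept>

// Stand-in for G1CollectedHeap::mem_allocate(): after this change, NULL
// means "retry budget spent", not "try me again".
static void* mem_allocate_model(size_t /*size*/) { return NULL; }

void* allocate_or_fail(size_t size) {
  void* p = mem_allocate_model(size);
  if (p == NULL) {
    // Model of the VM turning the failed allocation into an error
    // instead of stalling on the GC locker forever.
    throw std::runtime_error("out of memory (model)");
  }
  return p;
}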
@@ -908,11 +912,12 @@
   ShouldNotReachHere();
   return NULL;
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
-                                                   unsigned int *gc_count_before_ret) {
+                                                   unsigned int *gc_count_before_ret,
+                                                   int* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().
 
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
          "be called for humongous allocation requests");
@@ -984,14 +989,20 @@
         MutexLockerEx x(Heap_lock);
         *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
+      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = total_collections();
+        return NULL;
+      }
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
       GC_locker::stall_until_clear();
+      (*gclocker_retry_count_ret) += 1;
     }
 
     // We can reach here if we were unsuccessful in scheduling a
     // collection (because another thread beat us to it) or if we were
     // stalled due to the GC locker. In either case we should retry the
@@ -1017,4 +1028,4 @@
   ShouldNotReachHere();
   return NULL;
 }
 
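
A usage note for anyone reproducing the original slowdown: the Summary describes the new flag as diagnostic, so adjusting the stall budget at run time would take the usual diagnostic-flag form, -XX:+UnlockDiagnosticVMOptions -XX:GCLockerRetryAllocationCount=<n>. The flag's default value is set where the flag is declared, not in the hunks shown here.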
@@ -1021,7 +1032,8 @@
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                                        unsigned int * gc_count_before_ret) {
+                                                        unsigned int * gc_count_before_ret,
+                                                        int* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
   // allocation is not humongous do this, otherwise do that"
   // conditional paths which would obscure its flow. In fact, an early
@@ -1102,14 +1114,20 @@
         MutexLockerEx x(Heap_lock);
         *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
+      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = total_collections();
+        return NULL;
+      }
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
       GC_locker::stall_until_clear();
+      (*gclocker_retry_count_ret) += 1;
     }
 
     // We can reach here if we were unsuccessful in scheduling a
     // collection (because another thread beat us to it) or if we were
     // stalled due to the GC locker. In either case we should retry the