comparison: src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @ 1833:8b10f48633dc

6984287: Regularize how GC parallel workers are specified.
Summary: Associate number of GC workers with the workgang as opposed to the task.
Reviewed-by: johnc, ysr

author      jmasa
date        Mon, 20 Sep 2010 14:38:38 -0700
parents     6eddcbe17c83
children    f95d63e2154a
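The helper this change substitutes for the scattered `ParallelGCThreads > 0` tests is a static predicate on G1CollectedHeap. A minimal standalone sketch of the idiom, assuming the real declaration in g1CollectedHeap.hpp reduces to the same flag test (HotSpot types and the -XX flag machinery are elided; nothing below is verbatim source):

    // Sketch only -- not the verbatim HotSpot header. The point of 6984287 is
    // that "are we running parallel GC workers?" is asked in exactly one place.
    typedef unsigned int uintx;
    static uintx ParallelGCThreads = 4;  // stands in for the real -XX flag

    class G1CollectedHeap {
    public:
      // Presumed shape of the predicate used at every call site in this patch.
      static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
    };

With every caller routed through the predicate, a later change to how the worker count is decided (the stated goal: tie it to the workgang rather than to each task) edits one definition instead of each call site.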
equal deleted inserted replaced
--- src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	1781:97fbf5beff7b
+++ src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	1833:8b10f48633dc
@@ -70,11 +70,14 @@
 };
 
 // </NEW PREDICTION>
 
 G1CollectorPolicy::G1CollectorPolicy() :
-  _parallel_gc_threads((ParallelGCThreads > 0) ? ParallelGCThreads : 1),
+  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
+                        ? ParallelGCThreads : 1),
+
+
   _n_pauses(0),
   _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -1071,22 +1074,22 @@
   gclog_or_tty->print(" ");
   gclog_or_tty->print_cr("[%s: %d]", str, value);
 }
 
 double G1CollectorPolicy::avg_value (double* data) {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     double ret = 0.0;
     for (uint i = 0; i < ParallelGCThreads; ++i)
       ret += data[i];
     return ret / (double) ParallelGCThreads;
   } else {
     return data[0];
   }
 }
 
 double G1CollectorPolicy::max_value (double* data) {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     double ret = data[0];
     for (uint i = 1; i < ParallelGCThreads; ++i)
       if (data[i] > ret)
         ret = data[i];
     return ret;
@@ -1094,11 +1097,11 @@
     return data[0];
   }
 }
 
 double G1CollectorPolicy::sum_of_values (double* data) {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     double sum = 0.0;
     for (uint i = 0; i < ParallelGCThreads; i++)
       sum += data[i];
     return sum;
   } else {
@@ -1108,11 +1111,11 @@
 
 double G1CollectorPolicy::max_sum (double* data1,
                                    double* data2) {
   double ret = data1[0] + data2[0];
 
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     for (uint i = 1; i < ParallelGCThreads; ++i) {
       double data = data1[i] + data2[i];
       if (data > ret)
         ret = data;
     }
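The four reductions above (avg_value, max_value, sum_of_values, max_sum) walk per-worker timing arrays indexed by worker id, falling back to data[0] in the serial case. A self-contained sketch of the same pattern, with an illustrative fixed worker count standing in for ParallelGCThreads (names and numbers here are not from the patch):

    #include <cstdio>

    static const unsigned kWorkers = 4;  // stands in for ParallelGCThreads

    // Mean of one timing per worker, as in avg_value above.
    static double avg_value(const double* data, unsigned n) {
      double ret = 0.0;
      for (unsigned i = 0; i < n; ++i)
        ret += data[i];
      return ret / (double) n;
    }

    // Largest per-worker sum of two phases, as in max_sum above: this is the
    // wall-clock cost of the slowest worker, which is what gates the pause.
    static double max_sum(const double* d1, const double* d2, unsigned n) {
      double ret = d1[0] + d2[0];
      for (unsigned i = 1; i < n; ++i) {
        double v = d1[i] + d2[i];
        if (v > ret)
          ret = v;
      }
      return ret;
    }

    int main() {
      double scan[kWorkers] = { 1.0, 2.0, 3.0, 4.0 };
      double copy[kWorkers] = { 4.0, 3.0, 2.0, 1.0 };
      printf("avg scan time:  %.2f ms\n", avg_value(scan, kWorkers));   // 2.50
      printf("slowest worker: %.2f ms\n", max_sum(scan, copy, kWorkers)); // 5.00
      return 0;
    }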
@@ -1124,11 +1127,11 @@
 #define MIN_TIMER_GRANULARITY 0.0000001
 
 void G1CollectorPolicy::record_collection_pause_end() {
   double end_time_sec = os::elapsedTime();
   double elapsed_ms = _last_pause_time_ms;
-  bool parallel = ParallelGCThreads > 0;
+  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
   double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
   size_t rs_size =
     _cur_collection_pause_used_regions_at_start - collection_set_size();
   size_t cur_used_bytes = _g1->used();
   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
@@ -1939,11 +1942,11 @@
     // We exempt parallel collection from this check because Alloc Buffer
     // fragmentation can produce negative collections.
     // Further, we're now always doing parallel collection. But I'm still
     // leaving this here as a placeholder for a more precise assertion later.
     // (DLD, 10/05.)
-    assert((true || ParallelGCThreads > 0) ||
+    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
           _g1->evacuation_failed() ||
           recent_survival_rate <= 1.0, "Or bad frac");
     return recent_survival_rate;
   } else {
     return 1.0; // Be conservative.
@@ -1959,11 +1962,11 @@
     // We exempt parallel collection from this check because Alloc Buffer
     // fragmentation can produce negative collections.
     // Further, we're now always doing parallel collection. But I'm still
     // leaving this here as a placeholder for a more precise assertion later.
     // (DLD, 10/05.)
-    assert((true || ParallelGCThreads > 0) ||
+    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
           last_survival_rate <= 1.0, "Or bad frac");
     return last_survival_rate;
   } else {
     return 1.0;
   }
@@ -2119,11 +2122,11 @@
   if (should_print)
     print_summary(level, "Other(Calc)", calc_other_times_ms);
 }
 
 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
-  bool parallel = ParallelGCThreads > 0;
+  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
   MainBodySummary* body_summary = summary->main_body_summary();
   if (summary->get_total_seq()->num() > 0) {
     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
     if (body_summary != NULL) {
       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
@@ -2557,11 +2560,11 @@
   if (G1PrintParCleanupStats) {
     clear_marked_end = os::elapsedTime();
     gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
                            (clear_marked_end - start)*1000.0);
   }
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     const size_t OverpartitionFactor = 4;
     const size_t MinWorkUnit = 8;
     const size_t WorkUnit =
       MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
            MinWorkUnit);
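The work-unit arithmetic in the last hunk overpartitions the heap so each worker claims several chunks rather than one fixed slice, which smooths out imbalance between workers. A worked sketch with made-up inputs (1024 regions, 4 workers; neither number is from the patch):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      // Hypothetical inputs, for illustration only.
      const size_t n_regions           = 1024;
      const size_t parallel_gc_threads = 4;
      const size_t OverpartitionFactor = 4;  // aim for ~4 chunks per worker
      const size_t MinWorkUnit         = 8;  // but never fewer than 8 regions

      // Same expression as the hunk above, with std::max standing in for MAX2.
      const size_t WorkUnit =
        std::max(n_regions / (parallel_gc_threads * OverpartitionFactor),
                 MinWorkUnit);

      printf("WorkUnit = %zu regions -> %zu claimable chunks\n",
             WorkUnit, n_regions / WorkUnit);  // 64 regions -> 16 chunks
      return 0;
    }

The MinWorkUnit floor only bites on small heaps: below ParallelGCThreads * OverpartitionFactor * MinWorkUnit regions (128 with these numbers), chunks stop shrinking at 8 regions so per-chunk claiming overhead stays bounded.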