comparison src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @ 4829:9509c20bba28
6976060: G1: humongous object allocations should initiate marking cycles when necessary
Reviewed-by: tonyp, johnc
author:   brutisso
date:     Mon, 16 Jan 2012 22:10:05 +0100
parents:  2ace1c4ee8da
children: 6a78aa6ac1ff
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  (4828:851b58c26def)
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  (4829:9509c20bba28)
@@ -210,12 +210,10 @@
   _last_gc_was_young(false),
 
   _eden_bytes_before_gc(0),
   _survivor_bytes_before_gc(0),
   _capacity_before_gc(0),
-
-  _prev_collection_pause_used_at_end_bytes(0),
 
   _eden_cset_region_length(0),
   _survivor_cset_region_length(0),
   _old_cset_region_length(0),
 
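The initializer deleted above belongs to _prev_collection_pause_used_at_end_bytes, the state behind the old end-of-pause heuristic removed in the last hunk below: a concurrent cycle was requested only when occupancy exceeded the IHOP-derived threshold and also exceeded the occupancy recorded at the end of the previous pause. The new need_to_start_conc_mark() keeps only the threshold test. A minimal distillation of the two gating conditions (names abbreviated for illustration, not quoted from the source):

    // Old gating, removed in this changeset:
    bool start_old = cur_used > threshold &&
                     cur_used > prev_pause_used_at_end;
    // New gating in need_to_start_conc_mark(): no marking may already be
    // running, and the threshold alone decides; a cycle is only actually
    // requested while collections are still young (not mixed).
    bool start_new = !mark_in_progress && gcs_are_young &&
                     cur_used > threshold;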
@@ -1138,10 +1136,49 @@
     }
   }
   return ret;
 }
 
+bool G1CollectorPolicy::need_to_start_conc_mark(const char* source) {
+  if (_g1->mark_in_progress()) {
+    return false;
+  }
+
+  size_t marking_initiating_used_threshold =
+    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
+  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+
+  if (cur_used_bytes > marking_initiating_used_threshold) {
+    if (gcs_are_young()) {
+      ergo_verbose4(ErgoConcCycles,
+                    "request concurrent cycle initiation",
+                    ergo_format_reason("occupancy higher than threshold")
+                    ergo_format_byte("occupancy")
+                    ergo_format_byte_perc("threshold")
+                    ergo_format_str("source"),
+                    cur_used_bytes,
+                    marking_initiating_used_threshold,
+                    (double) InitiatingHeapOccupancyPercent,
+                    source);
+      return true;
+    } else {
+      ergo_verbose4(ErgoConcCycles,
+                    "do not request concurrent cycle initiation",
+                    ergo_format_reason("still doing mixed collections")
+                    ergo_format_byte("occupancy")
+                    ergo_format_byte_perc("threshold")
+                    ergo_format_str("source"),
+                    cur_used_bytes,
+                    marking_initiating_used_threshold,
+                    (double) InitiatingHeapOccupancyPercent,
+                    source);
+    }
+  }
+
+  return false;
+}
+
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
 
 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
   double end_time_sec = os::elapsedTime();
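To make the integer arithmetic of the new predicate concrete, here is a minimal standalone sketch of the same computation. The 8 GB capacity and 4 GB non-young occupancy are invented example values, not taken from the changeset; InitiatingHeapOccupancyPercent defaults to 45 in HotSpot of this vintage.

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      // Example values only; see need_to_start_conc_mark() above.
      uint64_t capacity       = 8ULL * 1024 * 1024 * 1024; // 8 GB heap
      uint64_t ihop           = 45;                        // default InitiatingHeapOccupancyPercent
      uint64_t non_young_used = 4ULL * 1024 * 1024 * 1024; // bytes outside the young gen

      // Divide first, then scale, exactly as the policy code does:
      // (8589934592 / 100) * 45 = 85899345 * 45 = 3865470525 bytes (~3.6 GiB).
      uint64_t threshold = (capacity / 100) * ihop;

      printf("threshold = %llu bytes\n", (unsigned long long) threshold);
      printf("request marking cycle: %s\n",
             non_young_used > threshold ? "yes" : "no"); // "yes": 4 GiB > ~3.6 GiB
      return 0;
    }

The divide-first form avoids overflowing a 32-bit size_t when multiplying a multi-gigabyte capacity by the percent, and the truncating division loses under 100 bytes of precision, which is negligible at heap scale.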
@@ -1164,48 +1201,20 @@
     // do that for any other surv rate groups too
   }
 #endif // PRODUCT
 
   last_pause_included_initial_mark = during_initial_mark_pause();
-  if (last_pause_included_initial_mark)
+  if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
+  }
 
-  size_t marking_initiating_used_threshold =
-    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
-
-  if (!_g1->mark_in_progress() && !_last_young_gc) {
-    assert(!last_pause_included_initial_mark, "invariant");
-    if (cur_used_bytes > marking_initiating_used_threshold) {
-      if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
-        assert(!during_initial_mark_pause(), "we should not see this here");
-
-        ergo_verbose3(ErgoConcCycles,
-                      "request concurrent cycle initiation",
-                      ergo_format_reason("occupancy higher than threshold")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte_perc("threshold"),
-                      cur_used_bytes,
-                      marking_initiating_used_threshold,
-                      (double) InitiatingHeapOccupancyPercent);
-
-        // Note: this might have already been set, if during the last
-        // pause we decided to start a cycle but at the beginning of
-        // this pause we decided to postpone it. That's OK.
-        set_initiate_conc_mark_if_possible();
-      } else {
-        ergo_verbose2(ErgoConcCycles,
-                      "do not request concurrent cycle initiation",
-                      ergo_format_reason("occupancy lower than previous occupancy")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte("previous occupancy"),
-                      cur_used_bytes,
-                      _prev_collection_pause_used_at_end_bytes);
-      }
-    }
-  }
-
-  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
+  if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+    // Note: this might have already been set, if during the last
+    // pause we decided to start a cycle but at the beginning of
+    // this pause we decided to postpone it. That's OK.
+    set_initiate_conc_mark_if_possible();
+  }
 
   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                           end_time_sec, false);
 
   // This assert is exempted when we're doing parallel collection pauses,
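What this comparison does not show is the other half of the fix named in the bug title: the humongous allocation path can now ask the policy the same question through the shared predicate, with its own source string for the ergo log. A hedged sketch of that call-site shape follows; the method context and GCCause name reflect my reading of the rest of the changeset and are assumptions, not quotations from this hunk.

    // Hypothetical sketch: G1CollectedHeap's humongous allocation slow path
    // consulting the shared predicate (shape assumed, not shown above).
    if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation")) {
      // Trigger an initial-mark pause so marking can start before the
      // large allocation pushes occupancy further past the threshold.
      collect(GCCause::_g1_humongous_allocation);
    }

Factoring the decision into need_to_start_conc_mark(const char* source) is what makes this reuse possible: the "end of GC" caller above and any allocation-time caller share one policy, and the source argument records which path requested the cycle.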