Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/shared/vmGCOperations.cpp @ 20278:2c6ef90f030a
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
author | stefank |
---|---|
date | Mon, 07 Jul 2014 10:12:40 +0200 |
parents | 9c3dc501b5eb |
children | bac98749fe00 |
comparison legend: equal | deleted | inserted | replaced
20277:882004b9e7e1 | 20278:2c6ef90f030a |
---|---|
207 GenCollectedHeap* gch = GenCollectedHeap::heap(); | 207 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
208 GCCauseSetter gccs(gch, _gc_cause); | 208 GCCauseSetter gccs(gch, _gc_cause); |
209 gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level); | 209 gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level); |
210 } | 210 } |
211 | 211 |
212 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() { | |
213 #if INCLUDE_ALL_GCS | |
214 if (UseConcMarkSweepGC || UseG1GC) { | |
215 if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) { | |
216 MetaspaceGC::set_should_concurrent_collect(true); | |
217 } else if (UseG1GC) { | |
218 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
219 g1h->g1_policy()->set_initiate_conc_mark_if_possible(); | |
220 | |
221 GCCauseSetter x(g1h, _gc_cause); | |
222 | |
223 // At this point we are supposed to start a concurrent cycle. We | |
224 // will do so if one is not already in progress. | |
225 bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause); | |
226 | |
227 if (should_start) { | |
228 double pause_target = g1h->g1_policy()->max_pause_time_ms(); | |
229 g1h->do_collection_pause_at_safepoint(pause_target); | |
230 } | |
231 } | |
232 | |
233 return true; | |
234 } | |
235 #endif | |
236 return false; | |
237 } | |
238 | |
239 static void log_metaspace_alloc_failure_for_concurrent_GC() { | |
240 if (Verbose && PrintGCDetails) { | |
241 if (UseConcMarkSweepGC) { | |
242 gclog_or_tty->print_cr("\nCMS full GC for Metaspace"); | |
243 } else if (UseG1GC) { | |
244 gclog_or_tty->print_cr("\nG1 full GC for Metaspace"); | |
245 } | |
246 } | |
247 } | |
248 | |
// Retry a failed metadata allocation, escalating through progressively
// heavier measures and returning as soon as one succeeds:
//   1. plain re-allocation (another thread's GC may have freed space),
//   2. concurrent GC (CMS/G1) plus metaspace expansion,
//   3. full GC (soft refs kept) then allocation,
//   4. metaspace expansion,
//   5. last-ditch GC (clears soft refs) then allocation.
// On total failure, _result stays NULL; the operation may also be marked
// gc-locked so the caller can retry after the GC_locker is released.
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available. Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1 expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    // Expansion did not help; note that we are falling through to a
    // stop-the-world full collection below.
    log_metaspace_alloc_failure_for_concurrent_GC();
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC try to allocate without expanding. Could fail
  // and expansion will be tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again. A last-ditch collection will clear softrefs. This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                           SIZE_FORMAT, _size);
  }

  // Everything failed; if the GC_locker prevented a needed GC, flag this
  // operation so the allocation can be retried once the locker releases.
  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}