comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 10408:836a62f43af9
Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/
author     Doug Simon <doug.simon@oracle.com>
date       Wed, 19 Jun 2013 10:45:56 +0200
parents    89e4d67fdd2a f2110083203d
children   d85bdcb38fa2 6b0fd0964b87
compared revisions: 10086:e0fb8a213650 (left) vs 10408:836a62f43af9 (right)
1 /* | 1 /* |
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" | 36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" |
37 #include "gc_implementation/g1/g1Log.hpp" | 37 #include "gc_implementation/g1/g1Log.hpp" |
38 #include "gc_implementation/g1/g1MarkSweep.hpp" | 38 #include "gc_implementation/g1/g1MarkSweep.hpp" |
39 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | 39 #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
40 #include "gc_implementation/g1/g1RemSet.inline.hpp" | 40 #include "gc_implementation/g1/g1RemSet.inline.hpp" |
41 #include "gc_implementation/g1/g1YCTypes.hpp" | |
41 #include "gc_implementation/g1/heapRegion.inline.hpp" | 42 #include "gc_implementation/g1/heapRegion.inline.hpp" |
42 #include "gc_implementation/g1/heapRegionRemSet.hpp" | 43 #include "gc_implementation/g1/heapRegionRemSet.hpp" |
43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | 44 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" |
44 #include "gc_implementation/g1/vm_operations_g1.hpp" | 45 #include "gc_implementation/g1/vm_operations_g1.hpp" |
46 #include "gc_implementation/shared/gcHeapSummary.hpp" | |
47 #include "gc_implementation/shared/gcTimer.hpp" | |
48 #include "gc_implementation/shared/gcTrace.hpp" | |
49 #include "gc_implementation/shared/gcTraceTime.hpp" | |
45 #include "gc_implementation/shared/isGCActiveMark.hpp" | 50 #include "gc_implementation/shared/isGCActiveMark.hpp" |
46 #include "memory/gcLocker.inline.hpp" | 51 #include "memory/gcLocker.inline.hpp" |
47 #include "memory/genOopClosures.inline.hpp" | 52 #include "memory/genOopClosures.inline.hpp" |
48 #include "memory/generationSpec.hpp" | 53 #include "memory/generationSpec.hpp" |
49 #include "memory/referenceProcessor.hpp" | 54 #include "memory/referenceProcessor.hpp" |
74 // | 79 // |
75 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism. | 80 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism. |
76 // The number of GC workers is passed to heap_region_par_iterate_chunked(). | 81 // The number of GC workers is passed to heap_region_par_iterate_chunked(). |
77 // It does use run_task() which sets _n_workers in the task. | 82 // It does use run_task() which sets _n_workers in the task. |
78 // G1ParTask executes g1_process_strong_roots() -> | 83 // G1ParTask executes g1_process_strong_roots() -> |
79 // SharedHeap::process_strong_roots() which calls eventuall to | 84 // SharedHeap::process_strong_roots() which calls eventually to |
80 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses | 85 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses |
81 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also | 86 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also |
82 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). | 87 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). |
83 // | 88 // |
84 | 89 |
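The claim protocol named in the comment above (SubTasksDone / SequentialSubTasksDone) comes down to workers atomically claiming subtask indices so each subtask runs exactly once. A minimal sketch of that idea, using std::atomic rather than HotSpot's internal types (not the actual SubTasksDone implementation):

#include <atomic>
#include <cstddef>

class SimpleSubTasksDone {
  std::atomic<bool>* _claimed;  // one flag per subtask
  size_t _n_tasks;
public:
  explicit SimpleSubTasksDone(size_t n)
      : _claimed(new std::atomic<bool>[n]), _n_tasks(n) {
    for (size_t i = 0; i < n; i++) {
      _claimed[i].store(false);
    }
  }
  ~SimpleSubTasksDone() { delete[] _claimed; }
  // True only for the single worker that wins the race for subtask i.
  bool try_claim(size_t i) {
    bool expected = false;
    return _claimed[i].compare_exchange_strong(expected, true);
  }
};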
94 G1RemSet* g1rs, | 99 G1RemSet* g1rs, |
95 ConcurrentG1Refine* cg1r) : | 100 ConcurrentG1Refine* cg1r) : |
96 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | 101 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) |
97 {} | 102 {} |
98 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | 103 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
99 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); | 104 bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false); |
100 // This path is executed by the concurrent refine or mutator threads, | 105 // This path is executed by the concurrent refine or mutator threads, |
101 // concurrently, and so we do not care if card_ptr contains references | 106 // concurrently, and so we do not care if card_ptr contains references |
102 // that point into the collection set. | 107 // that point into the collection set. |
103 assert(!oops_into_cset, "should be"); | 108 assert(!oops_into_cset, "should be"); |
104 | 109 |
455 return hr != NULL && hr->in_collection_set(); | 460 return hr != NULL && hr->in_collection_set(); |
456 } | 461 } |
457 #endif | 462 #endif |
458 | 463 |
459 // Returns true if the reference points to an object that | 464 // Returns true if the reference points to an object that |
460 // can move in an incremental collecction. | 465 // can move in an incremental collection. |
461 bool G1CollectedHeap::is_scavengable(const void* p) { | 466 bool G1CollectedHeap::is_scavengable(const void* p) { |
462 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | 467 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
463 G1CollectorPolicy* g1p = g1h->g1_policy(); | 468 G1CollectorPolicy* g1p = g1h->g1_policy(); |
464 HeapRegion* hr = heap_region_containing(p); | 469 HeapRegion* hr = heap_region_containing(p); |
465 if (hr == NULL) { | 470 if (hr == NULL) { |
546 HR_FORMAT_PARAMS(res)); | 551 HR_FORMAT_PARAMS(res)); |
547 } | 552 } |
548 return res; | 553 return res; |
549 } | 554 } |
550 | 555 |
551 // Wait here until we get notifed either when (a) there are no | 556 // Wait here until we get notified either when (a) there are no |
552 // more free regions coming or (b) some regions have been moved on | 557 // more free regions coming or (b) some regions have been moved on |
553 // the secondary_free_list. | 558 // the secondary_free_list. |
554 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | 559 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); |
555 } | 560 } |
556 | 561 |
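The SecondaryFreeList_lock->wait() above follows the classic monitor pattern: sleep until notified that either (a) no more free regions are coming or (b) regions have landed on the secondary free list. A hedged sketch of that wait loop with standard C++ primitives standing in for HotSpot's Monitor (all names assumed):

#include <condition_variable>
#include <deque>
#include <mutex>

struct SecondaryFreeListSketch {
  std::mutex lock;
  std::condition_variable cv;
  std::deque<void*> secondary_free_list;  // placeholder region type
  bool free_regions_coming = true;

  // Mirrors the wait above: returns once (a) or (b) holds.
  void wait_for_change() {
    std::unique_lock<std::mutex> l(lock);
    cv.wait(l, [this] {
      return !free_regions_coming || !secondary_free_list.empty();
    });
  }
};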
621 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); | 626 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); |
622 | 627 |
623 uint first = G1_NULL_HRS_INDEX; | 628 uint first = G1_NULL_HRS_INDEX; |
624 if (num_regions == 1) { | 629 if (num_regions == 1) { |
625 // Only one region to allocate, no need to go through the slower | 630 // Only one region to allocate, no need to go through the slower |
626 // path. The caller will attempt the expasion if this fails, so | 631 // path. The caller will attempt the expansion if this fails, so |
627 // let's not try to expand here too. | 632 // let's not try to expand here too. |
628 HeapRegion* hr = new_region(word_size, false /* do_expand */); | 633 HeapRegion* hr = new_region(word_size, false /* do_expand */); |
629 if (hr != NULL) { | 634 if (hr != NULL) { |
630 first = hr->hrs_index(); | 635 first = hr->hrs_index(); |
631 } else { | 636 } else { |
686 HeapRegion* first_hr = region_at(first); | 691 HeapRegion* first_hr = region_at(first); |
687 // The header of the new object will be placed at the bottom of | 692 // The header of the new object will be placed at the bottom of |
688 // the first region. | 693 // the first region. |
689 HeapWord* new_obj = first_hr->bottom(); | 694 HeapWord* new_obj = first_hr->bottom(); |
690 // This will be the new end of the first region in the series that | 695 // This will be the new end of the first region in the series that |
691 // should also match the end of the last region in the seriers. | 696 // should also match the end of the last region in the series. |
692 HeapWord* new_end = new_obj + word_size_sum; | 697 HeapWord* new_end = new_obj + word_size_sum; |
693 // This will be the new top of the first region that will reflect | 698 // This will be the new top of the first region that will reflect |
694 // this allocation. | 699 // this allocation. |
695 HeapWord* new_top = new_obj + word_size; | 700 HeapWord* new_top = new_obj + word_size; |
696 | 701 |
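The three pointers above are related by simple arithmetic: new_end covers the whole series of regions, while new_top covers only the requested words, leaving fill space at the tail of the last region. A worked example with assumed sizes (a 512K-word region and a 700K-word request):

#include <cassert>
#include <cstddef>

int main() {
  const size_t GrainWords    = 512 * 1024;  // assumed region size in words
  const size_t word_size     = 700 * 1024;  // assumed humongous request
  const size_t num_regions   = 2;           // ceil(word_size / GrainWords)
  const size_t word_size_sum = num_regions * GrainWords;

  size_t bottom  = 0;                        // word offset of first region
  size_t new_obj = bottom;                   // header goes at the bottom
  size_t new_end = new_obj + word_size_sum;  // end of the last region
  size_t new_top = new_obj + word_size;      // top after this allocation

  assert(new_top <= new_end);                // tail of the last region is
  assert(new_end - new_top == 324 * 1024);   // unused (fill) space
  return 0;
}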
861 HeapWord* | 866 HeapWord* |
862 G1CollectedHeap::mem_allocate(size_t word_size, | 867 G1CollectedHeap::mem_allocate(size_t word_size, |
863 bool* gc_overhead_limit_was_exceeded) { | 868 bool* gc_overhead_limit_was_exceeded) { |
864 assert_heap_not_locked_and_not_at_safepoint(); | 869 assert_heap_not_locked_and_not_at_safepoint(); |
865 | 870 |
866 // Loop until the allocation is satisified, or unsatisfied after GC. | 871 // Loop until the allocation is satisfied, or unsatisfied after GC. |
867 for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { | 872 for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { |
868 unsigned int gc_count_before; | 873 unsigned int gc_count_before; |
869 | 874 |
870 HeapWord* result = NULL; | 875 HeapWord* result = NULL; |
871 if (!isHumongous(word_size)) { | 876 if (!isHumongous(word_size)) { |
1001 // then retry the allocation. | 1006 // then retry the allocation. |
1002 GC_locker::stall_until_clear(); | 1007 GC_locker::stall_until_clear(); |
1003 (*gclocker_retry_count_ret) += 1; | 1008 (*gclocker_retry_count_ret) += 1; |
1004 } | 1009 } |
1005 | 1010 |
1006 // We can reach here if we were unsuccessul in scheduling a | 1011 // We can reach here if we were unsuccessful in scheduling a |
1007 // collection (because another thread beat us to it) or if we were | 1012 // collection (because another thread beat us to it) or if we were |
1008 // stalled due to the GC locker. In either case we should retry the | 1013 // stalled due to the GC locker. In either case we should retry the |
1009 // allocation attempt in case another thread successfully | 1014 // allocation attempt in case another thread successfully |
1010 // performed a collection and reclaimed enough space. We do the | 1015 // performed a collection and reclaimed enough space. We do the |
1011 // first attempt (without holding the Heap_lock) here and the | 1016 // first attempt (without holding the Heap_lock) here and the |
1126 // then retry the allocation. | 1131 // then retry the allocation. |
1127 GC_locker::stall_until_clear(); | 1132 GC_locker::stall_until_clear(); |
1128 (*gclocker_retry_count_ret) += 1; | 1133 (*gclocker_retry_count_ret) += 1; |
1129 } | 1134 } |
1130 | 1135 |
1131 // We can reach here if we were unsuccessul in scheduling a | 1136 // We can reach here if we were unsuccessful in scheduling a |
1132 // collection (because another thread beat us to it) or if we were | 1137 // collection (because another thread beat us to it) or if we were |
1133 // stalled due to the GC locker. In either case we should retry the | 1138 // stalled due to the GC locker. In either case we should retry the |
1134 // allocation attempt in case another thread successfully | 1139 // allocation attempt in case another thread successfully |
1135 // performed a collection and reclaimed enough space. Give a | 1140 // performed a collection and reclaimed enough space. Give a |
1136 // warning if we seem to be looping forever. | 1141 // warning if we seem to be looping forever. |
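Both allocation slow paths above share the same shape: stall on the GC locker when it is held, otherwise try to schedule a collection, then loop back and re-attempt. A self-contained sketch of that retry loop (all names assumed, standing in for G1's methods):

#include <cstddef>

struct FakeHeap {
  int free_words;           // allocation succeeds once a "GC" has run
  bool gc_locker_active;
  void* try_allocate(size_t word_size) {
    return (size_t)free_words >= word_size ? (void*)&free_words : (void*)0;
  }
  void stall_until_clear() { gc_locker_active = false; }
  void collect() { free_words = 1024; }  // pretend the pause freed space
};

void* allocate_with_retry(FakeHeap& heap, size_t word_size,
                          int* gclocker_retry_count) {
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    void* result = heap.try_allocate(word_size);
    if (result != 0) return result;
    if (heap.gc_locker_active) {
      heap.stall_until_clear();          // wait out JNI critical sections
      (*gclocker_retry_count) += 1;
    } else {
      heap.collect();                    // schedule/perform a collection
    }
  }
}

int main() {
  FakeHeap heap = { 0, true };
  int retries = 0;
  return allocate_with_retry(heap, 100, &retries) != 0 ? 0 : 1;
}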
1269 double verify_time_ms = 0.0; | 1274 double verify_time_ms = 0.0; |
1270 | 1275 |
1271 if (guard && total_collections() >= VerifyGCStartAt) { | 1276 if (guard && total_collections() >= VerifyGCStartAt) { |
1272 double verify_start = os::elapsedTime(); | 1277 double verify_start = os::elapsedTime(); |
1273 HandleMark hm; // Discard invalid handles created during verification | 1278 HandleMark hm; // Discard invalid handles created during verification |
1274 gclog_or_tty->print(msg); | |
1275 prepare_for_verify(); | 1279 prepare_for_verify(); |
1276 Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking); | 1280 Universe::verify(VerifyOption_G1UsePrevMarking, msg); |
1277 verify_time_ms = (os::elapsedTime() - verify_start) * 1000; | 1281 verify_time_ms = (os::elapsedTime() - verify_start) * 1000; |
1278 } | 1282 } |
1279 | 1283 |
1280 return verify_time_ms; | 1284 return verify_time_ms; |
1281 } | 1285 } |
1297 | 1301 |
1298 if (GC_locker::check_active_before_gc()) { | 1302 if (GC_locker::check_active_before_gc()) { |
1299 return false; | 1303 return false; |
1300 } | 1304 } |
1301 | 1305 |
1306 STWGCTimer* gc_timer = G1MarkSweep::gc_timer(); | |
1307 gc_timer->register_gc_start(os::elapsed_counter()); | |
1308 | |
1309 SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer(); | |
1310 gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start()); | |
1311 | |
1302 SvcGCMarker sgcm(SvcGCMarker::FULL); | 1312 SvcGCMarker sgcm(SvcGCMarker::FULL); |
1303 ResourceMark rm; | 1313 ResourceMark rm; |
1304 | 1314 |
1305 print_heap_before_gc(); | 1315 print_heap_before_gc(); |
1306 | 1316 trace_heap_before_gc(gc_tracer); |
1307 size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); | 1317 |
1318 size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); | |
1308 | 1319 |
1309 HRSPhaseSetter x(HRSPhaseFullGC); | 1320 HRSPhaseSetter x(HRSPhaseFullGC); |
1310 verify_region_sets_optional(); | 1321 verify_region_sets_optional(); |
1311 | 1322 |
1312 const bool do_clear_all_soft_refs = clear_all_soft_refs || | 1323 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
1320 // Timing | 1331 // Timing |
1321 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant"); | 1332 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant"); |
1322 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); | 1333 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); |
1323 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); | 1334 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); |
1324 | 1335 |
1325 TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); | |
1326 TraceCollectorStats tcs(g1mm()->full_collection_counters()); | |
1327 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); | |
1328 | |
1329 double start = os::elapsedTime(); | |
1330 g1_policy()->record_full_collection_start(); | |
1331 | |
1332 // Note: When we have a more flexible GC logging framework that | |
1333 // allows us to add optional attributes to a GC log record we | |
1334 // could consider timing and reporting how long we wait in the | |
1335 // following two methods. | |
1336 wait_while_free_regions_coming(); | |
1337 // If we start the compaction before the CM threads finish | |
1338 // scanning the root regions we might trip them over as we'll | |
1339 // be moving objects / updating references. So let's wait until | |
1340 // they are done. By telling them to abort, they should complete | |
1341 // early. | |
1342 _cm->root_regions()->abort(); | |
1343 _cm->root_regions()->wait_until_scan_finished(); | |
1344 append_secondary_free_list_if_not_empty_with_lock(); | |
1345 | |
1346 gc_prologue(true); | |
1347 increment_total_collections(true /* full gc */); | |
1348 increment_old_marking_cycles_started(); | |
1349 | |
1350 size_t g1h_prev_used = used(); | |
1351 assert(used() == recalculate_used(), "Should be equal"); | |
1352 | |
1353 verify_before_gc(); | |
1354 | |
1355 pre_full_gc_dump(); | |
1356 | |
1357 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
1358 | |
1359 // Disable discovery and empty the discovered lists | |
1360 // for the CM ref processor. | |
1361 ref_processor_cm()->disable_discovery(); | |
1362 ref_processor_cm()->abandon_partial_discovery(); | |
1363 ref_processor_cm()->verify_no_references_recorded(); | |
1364 | |
1365 // Abandon current iterations of concurrent marking and concurrent | |
1366 // refinement, if any are in progress. We have to do this before | |
1367 // wait_until_scan_finished() below. | |
1368 concurrent_mark()->abort(); | |
1369 | |
1370 // Make sure we'll choose a new allocation region afterwards. | |
1371 release_mutator_alloc_region(); | |
1372 abandon_gc_alloc_regions(); | |
1373 g1_rem_set()->cleanupHRRS(); | |
1374 | |
1375 // We should call this after we retire any currently active alloc | |
1376 // regions so that all the ALLOC / RETIRE events are generated | |
1377 // before the start GC event. | |
1378 _hr_printer.start_gc(true /* full */, (size_t) total_collections()); | |
1379 | |
1380 // We may have added regions to the current incremental collection | |
1381 // set between the last GC or pause and now. We need to clear the | |
1382 // incremental collection set and then start rebuilding it afresh | |
1383 // after this full GC. | |
1384 abandon_collection_set(g1_policy()->inc_cset_head()); | |
1385 g1_policy()->clear_incremental_cset(); | |
1386 g1_policy()->stop_incremental_cset_building(); | |
1387 | |
1388 tear_down_region_sets(false /* free_list_only */); | |
1389 g1_policy()->set_gcs_are_young(true); | |
1390 | |
1391 // See the comments in g1CollectedHeap.hpp and | |
1392 // G1CollectedHeap::ref_processing_init() about | |
1393 // how reference processing currently works in G1. | |
1394 | |
1395 // Temporarily make discovery by the STW ref processor single threaded (non-MT). | |
1396 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); | |
1397 | |
1398 // Temporarily clear the STW ref processor's _is_alive_non_header field. | |
1399 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); | |
1400 | |
1401 ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); | |
1402 ref_processor_stw()->setup_policy(do_clear_all_soft_refs); | |
1403 | |
1404 // Do collection work | |
1405 { | 1336 { |
1406 HandleMark hm; // Discard invalid handles created during gc | 1337 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL); |
1407 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); | 1338 TraceCollectorStats tcs(g1mm()->full_collection_counters()); |
1408 } | 1339 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); |
1409 | 1340 |
1410 assert(free_regions() == 0, "we should not have added any free regions"); | 1341 double start = os::elapsedTime(); |
1411 rebuild_region_sets(false /* free_list_only */); | 1342 g1_policy()->record_full_collection_start(); |
1412 | 1343 |
1413 // Enqueue any discovered reference objects that have | 1344 // Note: When we have a more flexible GC logging framework that |
1414 // not been removed from the discovered lists. | 1345 // allows us to add optional attributes to a GC log record we |
1415 ref_processor_stw()->enqueue_discovered_references(); | 1346 // could consider timing and reporting how long we wait in the |
1416 | 1347 // following two methods. |
1417 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | 1348 wait_while_free_regions_coming(); |
1418 | 1349 // If we start the compaction before the CM threads finish |
1419 MemoryService::track_memory_usage(); | 1350 // scanning the root regions we might trip them over as we'll |
1420 | 1351 // be moving objects / updating references. So let's wait until |
1421 verify_after_gc(); | 1352 // they are done. By telling them to abort, they should complete |
1422 | 1353 // early. |
1423 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); | 1354 _cm->root_regions()->abort(); |
1424 ref_processor_stw()->verify_no_references_recorded(); | 1355 _cm->root_regions()->wait_until_scan_finished(); |
1425 | 1356 append_secondary_free_list_if_not_empty_with_lock(); |
1426 // Delete metaspaces for unloaded class loaders and clean up loader_data graph | 1357 |
1427 ClassLoaderDataGraph::purge(); | 1358 gc_prologue(true); |
1428 | 1359 increment_total_collections(true /* full gc */); |
1429 // Note: since we've just done a full GC, concurrent | 1360 increment_old_marking_cycles_started(); |
1430 // marking is no longer active. Therefore we need not | 1361 |
1431 // re-enable reference discovery for the CM ref processor. | 1362 assert(used() == recalculate_used(), "Should be equal"); |
1432 // That will be done at the start of the next marking cycle. | 1363 |
1433 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); | 1364 verify_before_gc(); |
1434 ref_processor_cm()->verify_no_references_recorded(); | 1365 |
1435 | 1366 pre_full_gc_dump(gc_timer); |
1436 reset_gc_time_stamp(); | 1367 |
1437 // Since everything potentially moved, we will clear all remembered | 1368 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
1438 // sets, and clear all cards. Later we will rebuild remebered | 1369 |
1439 // sets. We will also reset the GC time stamps of the regions. | 1370 // Disable discovery and empty the discovered lists |
1440 clear_rsets_post_compaction(); | 1371 // for the CM ref processor. |
1441 check_gc_time_stamps(); | 1372 ref_processor_cm()->disable_discovery(); |
1442 | 1373 ref_processor_cm()->abandon_partial_discovery(); |
1443 // Resize the heap if necessary. | 1374 ref_processor_cm()->verify_no_references_recorded(); |
1444 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); | 1375 |
1445 | 1376 // Abandon current iterations of concurrent marking and concurrent |
1446 if (_hr_printer.is_active()) { | 1377 // refinement, if any are in progress. We have to do this before |
1447 // We should do this after we potentially resize the heap so | 1378 // wait_until_scan_finished() below. |
1448 // that all the COMMIT / UNCOMMIT events are generated before | 1379 concurrent_mark()->abort(); |
1449 // the end GC event. | 1380 |
1450 | 1381 // Make sure we'll choose a new allocation region afterwards. |
1451 print_hrs_post_compaction(); | 1382 release_mutator_alloc_region(); |
1452 _hr_printer.end_gc(true /* full */, (size_t) total_collections()); | 1383 abandon_gc_alloc_regions(); |
1453 } | 1384 g1_rem_set()->cleanupHRRS(); |
1454 | 1385 |
1455 if (_cg1r->use_cache()) { | 1386 // We should call this after we retire any currently active alloc |
1456 _cg1r->clear_and_record_card_counts(); | 1387 // regions so that all the ALLOC / RETIRE events are generated |
1457 _cg1r->clear_hot_cache(); | 1388 // before the start GC event. |
1458 } | 1389 _hr_printer.start_gc(true /* full */, (size_t) total_collections()); |
1459 | 1390 |
1460 // Rebuild remembered sets of all regions. | 1391 // We may have added regions to the current incremental collection |
1461 if (G1CollectedHeap::use_parallel_gc_threads()) { | 1392 // set between the last GC or pause and now. We need to clear the |
1462 uint n_workers = | 1393 // incremental collection set and then start rebuilding it afresh |
1463 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), | 1394 // after this full GC. |
1464 workers()->active_workers(), | 1395 abandon_collection_set(g1_policy()->inc_cset_head()); |
1465 Threads::number_of_non_daemon_threads()); | 1396 g1_policy()->clear_incremental_cset(); |
1466 assert(UseDynamicNumberOfGCThreads || | 1397 g1_policy()->stop_incremental_cset_building(); |
1467 n_workers == workers()->total_workers(), | 1398 |
1468 "If not dynamic should be using all the workers"); | 1399 tear_down_region_sets(false /* free_list_only */); |
1469 workers()->set_active_workers(n_workers); | 1400 g1_policy()->set_gcs_are_young(true); |
1470 // Set parallel threads in the heap (_n_par_threads) only | 1401 |
1471 // before a parallel phase and always reset it to 0 after | 1402 // See the comments in g1CollectedHeap.hpp and |
1472 // the phase so that the number of parallel threads does | 1403 // G1CollectedHeap::ref_processing_init() about |
1473 // no get carried forward to a serial phase where there | 1404 // how reference processing currently works in G1. |
1474 // may be code that is "possibly_parallel". | 1405 |
1475 set_par_threads(n_workers); | 1406 // Temporarily make discovery by the STW ref processor single threaded (non-MT). |
1476 | 1407 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); |
1477 ParRebuildRSTask rebuild_rs_task(this); | 1408 |
1478 assert(check_heap_region_claim_values( | 1409 // Temporarily clear the STW ref processor's _is_alive_non_header field. |
1479 HeapRegion::InitialClaimValue), "sanity check"); | 1410 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); |
1480 assert(UseDynamicNumberOfGCThreads || | 1411 |
1481 workers()->active_workers() == workers()->total_workers(), | 1412 ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); |
1482 "Unless dynamic should use total workers"); | 1413 ref_processor_stw()->setup_policy(do_clear_all_soft_refs); |
1483 // Use the most recent number of active workers | 1414 |
1484 assert(workers()->active_workers() > 0, | 1415 // Do collection work |
1485 "Active workers not properly set"); | 1416 { |
1486 set_par_threads(workers()->active_workers()); | 1417 HandleMark hm; // Discard invalid handles created during gc |
1487 workers()->run_task(&rebuild_rs_task); | 1418 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); |
1488 set_par_threads(0); | 1419 } |
1489 assert(check_heap_region_claim_values( | 1420 |
1490 HeapRegion::RebuildRSClaimValue), "sanity check"); | 1421 assert(free_regions() == 0, "we should not have added any free regions"); |
1491 reset_heap_region_claim_values(); | 1422 rebuild_region_sets(false /* free_list_only */); |
1492 } else { | 1423 |
1493 RebuildRSOutOfRegionClosure rebuild_rs(this); | 1424 // Enqueue any discovered reference objects that have |
1494 heap_region_iterate(&rebuild_rs); | 1425 // not been removed from the discovered lists. |
1495 } | 1426 ref_processor_stw()->enqueue_discovered_references(); |
1496 | 1427 |
1497 if (G1Log::fine()) { | 1428 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
1498 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | 1429 |
1499 } | 1430 MemoryService::track_memory_usage(); |
1500 | 1431 |
1501 if (true) { // FIXME | 1432 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); |
1502 MetaspaceGC::compute_new_size(); | 1433 ref_processor_stw()->verify_no_references_recorded(); |
1503 } | 1434 |
1504 | 1435 // Delete metaspaces for unloaded class loaders and clean up loader_data graph |
1505 // Start a new incremental collection set for the next pause | 1436 ClassLoaderDataGraph::purge(); |
1506 assert(g1_policy()->collection_set() == NULL, "must be"); | 1437 MetaspaceAux::verify_metrics(); |
1507 g1_policy()->start_incremental_cset_building(); | 1438 |
1508 | 1439 // Note: since we've just done a full GC, concurrent |
1509 // Clear the _cset_fast_test bitmap in anticipation of adding | 1440 // marking is no longer active. Therefore we need not |
1510 // regions to the incremental collection set for the next | 1441 // re-enable reference discovery for the CM ref processor. |
1511 // evacuation pause. | 1442 // That will be done at the start of the next marking cycle. |
1512 clear_cset_fast_test(); | 1443 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); |
1513 | 1444 ref_processor_cm()->verify_no_references_recorded(); |
1514 init_mutator_alloc_region(); | 1445 |
1515 | 1446 reset_gc_time_stamp(); |
1516 double end = os::elapsedTime(); | 1447 // Since everything potentially moved, we will clear all remembered |
1517 g1_policy()->record_full_collection_end(); | 1448 // sets, and clear all cards. Later we will rebuild remembered |
1449 // sets. We will also reset the GC time stamps of the regions. | |
1450 clear_rsets_post_compaction(); | |
1451 check_gc_time_stamps(); | |
1452 | |
1453 // Resize the heap if necessary. | |
1454 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); | |
1455 | |
1456 if (_hr_printer.is_active()) { | |
1457 // We should do this after we potentially resize the heap so | |
1458 // that all the COMMIT / UNCOMMIT events are generated before | |
1459 // the end GC event. | |
1460 | |
1461 print_hrs_post_compaction(); | |
1462 _hr_printer.end_gc(true /* full */, (size_t) total_collections()); | |
1463 } | |
1464 | |
1465 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); | |
1466 if (hot_card_cache->use_cache()) { | |
1467 hot_card_cache->reset_card_counts(); | |
1468 hot_card_cache->reset_hot_cache(); | |
1469 } | |
1470 | |
1471 // Rebuild remembered sets of all regions. | |
1472 if (G1CollectedHeap::use_parallel_gc_threads()) { | |
1473 uint n_workers = | |
1474 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), | |
1475 workers()->active_workers(), | |
1476 Threads::number_of_non_daemon_threads()); | |
1477 assert(UseDynamicNumberOfGCThreads || | |
1478 n_workers == workers()->total_workers(), | |
1479 "If not dynamic should be using all the workers"); | |
1480 workers()->set_active_workers(n_workers); | |
1481 // Set parallel threads in the heap (_n_par_threads) only | |
1482 // before a parallel phase and always reset it to 0 after | |
1483 // the phase so that the number of parallel threads does | |
1484 // not get carried forward to a serial phase where there |
1485 // may be code that is "possibly_parallel". | |
1486 set_par_threads(n_workers); | |
1487 | |
1488 ParRebuildRSTask rebuild_rs_task(this); | |
1489 assert(check_heap_region_claim_values( | |
1490 HeapRegion::InitialClaimValue), "sanity check"); | |
1491 assert(UseDynamicNumberOfGCThreads || | |
1492 workers()->active_workers() == workers()->total_workers(), | |
1493 "Unless dynamic should use total workers"); | |
1494 // Use the most recent number of active workers | |
1495 assert(workers()->active_workers() > 0, | |
1496 "Active workers not properly set"); | |
1497 set_par_threads(workers()->active_workers()); | |
1498 workers()->run_task(&rebuild_rs_task); | |
1499 set_par_threads(0); | |
1500 assert(check_heap_region_claim_values( | |
1501 HeapRegion::RebuildRSClaimValue), "sanity check"); | |
1502 reset_heap_region_claim_values(); | |
1503 } else { | |
1504 RebuildRSOutOfRegionClosure rebuild_rs(this); | |
1505 heap_region_iterate(&rebuild_rs); | |
1506 } | |
1507 | |
1508 if (true) { // FIXME | |
1509 MetaspaceGC::compute_new_size(); | |
1510 } | |
1518 | 1511 |
1519 #ifdef TRACESPINNING | 1512 #ifdef TRACESPINNING |
1520 ParallelTaskTerminator::print_termination_counts(); | 1513 ParallelTaskTerminator::print_termination_counts(); |
1521 #endif | 1514 #endif |
1522 | 1515 |
1523 gc_epilogue(true); | 1516 // Discard all rset updates |
1524 | 1517 JavaThread::dirty_card_queue_set().abandon_logs(); |
1525 // Discard all rset updates | 1518 assert(!G1DeferredRSUpdate |
1526 JavaThread::dirty_card_queue_set().abandon_logs(); | 1519 || (G1DeferredRSUpdate && |
1527 assert(!G1DeferredRSUpdate | 1520 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
1528 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); | 1521 |
1529 | 1522 _young_list->reset_sampled_info(); |
1530 _young_list->reset_sampled_info(); | 1523 // At this point there should be no regions in the |
1531 // At this point there should be no regions in the | 1524 // entire heap tagged as young. |
1532 // entire heap tagged as young. | 1525 assert(check_young_list_empty(true /* check_heap */), |
1533 assert( check_young_list_empty(true /* check_heap */), | 1526 "young list should be empty at this point"); |
1534 "young list should be empty at this point"); | 1527 |
1535 | 1528 // Update the number of full collections that have been completed. |
1536 // Update the number of full collections that have been completed. | 1529 increment_old_marking_cycles_completed(false /* concurrent */); |
1537 increment_old_marking_cycles_completed(false /* concurrent */); | 1530 |
1538 | 1531 _hrs.verify_optional(); |
1539 _hrs.verify_optional(); | 1532 verify_region_sets_optional(); |
1540 verify_region_sets_optional(); | 1533 |
1534 verify_after_gc(); | |
1535 | |
1536 // Start a new incremental collection set for the next pause | |
1537 assert(g1_policy()->collection_set() == NULL, "must be"); | |
1538 g1_policy()->start_incremental_cset_building(); | |
1539 | |
1540 // Clear the _cset_fast_test bitmap in anticipation of adding | |
1541 // regions to the incremental collection set for the next | |
1542 // evacuation pause. | |
1543 clear_cset_fast_test(); | |
1544 | |
1545 init_mutator_alloc_region(); | |
1546 | |
1547 double end = os::elapsedTime(); | |
1548 g1_policy()->record_full_collection_end(); | |
1549 | |
1550 if (G1Log::fine()) { | |
1551 g1_policy()->print_heap_transition(); | |
1552 } | |
1553 | |
1554 // We must call G1MonitoringSupport::update_sizes() in the same scoping level | |
1555 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the | |
1556 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated | |
1557 // before any GC notifications are raised. | |
1558 g1mm()->update_sizes(); | |
1559 | |
1560 gc_epilogue(true); | |
1561 } | |
1562 | |
1563 if (G1Log::finer()) { | |
1564 g1_policy()->print_detailed_heap_transition(true /* full */); | |
1565 } | |
1541 | 1566 |
1542 print_heap_after_gc(); | 1567 print_heap_after_gc(); |
1543 | 1568 trace_heap_after_gc(gc_tracer); |
1544 // We must call G1MonitoringSupport::update_sizes() in the same scoping level | 1569 |
1545 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the | 1570 post_full_gc_dump(gc_timer); |
1546 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated | 1571 |
1547 // before any GC notifications are raised. | 1572 gc_timer->register_gc_end(os::elapsed_counter()); |
1548 g1mm()->update_sizes(); | 1573 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); |
1549 } | 1574 } |
1550 | |
1551 post_full_gc_dump(); | |
1552 | 1575 |
1553 return true; | 1576 return true; |
1554 } | 1577 } |
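The dominant change in do_collection above is the new event-tracing bracket: register_gc_start / report_gc_start before the work, register_gc_end / report_gc_end after it, with heap traces on both sides of the pause. A minimal sketch of that protocol, with standard C++ stand-ins for GCTimer and GCTracer (names assumed, not HotSpot's types):

#include <chrono>
#include <cstdio>

struct GCTimerSketch {
  std::chrono::steady_clock::time_point start_t, end_t;
  void register_gc_start() { start_t = std::chrono::steady_clock::now(); }
  void register_gc_end()   { end_t   = std::chrono::steady_clock::now(); }
};

struct GCTracerSketch {
  void report_gc_start() { std::puts("full GC start"); }
  void report_gc_end(const GCTimerSketch& t) {
    long long us = std::chrono::duration_cast<std::chrono::microseconds>(
                       t.end_t - t.start_t).count();
    std::printf("full GC end after %lld us\n", us);
  }
};

void do_collection_sketch() {
  GCTimerSketch timer;
  GCTracerSketch tracer;
  timer.register_gc_start();     // mirrors gc_timer->register_gc_start()
  tracer.report_gc_start();      // mirrors gc_tracer->report_gc_start()
  // ... trace_heap_before_gc, the collection, trace_heap_after_gc ...
  timer.register_gc_end();       // mirrors gc_timer->register_gc_end()
  tracer.report_gc_end(timer);   // mirrors gc_tracer->report_gc_end()
}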
1555 | 1578 |
1556 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | 1579 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { |
1759 _g1_committed.set_end(new_end); | 1782 _g1_committed.set_end(new_end); |
1760 // Tell the card table about the update. | 1783 // Tell the card table about the update. |
1761 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | 1784 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); |
1762 // Tell the BOT about the update. | 1785 // Tell the BOT about the update. |
1763 _bot_shared->resize(_g1_committed.word_size()); | 1786 _bot_shared->resize(_g1_committed.word_size()); |
1787 // Tell the hot card cache about the update | |
1788 _cg1r->hot_card_cache()->resize_card_counts(capacity()); | |
1764 } | 1789 } |
1765 | 1790 |
1766 bool G1CollectedHeap::expand(size_t expand_bytes) { | 1791 bool G1CollectedHeap::expand(size_t expand_bytes) { |
1767 size_t old_mem_size = _g1_storage.committed_size(); | 1792 size_t old_mem_size = _g1_storage.committed_size(); |
1768 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); | 1793 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); |
1823 // The expansion of the virtual storage space was unsuccessful. | 1848 // The expansion of the virtual storage space was unsuccessful. |
1824 // Let's see if it was because we ran out of swap. | 1849 // Let's see if it was because we ran out of swap. |
1825 if (G1ExitOnExpansionFailure && | 1850 if (G1ExitOnExpansionFailure && |
1826 _g1_storage.uncommitted_size() >= aligned_expand_bytes) { | 1851 _g1_storage.uncommitted_size() >= aligned_expand_bytes) { |
1827 // We had head room... | 1852 // We had head room... |
1828 vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion"); | 1853 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion"); |
1829 } | 1854 } |
1830 } | 1855 } |
1831 return successful; | 1856 return successful; |
1832 } | 1857 } |
1833 | 1858 |
1835 size_t old_mem_size = _g1_storage.committed_size(); | 1860 size_t old_mem_size = _g1_storage.committed_size(); |
1836 size_t aligned_shrink_bytes = | 1861 size_t aligned_shrink_bytes = |
1837 ReservedSpace::page_align_size_down(shrink_bytes); | 1862 ReservedSpace::page_align_size_down(shrink_bytes); |
1838 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | 1863 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, |
1839 HeapRegion::GrainBytes); | 1864 HeapRegion::GrainBytes); |
1840 uint num_regions_deleted = 0; | 1865 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); |
1841 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted); | 1866 |
1867 uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove); | |
1842 HeapWord* old_end = (HeapWord*) _g1_storage.high(); | 1868 HeapWord* old_end = (HeapWord*) _g1_storage.high(); |
1843 assert(mr.end() == old_end, "post-condition"); | 1869 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes; |
1844 | 1870 |
1845 ergo_verbose3(ErgoHeapSizing, | 1871 ergo_verbose3(ErgoHeapSizing, |
1846 "shrink the heap", | 1872 "shrink the heap", |
1847 ergo_format_byte("requested shrinking amount") | 1873 ergo_format_byte("requested shrinking amount") |
1848 ergo_format_byte("aligned shrinking amount") | 1874 ergo_format_byte("aligned shrinking amount") |
1849 ergo_format_byte("attempted shrinking amount"), | 1875 ergo_format_byte("attempted shrinking amount"), |
1850 shrink_bytes, aligned_shrink_bytes, mr.byte_size()); | 1876 shrink_bytes, aligned_shrink_bytes, shrunk_bytes); |
1851 if (mr.byte_size() > 0) { | 1877 if (num_regions_removed > 0) { |
1878 _g1_storage.shrink_by(shrunk_bytes); | |
1879 HeapWord* new_end = (HeapWord*) _g1_storage.high(); | |
1880 | |
1852 if (_hr_printer.is_active()) { | 1881 if (_hr_printer.is_active()) { |
1853 HeapWord* curr = mr.end(); | 1882 HeapWord* curr = old_end; |
1854 while (curr > mr.start()) { | 1883 while (curr > new_end) { |
1855 HeapWord* curr_end = curr; | 1884 HeapWord* curr_end = curr; |
1856 curr -= HeapRegion::GrainWords; | 1885 curr -= HeapRegion::GrainWords; |
1857 _hr_printer.uncommit(curr, curr_end); | 1886 _hr_printer.uncommit(curr, curr_end); |
1858 } | 1887 } |
1859 assert(curr == mr.start(), "post-condition"); | 1888 } |
1860 } | 1889 |
1861 | 1890 _expansion_regions += num_regions_removed; |
1862 _g1_storage.shrink_by(mr.byte_size()); | |
1863 HeapWord* new_end = (HeapWord*) _g1_storage.high(); | |
1864 assert(mr.start() == new_end, "post-condition"); | |
1865 | |
1866 _expansion_regions += num_regions_deleted; | |
1867 update_committed_space(old_end, new_end); | 1891 update_committed_space(old_end, new_end); |
1868 HeapRegionRemSet::shrink_heap(n_regions()); | 1892 HeapRegionRemSet::shrink_heap(n_regions()); |
1869 g1_policy()->record_new_heap_size(n_regions()); | 1893 g1_policy()->record_new_heap_size(n_regions()); |
1870 } else { | 1894 } else { |
1871 ergo_verbose0(ErgoHeapSizing, | 1895 ergo_verbose0(ErgoHeapSizing, |
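The rewritten shrink path above works in whole regions: the request is first rounded down to a region count, and the bytes actually released are recomputed from the regions the free list really gave up. A worked example with an assumed 1 MB region size:

#include <cassert>
#include <cstddef>

int main() {
  const size_t GrainBytes = 1024 * 1024;        // assumed region size
  size_t shrink_bytes = 3 * GrainBytes + 4096;  // requested amount
  // Convert the request to whole regions (rounds down).
  size_t num_regions_to_remove = shrink_bytes / GrainBytes;   // == 3
  // The free list may yield fewer regions than requested.
  size_t num_regions_removed = 2;               // assumed outcome
  size_t shrunk_bytes = num_regions_removed * GrainBytes;
  assert(num_regions_to_remove == 3);
  assert(shrunk_bytes == 2 * GrainBytes);
  return 0;
}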
1909 _is_alive_closure_stw(this), | 1933 _is_alive_closure_stw(this), |
1910 _ref_processor_cm(NULL), | 1934 _ref_processor_cm(NULL), |
1911 _ref_processor_stw(NULL), | 1935 _ref_processor_stw(NULL), |
1912 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | 1936 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), |
1913 _bot_shared(NULL), | 1937 _bot_shared(NULL), |
1914 _evac_failure_scan_stack(NULL) , | 1938 _evac_failure_scan_stack(NULL), |
1915 _mark_in_progress(false), | 1939 _mark_in_progress(false), |
1916 _cg1r(NULL), _summary_bytes_used(0), | 1940 _cg1r(NULL), _summary_bytes_used(0), |
1917 _g1mm(NULL), | 1941 _g1mm(NULL), |
1918 _refine_cte_cl(NULL), | 1942 _refine_cte_cl(NULL), |
1919 _full_collection(false), | 1943 _full_collection(false), |
1929 _old_plab_stats(OldPLABSize, PLABWeight), | 1953 _old_plab_stats(OldPLABSize, PLABWeight), |
1930 _expand_heap_after_alloc_failure(true), | 1954 _expand_heap_after_alloc_failure(true), |
1931 _surviving_young_words(NULL), | 1955 _surviving_young_words(NULL), |
1932 _old_marking_cycles_started(0), | 1956 _old_marking_cycles_started(0), |
1933 _old_marking_cycles_completed(0), | 1957 _old_marking_cycles_completed(0), |
1958 _concurrent_cycle_started(false), | |
1934 _in_cset_fast_test(NULL), | 1959 _in_cset_fast_test(NULL), |
1935 _in_cset_fast_test_base(NULL), | 1960 _in_cset_fast_test_base(NULL), |
1936 _dirty_cards_region_list(NULL), | 1961 _dirty_cards_region_list(NULL), |
1937 _worker_cset_start_region(NULL), | 1962 _worker_cset_start_region(NULL), |
1938 _worker_cset_start_region_time_stamp(NULL) { | 1963 _worker_cset_start_region_time_stamp(NULL), |
1939 _g1h = this; // To catch bugs. | 1964 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()), |
1965 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), | |
1966 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()), | |
1967 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) { | |
1968 | |
1969 _g1h = this; | |
1940 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | 1970 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { |
1941 vm_exit_during_initialization("Failed necessary allocation."); | 1971 vm_exit_during_initialization("Failed necessary allocation."); |
1942 } | 1972 } |
1943 | 1973 |
1944 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; | 1974 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
1947 _task_queues = new RefToScanQueueSet(n_queues); | 1977 _task_queues = new RefToScanQueueSet(n_queues); |
1948 | 1978 |
1949 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | 1979 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); |
1950 assert(n_rem_sets > 0, "Invariant."); | 1980 assert(n_rem_sets > 0, "Invariant."); |
1951 | 1981 |
1952 HeapRegionRemSetIterator** iter_arr = | |
1953 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC); | |
1954 for (int i = 0; i < n_queues; i++) { | |
1955 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1956 } | |
1957 _rem_set_iterator = iter_arr; | |
1958 | |
1959 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); | 1982 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); |
1960 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); | 1983 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); |
1984 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC); | |
1961 | 1985 |
1962 for (int i = 0; i < n_queues; i++) { | 1986 for (int i = 0; i < n_queues; i++) { |
1963 RefToScanQueue* q = new RefToScanQueue(); | 1987 RefToScanQueue* q = new RefToScanQueue(); |
1964 q->initialize(); | 1988 q->initialize(); |
1965 _task_queues->register_queue(i, q); | 1989 _task_queues->register_queue(i, q); |
1966 } | 1990 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo(); |
1967 | 1991 } |
1968 clear_cset_start_regions(); | 1992 clear_cset_start_regions(); |
1969 | 1993 |
1970 // Initialize the G1EvacuationFailureALot counters and flags. | 1994 // Initialize the G1EvacuationFailureALot counters and flags. |
1971 NOT_PRODUCT(reset_evacuation_should_fail();) | 1995 NOT_PRODUCT(reset_evacuation_should_fail();) |
1972 | 1996 |
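The new _evacuation_failed_info_array above is allocated as raw storage with NEW_C_HEAP_ARRAY, and each element is then constructed with placement new inside the queue loop. A sketch of that two-step pattern, with malloc standing in for HotSpot's allocator:

#include <cstdlib>
#include <new>

struct EvacFailedInfoSketch {
  unsigned failed_count;
  EvacFailedInfoSketch() : failed_count(0) {}
};

int main() {
  const int n_queues = 4;  // assumed worker/queue count
  void* raw = std::malloc(sizeof(EvacFailedInfoSketch) * n_queues);
  EvacFailedInfoSketch* arr = static_cast<EvacFailedInfoSketch*>(raw);
  for (int i = 0; i < n_queues; i++) {
    ::new (&arr[i]) EvacFailedInfoSketch();  // construct in place
  }
  for (int i = 0; i < n_queues; i++) arr[i].~EvacFailedInfoSketch();
  std::free(raw);
  return 0;
}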
1999 | 2023 |
2000 // Ensure that the sizes are properly aligned. | 2024 // Ensure that the sizes are properly aligned. |
2001 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | 2025 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); |
2002 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | 2026 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); |
2003 | 2027 |
2004 _cg1r = new ConcurrentG1Refine(); | 2028 _cg1r = new ConcurrentG1Refine(this); |
2005 | 2029 |
2006 // Reserve the maximum. | 2030 // Reserve the maximum. |
2007 | 2031 |
2008 // When compressed oops are enabled, the preferred heap base | 2032 // When compressed oops are enabled, the preferred heap base |
2009 // is calculated by subtracting the requested size from the | 2033 // is calculated by subtracting the requested size from the |
2022 | 2046 |
2023 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, | 2047 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, |
2024 HeapRegion::GrainBytes); | 2048 HeapRegion::GrainBytes); |
2025 | 2049 |
2026 // It is important to do this in a way such that concurrent readers can't | 2050 // It is important to do this in a way such that concurrent readers can't |
2027 // temporarily think somethings in the heap. (I've actually seen this | 2051 // temporarily think something is in the heap. (I've actually seen this |
2028 // happen in asserts: DLD.) | 2052 // happen in asserts: DLD.) |
2029 _reserved.set_word_size(0); | 2053 _reserved.set_word_size(0); |
2030 _reserved.set_start((HeapWord*)heap_rs.base()); | 2054 _reserved.set_start((HeapWord*)heap_rs.base()); |
2031 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | 2055 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); |
2032 | 2056 |
2060 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | 2084 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); |
2061 _hrs.initialize((HeapWord*) _g1_reserved.start(), | 2085 _hrs.initialize((HeapWord*) _g1_reserved.start(), |
2062 (HeapWord*) _g1_reserved.end(), | 2086 (HeapWord*) _g1_reserved.end(), |
2063 _expansion_regions); | 2087 _expansion_regions); |
2064 | 2088 |
2089 // Do later initialization work for concurrent refinement. | |
2090 _cg1r->init(); | |
2091 | |
2065 // 6843694 - ensure that the maximum region index can fit | 2092 // 6843694 - ensure that the maximum region index can fit |
2066 // in the remembered set structures. | 2093 // in the remembered set structures. |
2067 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; | 2094 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
2068 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); | 2095 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
2069 | 2096 |
2077 _bot_shared = new G1BlockOffsetSharedArray(_reserved, | 2104 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
2078 heap_word_size(init_byte_size)); | 2105 heap_word_size(init_byte_size)); |
2079 | 2106 |
2080 _g1h = this; | 2107 _g1h = this; |
2081 | 2108 |
2082 _in_cset_fast_test_length = max_regions(); | 2109 _in_cset_fast_test_length = max_regions(); |
2083 _in_cset_fast_test_base = | 2110 _in_cset_fast_test_base = |
2084 NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC); | 2111 NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC); |
2085 | 2112 |
2086 // We're biasing _in_cset_fast_test to avoid subtracting the | 2113 // We're biasing _in_cset_fast_test to avoid subtracting the |
2087 // beginning of the heap every time we want to index; basically | 2114 // beginning of the heap every time we want to index; basically |
2088 // it's the same with what we do with the card table. | 2115 // it's the same with what we do with the card table. |
2089 _in_cset_fast_test = _in_cset_fast_test_base - | 2116 _in_cset_fast_test = _in_cset_fast_test_base - |
2090 ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); | 2117 ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
2091 | 2118 |
2092 // Clear the _cset_fast_test bitmap in anticipation of adding | 2119 // Clear the _cset_fast_test bitmap in anticipation of adding |
2093 // regions to the incremental collection set for the first | 2120 // regions to the incremental collection set for the first |
2094 // evacuation pause. | 2121 // evacuation pause. |
2095 clear_cset_fast_test(); | 2122 clear_cset_fast_test(); |
2096 | 2123 |
2097 // Create the ConcurrentMark data structure and thread. | 2124 // Create the ConcurrentMark data structure and thread. |
2098 // (Must do this late, so that "max_regions" is defined.) | 2125 // (Must do this late, so that "max_regions" is defined.) |
2099 _cm = new ConcurrentMark(this, heap_rs); | 2126 _cm = new ConcurrentMark(this, heap_rs); |
2100 if (_cm == NULL || !_cm->completed_initialization()) { | 2127 if (_cm == NULL || !_cm->completed_initialization()) { |
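The biased _in_cset_fast_test pointer above uses the same trick as the card table: pre-subtract the shifted heap base so the array can be indexed with address >> LogOfHRGrainBytes directly. A sketch with assumed values (the out-of-range intermediate pointer mirrors what the source computes):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const int LogOfHRGrainBytes = 20;          // assumed 1 MB regions
  const uintptr_t heap_start  = 0x40000000;  // assumed reserved base
  static bool base_storage[8];               // assumed max_regions() == 8
  std::memset(base_storage, 0, sizeof(base_storage));
  // Bias the pointer so (addr >> Log) indexes it directly.
  bool* in_cset_fast_test =
      base_storage - (heap_start >> LogOfHRGrainBytes);
  uintptr_t obj = heap_start + 3 * ((uintptr_t)1 << LogOfHRGrainBytes);
  in_cset_fast_test[obj >> LogOfHRGrainBytes] = true;  // mark region 3
  assert(base_storage[3]);
  return 0;
}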
2151 &JavaThread::dirty_card_queue_set()); | 2178 &JavaThread::dirty_card_queue_set()); |
2152 | 2179 |
2153 // In case we're keeping closure specialization stats, initialize those | 2180 // In case we're keeping closure specialization stats, initialize those |
2154 // counts and that mechanism. | 2181 // counts and that mechanism. |
2155 SpecializationStats::clear(); | 2182 SpecializationStats::clear(); |
2156 | |
2157 // Do later initialization work for concurrent refinement. | |
2158 _cg1r->init(); | |
2159 | 2183 |
2160 // Here we allocate the dummy full region that is required by the | 2184 // Here we allocate the dummy full region that is required by the |
2161 // G1AllocRegion class. If we don't pass an address in the reserved | 2185 // G1AllocRegion class. If we don't pass an address in the reserved |
2162 // space here, lots of asserts fire. | 2186 // space here, lots of asserts fire. |
2163 | 2187 |
2313 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, | 2337 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
2314 DirtyCardQueue* into_cset_dcq, | 2338 DirtyCardQueue* into_cset_dcq, |
2315 bool concurrent, | 2339 bool concurrent, |
2316 int worker_i) { | 2340 int worker_i) { |
2317 // Clean cards in the hot card cache | 2341 // Clean cards in the hot card cache |
2318 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); | 2342 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); |
2343 hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq); | |
2319 | 2344 |
2320 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | 2345 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
2321 int n_completed_buffers = 0; | 2346 int n_completed_buffers = 0; |
2322 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { | 2347 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
2323 n_completed_buffers++; | 2348 n_completed_buffers++; |
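iterate_dirty_card_closure above first drains the (now per-heap) hot card cache and then loops over completed buffers until the shared set runs dry. A simplified, self-contained sketch of the buffer-drain loop (types and names assumed):

#include <cstddef>
#include <deque>
#include <vector>

typedef signed char jbyte_t;  // stand-in for HotSpot's jbyte

struct CardClosureSketch {
  std::size_t cards_seen;
  CardClosureSketch() : cards_seen(0) {}
  bool do_card_ptr(jbyte_t* /*card*/, int /*worker_i*/) {
    cards_seen++;
    return true;  // keep iterating
  }
};

struct DirtyCardQueueSetSketch {
  std::deque<std::vector<jbyte_t*> > completed;
  // Returns false once no completed buffer is available.
  bool apply_closure_to_completed_buffer(CardClosureSketch* cl, int worker_i) {
    if (completed.empty()) return false;
    std::vector<jbyte_t*> buf = completed.front();
    completed.pop_front();
    for (std::size_t i = 0; i < buf.size(); i++) {
      cl->do_card_ptr(buf[i], worker_i);
    }
    return true;
  }
};

int drain(DirtyCardQueueSetSketch& dcqs, CardClosureSketch* cl, int worker_i) {
  int n_completed_buffers = 0;                 // mirrors the loop above
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i)) {
    n_completed_buffers++;
  }
  return n_completed_buffers;
}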
2468 _old_marking_cycles_completed += 1; | 2493 _old_marking_cycles_completed += 1; |
2469 | 2494 |
2470 // We need to clear the "in_progress" flag in the CM thread before | 2495 // We need to clear the "in_progress" flag in the CM thread before |
2471 // we wake up any waiters (especially when ExplicitInvokesConcurrent | 2496 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
2472 // is set) so that if a waiter requests another System.gc() it doesn't | 2497 // is set) so that if a waiter requests another System.gc() it doesn't |
2473 // incorrectly see that a marking cyle is still in progress. | 2498 // incorrectly see that a marking cycle is still in progress. |
2474 if (concurrent) { | 2499 if (concurrent) { |
2475 _cmThread->clear_in_progress(); | 2500 _cmThread->clear_in_progress(); |
2476 } | 2501 } |
2477 | 2502 |
2478 // This notify_all() will ensure that a thread that called | 2503 // This notify_all() will ensure that a thread that called |
2479 // System.gc() (with ExplicitGCInvokesConcurrent set or not) | 2504 // System.gc() (with ExplicitGCInvokesConcurrent set or not) |
2480 // and is waiting for a full GC to finish will be woken up. It is | 2505 // and is waiting for a full GC to finish will be woken up. It is |
2481 // waiting in VM_G1IncCollectionPause::doit_epilogue(). | 2506 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
2482 FullGCCount_lock->notify_all(); | 2507 FullGCCount_lock->notify_all(); |
2508 } | |
2509 | |
2510 void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) { | |
2511 _concurrent_cycle_started = true; | |
2512 _gc_timer_cm->register_gc_start(start_time); | |
2513 | |
2514 _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start()); | |
2515 trace_heap_before_gc(_gc_tracer_cm); | |
2516 } | |
2517 | |
2518 void G1CollectedHeap::register_concurrent_cycle_end() { | |
2519 if (_concurrent_cycle_started) { | |
2520 _gc_timer_cm->register_gc_end(os::elapsed_counter()); | |
2521 | |
2522 if (_cm->has_aborted()) { | |
2523 _gc_tracer_cm->report_concurrent_mode_failure(); | |
2524 } | |
2525 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); | |
2526 | |
2527 _concurrent_cycle_started = false; | |
2528 } | |
2529 } | |
2530 | |
2531 void G1CollectedHeap::trace_heap_after_concurrent_cycle() { | |
2532 if (_concurrent_cycle_started) { | |
2533 trace_heap_after_gc(_gc_tracer_cm); | |
2534 } | |
2535 } | |
2536 | |
2537 G1YCType G1CollectedHeap::yc_type() { | |
2538 bool is_young = g1_policy()->gcs_are_young(); | |
2539 bool is_initial_mark = g1_policy()->during_initial_mark_pause(); | |
2540 bool is_during_mark = mark_in_progress(); | |
2541 | |
2542 if (is_initial_mark) { | |
2543 return InitialMark; | |
2544 } else if (is_during_mark) { | |
2545 return DuringMark; | |
2546 } else if (is_young) { | |
2547 return Normal; | |
2548 } else { | |
2549 return Mixed; | |
2550 } | |
2483 } | 2551 } |
2484 | 2552 |
2485 void G1CollectedHeap::collect(GCCause::Cause cause) { | 2553 void G1CollectedHeap::collect(GCCause::Cause cause) { |
2486 assert_heap_not_locked(); | 2554 assert_heap_not_locked(); |
2487 | 2555 |
2682 if (chr->claim_value() == claim_value || | 2750 if (chr->claim_value() == claim_value || |
2683 !chr->continuesHumongous()) { | 2751 !chr->continuesHumongous()) { |
2684 break; | 2752 break; |
2685 } | 2753 } |
2686 | 2754 |
2687 // Noone should have claimed it directly. We can given | 2755 // No one should have claimed it directly. We can given |
2688 // that we claimed its "starts humongous" region. | 2756 // that we claimed its "starts humongous" region. |
2689 assert(chr->claim_value() != claim_value, "sanity"); | 2757 assert(chr->claim_value() != claim_value, "sanity"); |
2690 assert(chr->humongous_start_region() == r, "sanity"); | 2758 assert(chr->humongous_start_region() == r, "sanity"); |
2691 | 2759 |
2692 if (chr->claimHeapRegion(claim_value)) { | 2760 if (chr->claimHeapRegion(claim_value)) { |
2693 // we should always be able to claim it; noone else should | 2761 // we should always be able to claim it; no one else should |
2694 // be trying to claim this region | 2762 // be trying to claim this region |
2695 | 2763 |
2696 bool res2 = cl->doHeapRegion(chr); | 2764 bool res2 = cl->doHeapRegion(chr); |
2697 assert(!res2, "Should not abort"); | 2765 assert(!res2, "Should not abort"); |
2698 | 2766 |
2982 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | 3050 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { |
2983 // Return the remaining space in the cur alloc region, but not less than | 3051 // Return the remaining space in the cur alloc region, but not less than |
2984 // the min TLAB size. | 3052 // the min TLAB size. |
2985 | 3053 |
2986 // Also, this value can be at most the humongous object threshold, | 3054 // Also, this value can be at most the humongous object threshold, |
2987 // since we can't allow tlabs to grow big enough to accomodate | 3055 // since we can't allow tlabs to grow big enough to accommodate |
2988 // humongous objects. | 3056 // humongous objects. |
2989 | 3057 |
2990 HeapRegion* hr = _mutator_alloc_region.get(); | 3058 HeapRegion* hr = _mutator_alloc_region.get(); |
2991 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; | 3059 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
2992 if (hr == NULL) { | 3060 if (hr == NULL) { |
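The TLAB cap above falls out of the humongous threshold: a TLAB is never allowed to reach half a region, or its allocations would become humongous. Worked numbers with an assumed 1 MB region and 64-bit HeapWords:

#include <cassert>
#include <cstddef>

int main() {
  const size_t wordSize   = 8;                      // 64-bit HeapWord
  const size_t GrainWords = 1024 * 1024 / wordSize; // assumed 1 MB region
  size_t humongous_threshold_in_words = GrainWords / 2;  // 64K words
  size_t max_tlab_size = humongous_threshold_in_words * wordSize;
  assert(max_tlab_size == 512 * 1024);              // half the region, bytes
  return 0;
}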
3545 // Fill TLAB's and such | 3613 // Fill TLAB's and such |
3546 ensure_parsability(true); | 3614 ensure_parsability(true); |
3547 } | 3615 } |
3548 | 3616 |
3549 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | 3617 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { |
3618 | |
3619 if (G1SummarizeRSetStats && | |
3620 (G1SummarizeRSetStatsPeriod > 0) && | |
3621 // we are at the end of the GC. Total collections has already been increased. | |
3622 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) { | |
3623 g1_rem_set()->print_periodic_summary_info(); | |
3624 } | |
3625 | |
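The new periodic check subtracts one from total_collections() because, as the added comment notes, the counter has already been bumped by the time the epilogue runs; without the adjustment a period of N would first fire after the Nth GC instead of the first. A quick standalone check of which collections trigger the summary for a period of 3 (the loop and bound are illustrative):

```cpp
#include <cstdio>

int main() {
  const unsigned period = 3; // stands in for G1SummarizeRSetStatsPeriod
  // total_collections() as observed in the epilogue, i.e. already incremented.
  for (unsigned total_collections = 1; total_collections <= 7; ++total_collections) {
    if (period > 0 && (total_collections - 1) % period == 0) {
      // Fires after collections 1, 4, 7, ...: every 3rd GC, starting with the first.
      std::printf("periodic summary after GC #%u\n", total_collections);
    }
  }
  return 0;
}
```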
3550 // FIXME: what is this about? | 3626 // FIXME: what is this about? |
3551 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | 3627 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" |
3552 // is set. | 3628 // is set. |
3553 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | 3629 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), |
3554 "derived pointer present")); | 3630 "derived pointer present")); |
3616 G1CollectedHeap::setup_surviving_young_words() { | 3692 G1CollectedHeap::setup_surviving_young_words() { |
3617 assert(_surviving_young_words == NULL, "pre-condition"); | 3693 assert(_surviving_young_words == NULL, "pre-condition"); |
3618 uint array_length = g1_policy()->young_cset_region_length(); | 3694 uint array_length = g1_policy()->young_cset_region_length(); |
3619 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC); | 3695 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC); |
3620 if (_surviving_young_words == NULL) { | 3696 if (_surviving_young_words == NULL) { |
3621 vm_exit_out_of_memory(sizeof(size_t) * array_length, | 3697 vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR, |
3622 "Not enough space for young surv words summary."); | 3698 "Not enough space for young surv words summary."); |
3623 } | 3699 } |
3624 memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t)); | 3700 memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t)); |
3625 #ifdef ASSERT | 3701 #ifdef ASSERT |
3626 for (uint i = 0; i < array_length; ++i) { | 3702 for (uint i = 0; i < array_length; ++i) { |
3741 | 3817 |
3742 if (GC_locker::check_active_before_gc()) { | 3818 if (GC_locker::check_active_before_gc()) { |
3743 return false; | 3819 return false; |
3744 } | 3820 } |
3745 | 3821 |
3822 _gc_timer_stw->register_gc_start(os::elapsed_counter()); | |
3823 | |
3824 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start()); | |
3825 | |
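The two added calls are ordered deliberately: the STW timer records the start timestamp first, and the tracer then reports that same timestamp via gc_start(), so both subsystems agree on when the pause began. A small sketch of that dependency with invented GCTimer/GCTracer stand-ins:

```cpp
#include <chrono>
#include <cstdio>

// Invented stand-ins for the STW GC timer and tracer pairing.
struct GCTimer {
  std::chrono::steady_clock::time_point start;
  void register_gc_start() { start = std::chrono::steady_clock::now(); }
  std::chrono::steady_clock::time_point gc_start() const { return start; }
};

struct GCTracer {
  // The tracer does not read the clock itself; it reports the timer's
  // timestamp so both subsystems agree on when the pause began.
  void report_gc_start(std::chrono::steady_clock::time_point t) {
    std::printf("GC start recorded %lld ns after steady-clock epoch\n",
                (long long)t.time_since_epoch().count());
  }
};

int main() {
  GCTimer timer;
  GCTracer tracer;
  timer.register_gc_start();                // 1) take the timestamp
  tracer.report_gc_start(timer.gc_start()); // 2) report using that same timestamp
  return 0;
}
```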
3746 SvcGCMarker sgcm(SvcGCMarker::MINOR); | 3826 SvcGCMarker sgcm(SvcGCMarker::MINOR); |
3747 ResourceMark rm; | 3827 ResourceMark rm; |
3748 | 3828 |
3749 print_heap_before_gc(); | 3829 print_heap_before_gc(); |
3830 trace_heap_before_gc(_gc_tracer_stw); | |
3750 | 3831 |
3751 HRSPhaseSetter x(HRSPhaseEvacuation); | 3832 HRSPhaseSetter x(HRSPhaseEvacuation); |
3752 verify_region_sets_optional(); | 3833 verify_region_sets_optional(); |
3753 verify_dirty_young_regions(); | 3834 verify_dirty_young_regions(); |
3754 | 3835 |
3769 // the CM thread, the flag's value in the policy has been reset. | 3850 // the CM thread, the flag's value in the policy has been reset. |
3770 bool should_start_conc_mark = g1_policy()->during_initial_mark_pause(); | 3851 bool should_start_conc_mark = g1_policy()->during_initial_mark_pause(); |
3771 | 3852 |
3772 // Inner scope for scope based logging, timers, and stats collection | 3853 // Inner scope for scope based logging, timers, and stats collection |
3773 { | 3854 { |
3855 EvacuationInfo evacuation_info; | |
3856 | |
3774 if (g1_policy()->during_initial_mark_pause()) { | 3857 if (g1_policy()->during_initial_mark_pause()) { |
3775 // We are about to start a marking cycle, so we increment the | 3858 // We are about to start a marking cycle, so we increment the |
3776 // full collection counter. | 3859 // full collection counter. |
3777 increment_old_marking_cycles_started(); | 3860 increment_old_marking_cycles_started(); |
3778 } | 3861 register_concurrent_cycle_start(_gc_timer_stw->gc_start()); |
3862 } | |
3863 | |
3864 _gc_tracer_stw->report_yc_type(yc_type()); | |
3865 | |
3779 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); | 3866 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); |
3780 | 3867 |
3781 int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? | 3868 int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? |
3782 workers()->active_workers() : 1); | 3869 workers()->active_workers() : 1); |
3783 double pause_start_sec = os::elapsedTime(); | 3870 double pause_start_sec = os::elapsedTime(); |
3846 // Preserving the old comment here if that helps the investigation: | 3933 // Preserving the old comment here if that helps the investigation: |
3847 // | 3934 // |
3848 // The elapsed time induced by the start time below deliberately elides | 3935 // The elapsed time induced by the start time below deliberately elides |
3849 // the possible verification above. | 3936 // the possible verification above. |
3850 double sample_start_time_sec = os::elapsedTime(); | 3937 double sample_start_time_sec = os::elapsedTime(); |
3851 size_t start_used_bytes = used(); | |
3852 | 3938 |
3853 #if YOUNG_LIST_VERBOSE | 3939 #if YOUNG_LIST_VERBOSE |
3854 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); | 3940 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
3855 _young_list->print(); | 3941 _young_list->print(); |
3856 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); | 3942 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
3857 #endif // YOUNG_LIST_VERBOSE | 3943 #endif // YOUNG_LIST_VERBOSE |
3858 | 3944 |
3859 g1_policy()->record_collection_pause_start(sample_start_time_sec, | 3945 g1_policy()->record_collection_pause_start(sample_start_time_sec); |
3860 start_used_bytes); | |
3861 | 3946 |
3862 double scan_wait_start = os::elapsedTime(); | 3947 double scan_wait_start = os::elapsedTime(); |
3863 // We have to wait until the CM threads finish scanning the | 3948 // We have to wait until the CM threads finish scanning the |
3864 // root regions as it's the only way to ensure that all the | 3949 // root regions as it's the only way to ensure that all the |
3865 // objects on them have been correctly scanned before we start | 3950 // objects on them have been correctly scanned before we start |
3885 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); | 3970 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
3886 _young_list->print(); | 3971 _young_list->print(); |
3887 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); | 3972 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
3888 #endif // YOUNG_LIST_VERBOSE | 3973 #endif // YOUNG_LIST_VERBOSE |
3889 | 3974 |
3890 g1_policy()->finalize_cset(target_pause_time_ms); | 3975 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); |
3891 | 3976 |
3892 _cm->note_start_of_gc(); | 3977 _cm->note_start_of_gc(); |
3893 // We should not verify the per-thread SATB buffers given that | 3978 // We should not verify the per-thread SATB buffers given that |
3894 // we have not filtered them yet (we'll do so during the | 3979 // we have not filtered them yet (we'll do so during the |
3895 // GC). We also call this after finalize_cset() to | 3980 // GC). We also call this after finalize_cset() to |
3921 #endif // ASSERT | 4006 #endif // ASSERT |
3922 | 4007 |
3923 setup_surviving_young_words(); | 4008 setup_surviving_young_words(); |
3924 | 4009 |
3925 // Initialize the GC alloc regions. | 4010 // Initialize the GC alloc regions. |
3926 init_gc_alloc_regions(); | 4011 init_gc_alloc_regions(evacuation_info); |
3927 | 4012 |
3928 // Actually do the work... | 4013 // Actually do the work... |
3929 evacuate_collection_set(); | 4014 evacuate_collection_set(evacuation_info); |
3930 | 4015 |
3931 // We do this mainly to verify the per-thread SATB buffers | 4016 // We do this mainly to verify the per-thread SATB buffers |
3932 // (which have been filtered by now) since we didn't verify | 4017 // (which have been filtered by now) since we didn't verify |
3933 // them earlier. No point in re-checking the stacks / enqueued | 4018 // them earlier. No point in re-checking the stacks / enqueued |
3934 // buffers given that the CSet has not changed since last time | 4019 // buffers given that the CSet has not changed since last time |
3936 _cm->verify_no_cset_oops(false /* verify_stacks */, | 4021 _cm->verify_no_cset_oops(false /* verify_stacks */, |
3937 false /* verify_enqueued_buffers */, | 4022 false /* verify_enqueued_buffers */, |
3938 true /* verify_thread_buffers */, | 4023 true /* verify_thread_buffers */, |
3939 true /* verify_fingers */); | 4024 true /* verify_fingers */); |
3940 | 4025 |
3941 free_collection_set(g1_policy()->collection_set()); | 4026 free_collection_set(g1_policy()->collection_set(), evacuation_info); |
3942 g1_policy()->clear_collection_set(); | 4027 g1_policy()->clear_collection_set(); |
3943 | 4028 |
3944 cleanup_surviving_young_words(); | 4029 cleanup_surviving_young_words(); |
3945 | 4030 |
3946 // Start a new incremental collection set for the next pause. | 4031 // Start a new incremental collection set for the next pause. |
3964 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); | 4049 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3965 _young_list->print(); | 4050 _young_list->print(); |
3966 #endif // YOUNG_LIST_VERBOSE | 4051 #endif // YOUNG_LIST_VERBOSE |
3967 | 4052 |
3968 g1_policy()->record_survivor_regions(_young_list->survivor_length(), | 4053 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
3969 _young_list->first_survivor_region(), | 4054 _young_list->first_survivor_region(), |
3970 _young_list->last_survivor_region()); | 4055 _young_list->last_survivor_region()); |
3971 | 4056 |
3972 _young_list->reset_auxilary_lists(); | 4057 _young_list->reset_auxilary_lists(); |
3973 | 4058 |
3974 if (evacuation_failed()) { | 4059 if (evacuation_failed()) { |
3975 _summary_bytes_used = recalculate_used(); | 4060 _summary_bytes_used = recalculate_used(); |
4061 uint n_queues = MAX2((int)ParallelGCThreads, 1); | |
4062 for (uint i = 0; i < n_queues; i++) { | |
4063 if (_evacuation_failed_info_array[i].has_failed()) { | |
4064 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); | |
4065 } | |
4066 } | |
3976 } else { | 4067 } else { |
3977 // The "used" of the collection set have already been subtracted | 4068 // The "used" of the collection set have already been subtracted |
3978 // when they were freed. Add in the bytes evacuated. | 4069 // when they were freed. Add in the bytes evacuated. |
3979 _summary_bytes_used += g1_policy()->bytes_copied_during_gc(); | 4070 _summary_bytes_used += g1_policy()->bytes_copied_during_gc(); |
3980 } | 4071 } |
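On the failure path above, the heap keeps one EvacuationFailedInfo per worker queue and reports only the entries whose has_failed() is set, so workers that copied everything successfully add nothing to the trace output. A simplified per-worker tally along those lines; the struct here is an invented miniature, not HotSpot's class:

```cpp
#include <cstdio>
#include <vector>

// Invented, simplified stand-in for the per-worker EvacuationFailedInfo.
struct EvacuationFailedInfo {
  std::size_t failed_count = 0;
  std::size_t failed_words = 0;
  void register_copy_failure(std::size_t word_size) {
    ++failed_count;
    failed_words += word_size;
  }
  bool has_failed() const { return failed_count > 0; }
};

int main() {
  const unsigned n_queues = 4; // MAX2((int)ParallelGCThreads, 1) in the real code
  std::vector<EvacuationFailedInfo> info(n_queues);

  // Pretend workers 1 and 3 each failed to copy an object.
  info[1].register_copy_failure(16);
  info[3].register_copy_failure(128);

  // Report only the queues that actually failed, as the pause epilogue does.
  for (unsigned i = 0; i < n_queues; i++) {
    if (info[i].has_failed()) {
      std::printf("queue %u: %zu failed copies, %zu words\n",
                  i, info[i].failed_count, info[i].failed_words);
    }
  }
  return 0;
}
```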
4013 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); | 4104 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
4014 } | 4105 } |
4015 } | 4106 } |
4016 } | 4107 } |
4017 | 4108 |
4018 // We redo the verificaiton but now wrt to the new CSet which | 4109 // We redo the verification but now wrt the new CSet which |
4019 // has just got initialized after the previous CSet was freed. | 4110 // has just got initialized after the previous CSet was freed. |
4020 _cm->verify_no_cset_oops(true /* verify_stacks */, | 4111 _cm->verify_no_cset_oops(true /* verify_stacks */, |
4021 true /* verify_enqueued_buffers */, | 4112 true /* verify_enqueued_buffers */, |
4022 true /* verify_thread_buffers */, | 4113 true /* verify_thread_buffers */, |
4023 true /* verify_fingers */); | 4114 true /* verify_fingers */); |
4026 // This timing is only used by the ergonomics to handle our pause target. | 4117 // This timing is only used by the ergonomics to handle our pause target. |
4027 // It is unclear why this should not include the full pause. We will | 4118 // It is unclear why this should not include the full pause. We will |
4028 // investigate this in CR 7178365. | 4119 // investigate this in CR 7178365. |
4029 double sample_end_time_sec = os::elapsedTime(); | 4120 double sample_end_time_sec = os::elapsedTime(); |
4030 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; | 4121 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; |
4031 g1_policy()->record_collection_pause_end(pause_time_ms); | 4122 g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info); |
4032 | 4123 |
4033 MemoryService::track_memory_usage(); | 4124 MemoryService::track_memory_usage(); |
4034 | 4125 |
4035 // In prepare_for_verify() below we'll need to scan the deferred | 4126 // In prepare_for_verify() below we'll need to scan the deferred |
4036 // update buffers to bring the RSets up-to-date if | 4127 // update buffers to bring the RSets up-to-date if |
4093 | 4184 |
4094 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); | 4185 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
4095 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | 4186 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); |
4096 | 4187 |
4097 print_heap_after_gc(); | 4188 print_heap_after_gc(); |
4189 trace_heap_after_gc(_gc_tracer_stw); | |
4098 | 4190 |
4099 // We must call G1MonitoringSupport::update_sizes() in the same scoping level | 4191 // We must call G1MonitoringSupport::update_sizes() in the same scoping level |
4100 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the | 4192 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the |
4101 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated | 4193 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated |
4102 // before any GC notifications are raised. | 4194 // before any GC notifications are raised. |
4103 g1mm()->update_sizes(); | 4195 g1mm()->update_sizes(); |
4104 } | 4196 |
4105 | 4197 _gc_tracer_stw->report_evacuation_info(&evacuation_info); |
4106 if (G1SummarizeRSetStats && | 4198 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold()); |
4107 (G1SummarizeRSetStatsPeriod > 0) && | 4199 _gc_timer_stw->register_gc_end(os::elapsed_counter()); |
4108 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { | 4200 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions()); |
4109 g1_rem_set()->print_summary_info(); | 4201 } |
4110 } | |
4111 | |
4112 // It should now be safe to tell the concurrent mark thread to start | 4202 // It should now be safe to tell the concurrent mark thread to start |
4113 // without its logging output interfering with the logging output | 4203 // without its logging output interfering with the logging output |
4114 // that came from the pause. | 4204 // that came from the pause. |
4115 | 4205 |
4116 if (should_start_conc_mark) { | 4206 if (should_start_conc_mark) { |
4158 void G1CollectedHeap::release_mutator_alloc_region() { | 4248 void G1CollectedHeap::release_mutator_alloc_region() { |
4159 _mutator_alloc_region.release(); | 4249 _mutator_alloc_region.release(); |
4160 assert(_mutator_alloc_region.get() == NULL, "post-condition"); | 4250 assert(_mutator_alloc_region.get() == NULL, "post-condition"); |
4161 } | 4251 } |
4162 | 4252 |
4163 void G1CollectedHeap::init_gc_alloc_regions() { | 4253 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) { |
4164 assert_at_safepoint(true /* should_be_vm_thread */); | 4254 assert_at_safepoint(true /* should_be_vm_thread */); |
4165 | 4255 |
4166 _survivor_gc_alloc_region.init(); | 4256 _survivor_gc_alloc_region.init(); |
4167 _old_gc_alloc_region.init(); | 4257 _old_gc_alloc_region.init(); |
4168 HeapRegion* retained_region = _retained_old_gc_alloc_region; | 4258 HeapRegion* retained_region = _retained_old_gc_alloc_region; |
4173 // b) it's already full (no point in using it), | 4263 // b) it's already full (no point in using it), |
4174 // c) it's empty (this means that it was emptied during | 4264 // c) it's empty (this means that it was emptied during |
4175 // a cleanup and it should be on the free list now), or | 4265 // a cleanup and it should be on the free list now), or |
4176 // d) it's humongous (this means that it was emptied | 4266 // d) it's humongous (this means that it was emptied |
4177 // during a cleanup and was added to the free list, but | 4267 // during a cleanup and was added to the free list, but |
4178 // has been subseqently used to allocate a humongous | 4268 // has been subsequently used to allocate a humongous |
4179 // object that may be less than the region size). | 4269 // object that may be less than the region size). |
4180 if (retained_region != NULL && | 4270 if (retained_region != NULL && |
4181 !retained_region->in_collection_set() && | 4271 !retained_region->in_collection_set() && |
4182 !(retained_region->top() == retained_region->end()) && | 4272 !(retained_region->top() == retained_region->end()) && |
4183 !retained_region->is_empty() && | 4273 !retained_region->is_empty() && |
4190 _old_set.remove(retained_region); | 4280 _old_set.remove(retained_region); |
4191 bool during_im = g1_policy()->during_initial_mark_pause(); | 4281 bool during_im = g1_policy()->during_initial_mark_pause(); |
4192 retained_region->note_start_of_copying(during_im); | 4282 retained_region->note_start_of_copying(during_im); |
4193 _old_gc_alloc_region.set(retained_region); | 4283 _old_gc_alloc_region.set(retained_region); |
4194 _hr_printer.reuse(retained_region); | 4284 _hr_printer.reuse(retained_region); |
4195 } | 4285 evacuation_info.set_alloc_regions_used_before(retained_region->used()); |
4196 } | 4286 } |
4197 | 4287 } |
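Conditions a) through d) in the comment above all reduce to one question: can the retained region still absorb ordinary old-generation copies? A hedged predicate capturing the four rejections, with invented accessors in place of HeapRegion's:

```cpp
#include <cstdio>

// Invented stand-in for the handful of HeapRegion queries the check needs.
struct Region {
  bool in_collection_set;
  bool full;        // top() == end() in the real code
  bool empty;
  bool humongous;
};

// Mirrors conditions a) through d): reuse the retained old region only
// if none of the disqualifying states apply.
bool can_reuse_retained(const Region* r) {
  return r != nullptr &&
         !r->in_collection_set && // a) copying into the CSet is self-defeating
         !r->full &&              // b) no space left anyway
         !r->empty &&             // c) it went back to the free list
         !r->humongous;           // d) repurposed for a humongous object
}

int main() {
  Region ok  = {false, false, false, false};
  Region bad = {true,  false, false, false};
  std::printf("%d %d\n", can_reuse_retained(&ok), can_reuse_retained(&bad)); // 1 0
  return 0;
}
```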
4198 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) { | 4288 |
4289 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) { | |
4290 evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() + | |
4291 _old_gc_alloc_region.count()); | |
4199 _survivor_gc_alloc_region.release(); | 4292 _survivor_gc_alloc_region.release(); |
4200 // If we have an old GC alloc region to release, we'll save it in | 4293 // If we have an old GC alloc region to release, we'll save it in |
4201 // _retained_old_gc_alloc_region. If we don't | 4294 // _retained_old_gc_alloc_region. If we don't |
4202 // _retained_old_gc_alloc_region will become NULL. This is what we | 4295 // _retained_old_gc_alloc_region will become NULL. This is what we |
4203 // want either way so no reason to check explicitly for either | 4296 // want either way so no reason to check explicitly for either |
4276 obj->oop_iterate_backwards(_evac_failure_closure); | 4369 obj->oop_iterate_backwards(_evac_failure_closure); |
4277 } | 4370 } |
4278 } | 4371 } |
4279 | 4372 |
4280 oop | 4373 oop |
4281 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | 4374 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, |
4282 oop old) { | 4375 oop old) { |
4283 assert(obj_in_cs(old), | 4376 assert(obj_in_cs(old), |
4284 err_msg("obj: "PTR_FORMAT" should still be in the CSet", | 4377 err_msg("obj: "PTR_FORMAT" should still be in the CSet", |
4285 (HeapWord*) old)); | 4378 (HeapWord*) old)); |
4286 markOop m = old->mark(); | 4379 markOop m = old->mark(); |
4287 oop forward_ptr = old->forward_to_atomic(old); | 4380 oop forward_ptr = old->forward_to_atomic(old); |
4288 if (forward_ptr == NULL) { | 4381 if (forward_ptr == NULL) { |
4289 // Forward-to-self succeeded. | 4382 // Forward-to-self succeeded. |
4290 | 4383 assert(_par_scan_state != NULL, "par scan state"); |
4384 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4385 uint queue_num = _par_scan_state->queue_num(); | |
4386 | |
4387 _evacuation_failed = true; | |
4388 _evacuation_failed_info_array[queue_num].register_copy_failure(old->size()); | |
4291 if (_evac_failure_closure != cl) { | 4389 if (_evac_failure_closure != cl) { |
4292 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | 4390 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); |
4293 assert(!_drain_in_progress, | 4391 assert(!_drain_in_progress, |
4294 "Should only be true while someone holds the lock."); | 4392 "Should only be true while someone holds the lock."); |
4295 // Set the global evac-failure closure to the current thread's. | 4393 // Set the global evac-failure closure to the current thread's. |
4316 return forward_ptr; | 4414 return forward_ptr; |
4317 } | 4415 } |
4318 } | 4416 } |
4319 | 4417 |
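forward_to_atomic(old) is the heart of this failure path: every thread that fails to copy an object races to install a forwarding pointer to the object itself, a NULL return means this thread won (and must record the failure), and a non-NULL return is whatever forwarding pointer another thread installed first. A stripped-down model of that race using std::atomic; the Obj type and field names are invented:

```cpp
#include <atomic>
#include <cstdio>

struct Obj {
  std::atomic<Obj*> forwardee{nullptr}; // stands in for the mark-word forwarding state
};

// Try to make the object forward to itself. Returns nullptr on success
// (we won the race) or the previously installed forwardee (someone beat us).
Obj* forward_to_self_atomic(Obj* old_obj) {
  Obj* expected = nullptr;
  if (old_obj->forwardee.compare_exchange_strong(expected, old_obj)) {
    return nullptr;  // forward-to-self succeeded; record the evac failure
  }
  return expected;   // another thread already forwarded this object
}

int main() {
  Obj o;
  Obj* first  = forward_to_self_atomic(&o); // wins: nullptr
  Obj* second = forward_to_self_atomic(&o); // loses: sees &o
  std::printf("first=%p second=%p self=%p\n",
              (void*)first, (void*)second, (void*)&o);
  return 0;
}
```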
4320 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | 4418 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { |
4321 set_evacuation_failed(true); | |
4322 | |
4323 preserve_mark_if_necessary(old, m); | 4419 preserve_mark_if_necessary(old, m); |
4324 | 4420 |
4325 HeapRegion* r = heap_region_containing(old); | 4421 HeapRegion* r = heap_region_containing(old); |
4326 if (!r->evacuation_failed()) { | 4422 if (!r->evacuation_failed()) { |
4327 r->set_evacuation_failed(true); | 4423 r->set_evacuation_failed(true); |
4401 uint array_length = PADDING_ELEM_NUM + | 4497 uint array_length = PADDING_ELEM_NUM + |
4402 real_length + | 4498 real_length + |
4403 PADDING_ELEM_NUM; | 4499 PADDING_ELEM_NUM; |
4404 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC); | 4500 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC); |
4405 if (_surviving_young_words_base == NULL) | 4501 if (_surviving_young_words_base == NULL) |
4406 vm_exit_out_of_memory(array_length * sizeof(size_t), | 4502 vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR, |
4407 "Not enough space for young surv histo."); | 4503 "Not enough space for young surv histo."); |
4408 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; | 4504 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
4409 memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t)); | 4505 memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t)); |
4410 | 4506 |
4411 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; | 4507 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
4567 #endif // !PRODUCT | 4663 #endif // !PRODUCT |
4568 | 4664 |
4569 if (obj_ptr == NULL) { | 4665 if (obj_ptr == NULL) { |
4570 // This will either forward-to-self, or detect that someone else has | 4666 // This will either forward-to-self, or detect that someone else has |
4571 // installed a forwarding pointer. | 4667 // installed a forwarding pointer. |
4572 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | 4668 return _g1->handle_evacuation_failure_par(_par_scan_state, old); |
4573 return _g1->handle_evacuation_failure_par(cl, old); | |
4574 } | 4669 } |
4575 | 4670 |
4576 oop obj = oop(obj_ptr); | 4671 oop obj = oop(obj_ptr); |
4577 | 4672 |
4578 // We're going to allocate linearly, so might as well prefetch ahead. | 4673 // We're going to allocate linearly, so might as well prefetch ahead. |
5083 } | 5178 } |
5084 _process_strong_tasks->all_tasks_completed(); | 5179 _process_strong_tasks->all_tasks_completed(); |
5085 } | 5180 } |
5086 | 5181 |
5087 void | 5182 void |
5088 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | 5183 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) { |
5089 OopClosure* non_root_closure) { | |
5090 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); | 5184 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
5091 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); | 5185 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs); |
5092 } | 5186 } |
5093 | 5187 |
5094 // Weak Reference Processing support | 5188 // Weak Reference Processing support |
5095 | 5189 |
5096 // An always "is_alive" closure that is used to preserve referents. | 5190 // An always "is_alive" closure that is used to preserve referents. |
5099 // discovered by the CM ref processor. | 5193 // discovered by the CM ref processor. |
5100 class G1AlwaysAliveClosure: public BoolObjectClosure { | 5194 class G1AlwaysAliveClosure: public BoolObjectClosure { |
5101 G1CollectedHeap* _g1; | 5195 G1CollectedHeap* _g1; |
5102 public: | 5196 public: |
5103 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | 5197 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
5104 void do_object(oop p) { assert(false, "Do not call."); } | |
5105 bool do_object_b(oop p) { | 5198 bool do_object_b(oop p) { |
5106 if (p != NULL) { | 5199 if (p != NULL) { |
5107 return true; | 5200 return true; |
5108 } | 5201 } |
5109 return false; | 5202 return false; |
5174 // on the PSS queue. When the queue is drained (after each | 5267 // on the PSS queue. When the queue is drained (after each |
5175 // phase of reference processing) the object and its followers | 5268 // phase of reference processing) the object and its followers |
5176 // will be copied, the reference field set to point to the | 5269 // will be copied, the reference field set to point to the |
5177 // new location, and the RSet updated. Otherwise we need to | 5270 // new location, and the RSet updated. Otherwise we need to |
5178 // use the non-heap or metadata closures directly to copy | 5271 // use the non-heap or metadata closures directly to copy |
5179 // the refernt object and update the pointer, while avoiding | 5272 // the referent object and update the pointer, while avoiding |
5180 // updating the RSet. | 5273 // updating the RSet. |
5181 | 5274 |
5182 if (_g1h->is_in_g1_reserved(p)) { | 5275 if (_g1h->is_in_g1_reserved(p)) { |
5183 _par_scan_state->push_on_queue(p); | 5276 _par_scan_state->push_on_queue(p); |
5184 } else { | 5277 } else { |
5342 virtual void work(uint worker_id) { | 5435 virtual void work(uint worker_id) { |
5343 _enq_task.work(worker_id); | 5436 _enq_task.work(worker_id); |
5344 } | 5437 } |
5345 }; | 5438 }; |
5346 | 5439 |
5347 // Driver routine for parallel reference enqueing. | 5440 // Driver routine for parallel reference enqueueing. |
5348 // Creates an instance of the ref enqueueing gang | 5441 // Creates an instance of the ref enqueueing gang |
5349 // task and has the worker threads execute it. | 5442 // task and has the worker threads execute it. |
5350 | 5443 |
5351 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) { | 5444 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) { |
5352 assert(_workers != NULL, "Need parallel worker threads."); | 5445 assert(_workers != NULL, "Need parallel worker threads."); |
5471 // But some of the referents in the collection set that these | 5564 // But some of the referents in the collection set that these |
5472 // reference objects point to may not have been copied: the STW ref | 5565 // reference objects point to may not have been copied: the STW ref |
5473 // processor would have seen that the reference object had already | 5566 // processor would have seen that the reference object had already |
5474 // been 'discovered' and would have skipped discovering the reference, | 5567 // been 'discovered' and would have skipped discovering the reference, |
5475 // but would not have treated the reference object as a regular oop. | 5568 // but would not have treated the reference object as a regular oop. |
5476 // As a reult the copy closure would not have been applied to the | 5569 // As a result the copy closure would not have been applied to the |
5477 // referent object. | 5570 // referent object. |
5478 // | 5571 // |
5479 // We need to explicitly copy these referent objects - the references | 5572 // We need to explicitly copy these referent objects - the references |
5480 // will be processed at the end of remarking. | 5573 // will be processed at the end of remarking. |
5481 // | 5574 // |
5547 G1STWDrainQueueClosure drain_queue(this, &pss); | 5640 G1STWDrainQueueClosure drain_queue(this, &pss); |
5548 | 5641 |
5549 // Setup the soft refs policy... | 5642 // Setup the soft refs policy... |
5550 rp->setup_policy(false); | 5643 rp->setup_policy(false); |
5551 | 5644 |
5645 ReferenceProcessorStats stats; | |
5552 if (!rp->processing_is_mt()) { | 5646 if (!rp->processing_is_mt()) { |
5553 // Serial reference processing... | 5647 // Serial reference processing... |
5554 rp->process_discovered_references(&is_alive, | 5648 stats = rp->process_discovered_references(&is_alive, |
5555 &keep_alive, | 5649 &keep_alive, |
5556 &drain_queue, | 5650 &drain_queue, |
5557 NULL); | 5651 NULL, |
5652 _gc_timer_stw); | |
5558 } else { | 5653 } else { |
5559 // Parallel reference processing | 5654 // Parallel reference processing |
5560 assert(rp->num_q() == no_of_gc_workers, "sanity"); | 5655 assert(rp->num_q() == no_of_gc_workers, "sanity"); |
5561 assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); | 5656 assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); |
5562 | 5657 |
5563 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers); | 5658 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers); |
5564 rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor); | 5659 stats = rp->process_discovered_references(&is_alive, |
5565 } | 5660 &keep_alive, |
5566 | 5661 &drain_queue, |
5662 &par_task_executor, | |
5663 _gc_timer_stw); | |
5664 } | |
5665 | |
5666 _gc_tracer_stw->report_gc_reference_stats(stats); | |
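Both branches now return a ReferenceProcessorStats value, so the tracer can report one consistent set of numbers whether the serial or the multi-threaded path executed. A sketch of that single-funnel shape; the stats fields and the two process_* helpers are invented placeholders:

```cpp
#include <cstdio>

// Invented, simplified stats record; the real ReferenceProcessorStats
// tracks counts per reference kind (soft/weak/final/phantom).
struct ReferenceProcessorStats {
  std::size_t discovered = 0;
  std::size_t cleared = 0;
};

ReferenceProcessorStats process_serial() {
  return {10, 7}; // placeholder numbers for the sketch
}

ReferenceProcessorStats process_parallel(unsigned workers) {
  (void)workers;  // same logical result, produced by `workers` threads
  return {10, 7};
}

int main() {
  bool processing_is_mt = true;
  unsigned no_of_gc_workers = 4;

  // One stats object, filled by whichever path executes.
  ReferenceProcessorStats stats;
  if (!processing_is_mt) {
    stats = process_serial();
  } else {
    stats = process_parallel(no_of_gc_workers);
  }
  // The tracer consumes the unified result, as report_gc_reference_stats() does.
  std::printf("discovered=%zu cleared=%zu\n", stats.discovered, stats.cleared);
  return 0;
}
```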
5567 // We have completed copying any necessary live referent objects | 5667 // We have completed copying any necessary live referent objects |
5568 // (that were not copied during the actual pause) so we can | 5668 // (that were not copied during the actual pause) so we can |
5569 // retire any active alloc buffers | 5669 // retire any active alloc buffers |
5570 pss.retire_alloc_buffers(); | 5670 pss.retire_alloc_buffers(); |
5571 assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); | 5671 assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); |
5585 // the pending list. | 5685 // the pending list. |
5586 if (!rp->processing_is_mt()) { | 5686 if (!rp->processing_is_mt()) { |
5587 // Serial reference processing... | 5687 // Serial reference processing... |
5588 rp->enqueue_discovered_references(); | 5688 rp->enqueue_discovered_references(); |
5589 } else { | 5689 } else { |
5590 // Parallel reference enqueuing | 5690 // Parallel reference enqueueing |
5591 | 5691 |
5592 assert(no_of_gc_workers == workers()->active_workers(), | 5692 assert(no_of_gc_workers == workers()->active_workers(), |
5593 "Need to reset active workers"); | 5693 "Need to reset active workers"); |
5594 assert(rp->num_q() == no_of_gc_workers, "sanity"); | 5694 assert(rp->num_q() == no_of_gc_workers, "sanity"); |
5595 assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); | 5695 assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); |
5602 assert(!rp->discovery_enabled(), "should have been disabled"); | 5702 assert(!rp->discovery_enabled(), "should have been disabled"); |
5603 | 5703 |
5604 // FIXME | 5704 // FIXME |
5605 // CM's reference processing also cleans up the string and symbol tables. | 5705 // CM's reference processing also cleans up the string and symbol tables. |
5606 // Should we do that here also? We could, but it is a serial operation | 5706 // Should we do that here also? We could, but it is a serial operation |
5607 // and could signicantly increase the pause time. | 5707 // and could significantly increase the pause time. |
5608 | 5708 |
5609 double ref_enq_time = os::elapsedTime() - ref_enq_start; | 5709 double ref_enq_time = os::elapsedTime() - ref_enq_start; |
5610 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0); | 5710 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0); |
5611 } | 5711 } |
5612 | 5712 |
5613 void G1CollectedHeap::evacuate_collection_set() { | 5713 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) { |
5614 _expand_heap_after_alloc_failure = true; | 5714 _expand_heap_after_alloc_failure = true; |
5615 set_evacuation_failed(false); | 5715 _evacuation_failed = false; |
5616 | 5716 |
5617 // Should G1EvacuationFailureALot be in effect for this GC? | 5717 // Should G1EvacuationFailureALot be in effect for this GC? |
5618 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();) | 5718 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();) |
5619 | 5719 |
5620 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | 5720 g1_rem_set()->prepare_for_oops_into_collection_set_do(); |
5621 concurrent_g1_refine()->set_use_cache(false); | 5721 |
5622 concurrent_g1_refine()->clear_hot_cache_claimed_index(); | 5722 // Disable the hot card cache. |
5723 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); | |
5724 hot_card_cache->reset_hot_cache_claimed_index(); | |
5725 hot_card_cache->set_use_cache(false); | |
5623 | 5726 |
5624 uint n_workers; | 5727 uint n_workers; |
5625 if (G1CollectedHeap::use_parallel_gc_threads()) { | 5728 if (G1CollectedHeap::use_parallel_gc_threads()) { |
5626 n_workers = | 5729 n_workers = |
5627 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), | 5730 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), |
5696 G1STWIsAliveClosure is_alive(this); | 5799 G1STWIsAliveClosure is_alive(this); |
5697 G1KeepAliveClosure keep_alive(this); | 5800 G1KeepAliveClosure keep_alive(this); |
5698 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | 5801 JNIHandles::weak_oops_do(&is_alive, &keep_alive); |
5699 } | 5802 } |
5700 | 5803 |
5701 release_gc_alloc_regions(n_workers); | 5804 release_gc_alloc_regions(n_workers, evacuation_info); |
5702 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); | 5805 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
5703 | 5806 |
5704 concurrent_g1_refine()->clear_hot_cache(); | 5807 // Reset and re-enable the hot card cache. |
5705 concurrent_g1_refine()->set_use_cache(true); | 5808 // Note the counts for the cards in the regions in the |
5809 // collection set are reset when the collection set is freed. | |
5810 hot_card_cache->reset_hot_cache(); | |
5811 hot_card_cache->set_use_cache(true); | |
5706 | 5812 |
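The rewritten bracket swaps the old concurrent_g1_refine() cache flags for the G1HotCardCache: it is switched off (and its claim index reset) before evacuation, then reset and re-enabled once the collection set has been processed. An RAII-flavoured sketch of the same on/off bracket; the real code uses explicit calls, and the guard type here is invented:

```cpp
#include <cstdio>

// Invented miniature of G1HotCardCache's on/off surface.
struct HotCardCache {
  bool use_cache = true;
  std::size_t claimed_index = 0;
  void reset_hot_cache() { claimed_index = 0; }
  void reset_hot_cache_claimed_index() { claimed_index = 0; }
  void set_use_cache(bool b) { use_cache = b; }
};

// Scope guard: disable the cache while evacuating, restore it afterwards.
struct HotCardCachePauseGuard {
  HotCardCache& cache;
  explicit HotCardCachePauseGuard(HotCardCache& c) : cache(c) {
    cache.reset_hot_cache_claimed_index();
    cache.set_use_cache(false);
  }
  ~HotCardCachePauseGuard() {
    cache.reset_hot_cache();
    cache.set_use_cache(true);
  }
};

int main() {
  HotCardCache cache;
  {
    HotCardCachePauseGuard guard(cache);
    std::printf("during evacuation: use_cache=%d\n", cache.use_cache); // 0
  }
  std::printf("after evacuation: use_cache=%d\n", cache.use_cache);    // 1
  return 0;
}
```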
5707 finalize_for_evac_failure(); | 5813 finalize_for_evac_failure(); |
5708 | 5814 |
5709 if (evacuation_failed()) { | 5815 if (evacuation_failed()) { |
5710 remove_self_forwarding_pointers(); | 5816 remove_self_forwarding_pointers(); |
5716 } | 5822 } |
5717 | 5823 |
5718 // Enqueue any references remaining on the STW | 5824 // Enqueue any references remaining on the STW |
5719 // reference processor's discovered lists. We need to do | 5825 // reference processor's discovered lists. We need to do |
5720 // this after the card table is cleaned (and verified) as | 5826 // this after the card table is cleaned (and verified) as |
5721 // the act of enqueuing entries on to the pending list | 5827 // the act of enqueueing entries on to the pending list |
5722 // will log these updates (and dirty their associated | 5828 // will log these updates (and dirty their associated |
5723 // cards). We need these updates logged to update any | 5829 // cards). We need these updates logged to update any |
5724 // RSets. | 5830 // RSets. |
5725 enqueue_discovered_references(n_workers); | 5831 enqueue_discovered_references(n_workers); |
5726 | 5832 |
5762 bool par) { | 5868 bool par) { |
5763 assert(!hr->isHumongous(), "this is only for non-humongous regions"); | 5869 assert(!hr->isHumongous(), "this is only for non-humongous regions"); |
5764 assert(!hr->is_empty(), "the region should not be empty"); | 5870 assert(!hr->is_empty(), "the region should not be empty"); |
5765 assert(free_list != NULL, "pre-condition"); | 5871 assert(free_list != NULL, "pre-condition"); |
5766 | 5872 |
5873 // Clear the card counts for this region. | |
5874 // Note: we only need to do this if the region is not young | |
5875 // (since we don't refine cards in young regions). | |
5876 if (!hr->is_young()) { | |
5877 _cg1r->hot_card_cache()->reset_card_counts(hr); | |
5878 } | |
5767 *pre_used += hr->used(); | 5879 *pre_used += hr->used(); |
5768 hr->hr_clear(par, true /* clear_space */); | 5880 hr->hr_clear(par, true /* clear_space */); |
5769 free_list->add_as_head(hr); | 5881 free_list->add_as_head(hr); |
5770 } | 5882 } |
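The added reset is guarded by !is_young() because refinement, and therefore hot-card counting, skips cards in young regions, so only non-young regions can carry counts worth clearing. The guard in sketch form, with invented types:

```cpp
#include <cstdio>

struct Region { bool young; };
struct CardCountCache {
  void reset_card_counts(Region*) { std::printf("counts cleared\n"); }
};

// Only non-young regions can have accumulated hot-card counts, because
// refinement (and hence counting) skips cards in young regions.
void on_free_region(CardCountCache& cache, Region* hr) {
  if (!hr->young) {
    cache.reset_card_counts(hr);
  }
}

int main() {
  CardCountCache cache;
  Region old_region{false}, young_region{true};
  on_free_region(cache, &old_region);   // clears counts
  on_free_region(cache, &young_region); // no-op
  return 0;
}
```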
5771 | 5883 |
5938 | 6050 |
5939 double elapsed = os::elapsedTime() - start; | 6051 double elapsed = os::elapsedTime() - start; |
5940 g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0); | 6052 g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0); |
5941 } | 6053 } |
5942 | 6054 |
5943 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | 6055 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) { |
5944 size_t pre_used = 0; | 6056 size_t pre_used = 0; |
5945 FreeRegionList local_free_list("Local List for CSet Freeing"); | 6057 FreeRegionList local_free_list("Local List for CSet Freeing"); |
5946 | 6058 |
5947 double young_time_ms = 0.0; | 6059 double young_time_ms = 0.0; |
5948 double non_young_time_ms = 0.0; | 6060 double non_young_time_ms = 0.0; |
6024 } | 6136 } |
6025 cur->set_not_young(); | 6137 cur->set_not_young(); |
6026 cur->set_evacuation_failed(false); | 6138 cur->set_evacuation_failed(false); |
6027 // The region is now considered to be old. | 6139 // The region is now considered to be old. |
6028 _old_set.add(cur); | 6140 _old_set.add(cur); |
6141 evacuation_info.increment_collectionset_used_after(cur->used()); | |
6029 } | 6142 } |
6030 cur = next; | 6143 cur = next; |
6031 } | 6144 } |
6032 | 6145 |
6146 evacuation_info.set_regions_freed(local_free_list.length()); | |
6033 policy->record_max_rs_lengths(rs_lengths); | 6147 policy->record_max_rs_lengths(rs_lengths); |
6034 policy->cset_regions_freed(); | 6148 policy->cset_regions_freed(); |
6035 | 6149 |
6036 double end_sec = os::elapsedTime(); | 6150 double end_sec = os::elapsedTime(); |
6037 double elapsed_ms = (end_sec - start_sec) * 1000.0; | 6151 double elapsed_ms = (end_sec - start_sec) * 1000.0; |
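free_collection_set() now feeds two aggregates into the EvacuationInfo as it walks the CSet: the bytes still occupied by regions that failed evacuation and move to the old set, and the count of regions handed back to the free list. A compact model of that bookkeeping; the structs and numbers are illustrative:

```cpp
#include <cstdio>
#include <vector>

// Illustrative miniature of the EvacuationInfo fields touched here.
struct EvacuationInfo {
  std::size_t collectionset_used_after = 0;
  std::size_t regions_freed = 0;
  void increment_collectionset_used_after(std::size_t bytes) {
    collectionset_used_after += bytes;
  }
  void set_regions_freed(std::size_t n) { regions_freed = n; }
};

struct CSetRegion {
  std::size_t used_bytes;
  bool evacuation_failed; // failed regions stay occupied and join the old set
};

int main() {
  std::vector<CSetRegion> cset = {{4096, false}, {8192, true}, {2048, false}};
  EvacuationInfo info;
  std::size_t freed = 0;

  for (const CSetRegion& r : cset) {
    if (r.evacuation_failed) {
      // Region keeps its data: account it as still used after the pause.
      info.increment_collectionset_used_after(r.used_bytes);
    } else {
      ++freed; // fully evacuated region goes on the local free list
    }
  }
  info.set_regions_freed(freed);
  std::printf("used after: %zu bytes, regions freed: %zu\n",
              info.collectionset_used_after, info.regions_freed);
  return 0;
}
```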