comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 10099:c0000f77bc6d
Merge
author | johnc |
date | Thu, 11 Apr 2013 10:20:00 -0700 |
parents | 7b835924c31c 71013d764f6e |
children | 746b070f5022 a08c80e9e1e5 |
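Note on the first hunk below: the body of G1CollectedHeap::do_collection moves inside an explicit block opened at new line 1325, so the RAII statistics objects (TraceTime, TraceCollectorStats, TraceMemoryManagerStats) now destruct, and g1mm()->update_sizes() runs, before print_heap_after_gc() and post_full_gc_dump() execute outside the scope; the comment at new lines 1541-1544 states that constraint, and the old g1h_prev_used/print_size_transition() reporting is replaced by g1_policy()->print_heap_transition() and print_detailed_heap_transition(). A minimal sketch of the scoping idiom, with illustrative stand-in names (ScopedTrace and full_collection_sketch are not HotSpot API):

```cpp
#include <cstdio>

// Stand-in for TraceTime / TraceMemoryManagerStats: the destructor emits the
// log record, so the placement of the closing brace decides when reporting
// and GC notifications happen.
struct ScopedTrace {
  const char* tag;
  explicit ScopedTrace(const char* t) : tag(t) { std::printf("[%s start]\n", tag); }
  ~ScopedTrace() { std::printf("[%s end]\n", tag); }
};

void full_collection_sketch() {
  {
    ScopedTrace t("Full GC");      // constructed inside the new block
    // ... collection work, monitoring updates, gc_epilogue(true) ...
  }                                // destructor fires here, closing the record
  std::printf("heap after GC\n");  // post-GC printing sees a finished record
}
```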
9076:7b835924c31c | 10099:c0000f77bc6d |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
1320 // Timing | 1320 // Timing |
1321 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant"); | 1321 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant"); |
1322 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); | 1322 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); |
1323 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); | 1323 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); |
1324 | 1324 |
1325 TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); | |
1326 TraceCollectorStats tcs(g1mm()->full_collection_counters()); | |
1327 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); | |
1328 | |
1329 double start = os::elapsedTime(); | |
1330 g1_policy()->record_full_collection_start(); | |
1331 | |
1332 // Note: When we have a more flexible GC logging framework that | |
1333 // allows us to add optional attributes to a GC log record we | |
1334 // could consider timing and reporting how long we wait in the | |
1335 // following two methods. | |
1336 wait_while_free_regions_coming(); | |
1337 // If we start the compaction before the CM threads finish | |
1338 // scanning the root regions we might trip them over as we'll | |
1339 // be moving objects / updating references. So let's wait until | |
1340 // they are done. By telling them to abort, they should complete | |
1341 // early. | |
1342 _cm->root_regions()->abort(); | |
1343 _cm->root_regions()->wait_until_scan_finished(); | |
1344 append_secondary_free_list_if_not_empty_with_lock(); | |
1345 | |
1346 gc_prologue(true); | |
1347 increment_total_collections(true /* full gc */); | |
1348 increment_old_marking_cycles_started(); | |
1349 | |
1350 size_t g1h_prev_used = used(); | |
1351 assert(used() == recalculate_used(), "Should be equal"); | |
1352 | |
1353 verify_before_gc(); | |
1354 | |
1355 pre_full_gc_dump(); | |
1356 | |
1357 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
1358 | |
1359 // Disable discovery and empty the discovered lists | |
1360 // for the CM ref processor. | |
1361 ref_processor_cm()->disable_discovery(); | |
1362 ref_processor_cm()->abandon_partial_discovery(); | |
1363 ref_processor_cm()->verify_no_references_recorded(); | |
1364 | |
1365 // Abandon current iterations of concurrent marking and concurrent | |
1366 // refinement, if any are in progress. We have to do this before | |
1367 // wait_until_scan_finished() below. | |
1368 concurrent_mark()->abort(); | |
1369 | |
1370 // Make sure we'll choose a new allocation region afterwards. | |
1371 release_mutator_alloc_region(); | |
1372 abandon_gc_alloc_regions(); | |
1373 g1_rem_set()->cleanupHRRS(); | |
1374 | |
1375 // We should call this after we retire any currently active alloc | |
1376 // regions so that all the ALLOC / RETIRE events are generated | |
1377 // before the start GC event. | |
1378 _hr_printer.start_gc(true /* full */, (size_t) total_collections()); | |
1379 | |
1380 // We may have added regions to the current incremental collection | |
1381 // set between the last GC or pause and now. We need to clear the | |
1382 // incremental collection set and then start rebuilding it afresh | |
1383 // after this full GC. | |
1384 abandon_collection_set(g1_policy()->inc_cset_head()); | |
1385 g1_policy()->clear_incremental_cset(); | |
1386 g1_policy()->stop_incremental_cset_building(); | |
1387 | |
1388 tear_down_region_sets(false /* free_list_only */); | |
1389 g1_policy()->set_gcs_are_young(true); | |
1390 | |
1391 // See the comments in g1CollectedHeap.hpp and | |
1392 // G1CollectedHeap::ref_processing_init() about | |
1393 // how reference processing currently works in G1. | |
1394 | |
1395 // Temporarily make discovery by the STW ref processor single threaded (non-MT). | |
1396 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); | |
1397 | |
1398 // Temporarily clear the STW ref processor's _is_alive_non_header field. | |
1399 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); | |
1400 | |
1401 ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); | |
1402 ref_processor_stw()->setup_policy(do_clear_all_soft_refs); | |
1403 | |
1404 // Do collection work | |
1405 { | 1325 { |
1406 HandleMark hm; // Discard invalid handles created during gc | 1326 TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); |
1407 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); | 1327 TraceCollectorStats tcs(g1mm()->full_collection_counters()); |
1408 } | 1328 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); |
1409 | 1329 |
1410 assert(free_regions() == 0, "we should not have added any free regions"); | 1330 double start = os::elapsedTime(); |
1411 rebuild_region_sets(false /* free_list_only */); | 1331 g1_policy()->record_full_collection_start(); |
1412 | 1332 |
1413 // Enqueue any discovered reference objects that have | 1333 // Note: When we have a more flexible GC logging framework that |
1414 // not been removed from the discovered lists. | 1334 // allows us to add optional attributes to a GC log record we |
1415 ref_processor_stw()->enqueue_discovered_references(); | 1335 // could consider timing and reporting how long we wait in the |
1416 | 1336 // following two methods. |
1417 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | 1337 wait_while_free_regions_coming(); |
1418 | 1338 // If we start the compaction before the CM threads finish |
1419 MemoryService::track_memory_usage(); | 1339 // scanning the root regions we might trip them over as we'll |
1420 | 1340 // be moving objects / updating references. So let's wait until |
1421 verify_after_gc(); | 1341 // they are done. By telling them to abort, they should complete |
1422 | 1342 // early. |
1423 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); | 1343 _cm->root_regions()->abort(); |
1424 ref_processor_stw()->verify_no_references_recorded(); | 1344 _cm->root_regions()->wait_until_scan_finished(); |
1425 | 1345 append_secondary_free_list_if_not_empty_with_lock(); |
1426 // Delete metaspaces for unloaded class loaders and clean up loader_data graph | 1346 |
1427 ClassLoaderDataGraph::purge(); | 1347 gc_prologue(true); |
1428 | 1348 increment_total_collections(true /* full gc */); |
1429 // Note: since we've just done a full GC, concurrent | 1349 increment_old_marking_cycles_started(); |
1430 // marking is no longer active. Therefore we need not | 1350 |
1431 // re-enable reference discovery for the CM ref processor. | 1351 assert(used() == recalculate_used(), "Should be equal"); |
1432 // That will be done at the start of the next marking cycle. | 1352 |
1433 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); | 1353 verify_before_gc(); |
1434 ref_processor_cm()->verify_no_references_recorded(); | 1354 |
1435 | 1355 pre_full_gc_dump(); |
1436 reset_gc_time_stamp(); | 1356 |
1437 // Since everything potentially moved, we will clear all remembered | 1357 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
1438 // sets, and clear all cards. Later we will rebuild remembered | 1358 |
1439 // sets. We will also reset the GC time stamps of the regions. | 1359 // Disable discovery and empty the discovered lists |
1440 clear_rsets_post_compaction(); | 1360 // for the CM ref processor. |
1441 check_gc_time_stamps(); | 1361 ref_processor_cm()->disable_discovery(); |
1442 | 1362 ref_processor_cm()->abandon_partial_discovery(); |
1443 // Resize the heap if necessary. | 1363 ref_processor_cm()->verify_no_references_recorded(); |
1444 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); | 1364 |
1445 | 1365 // Abandon current iterations of concurrent marking and concurrent |
1446 if (_hr_printer.is_active()) { | 1366 // refinement, if any are in progress. We have to do this before |
1447 // We should do this after we potentially resize the heap so | 1367 // wait_until_scan_finished() below. |
1448 // that all the COMMIT / UNCOMMIT events are generated before | 1368 concurrent_mark()->abort(); |
1449 // the end GC event. | 1369 |
1450 | 1370 // Make sure we'll choose a new allocation region afterwards. |
1451 print_hrs_post_compaction(); | 1371 release_mutator_alloc_region(); |
1452 _hr_printer.end_gc(true /* full */, (size_t) total_collections()); | 1372 abandon_gc_alloc_regions(); |
1453 } | 1373 g1_rem_set()->cleanupHRRS(); |
1454 | 1374 |
1455 if (_cg1r->use_cache()) { | 1375 // We should call this after we retire any currently active alloc |
1456 _cg1r->clear_and_record_card_counts(); | 1376 // regions so that all the ALLOC / RETIRE events are generated |
1457 _cg1r->clear_hot_cache(); | 1377 // before the start GC event. |
1458 } | 1378 _hr_printer.start_gc(true /* full */, (size_t) total_collections()); |
1459 | 1379 |
1460 // Rebuild remembered sets of all regions. | 1380 // We may have added regions to the current incremental collection |
1461 if (G1CollectedHeap::use_parallel_gc_threads()) { | 1381 // set between the last GC or pause and now. We need to clear the |
1462 uint n_workers = | 1382 // incremental collection set and then start rebuilding it afresh |
1463 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), | 1383 // after this full GC. |
1464 workers()->active_workers(), | 1384 abandon_collection_set(g1_policy()->inc_cset_head()); |
1465 Threads::number_of_non_daemon_threads()); | 1385 g1_policy()->clear_incremental_cset(); |
1466 assert(UseDynamicNumberOfGCThreads || | 1386 g1_policy()->stop_incremental_cset_building(); |
1467 n_workers == workers()->total_workers(), | 1387 |
1468 "If not dynamic should be using all the workers"); | 1388 tear_down_region_sets(false /* free_list_only */); |
1469 workers()->set_active_workers(n_workers); | 1389 g1_policy()->set_gcs_are_young(true); |
1470 // Set parallel threads in the heap (_n_par_threads) only | 1390 |
1471 // before a parallel phase and always reset it to 0 after | 1391 // See the comments in g1CollectedHeap.hpp and |
1472 // the phase so that the number of parallel threads does | 1392 // G1CollectedHeap::ref_processing_init() about |
1473 // not get carried forward to a serial phase where there | 1393 // how reference processing currently works in G1. |
1474 // may be code that is "possibly_parallel". | 1394 |
1475 set_par_threads(n_workers); | 1395 // Temporarily make discovery by the STW ref processor single threaded (non-MT). |
1476 | 1396 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); |
1477 ParRebuildRSTask rebuild_rs_task(this); | 1397 |
1478 assert(check_heap_region_claim_values( | 1398 // Temporarily clear the STW ref processor's _is_alive_non_header field. |
1479 HeapRegion::InitialClaimValue), "sanity check"); | 1399 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); |
1480 assert(UseDynamicNumberOfGCThreads || | 1400 |
1481 workers()->active_workers() == workers()->total_workers(), | 1401 ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); |
1482 "Unless dynamic should use total workers"); | 1402 ref_processor_stw()->setup_policy(do_clear_all_soft_refs); |
1483 // Use the most recent number of active workers | 1403 |
1484 assert(workers()->active_workers() > 0, | 1404 // Do collection work |
1485 "Active workers not properly set"); | 1405 { |
1486 set_par_threads(workers()->active_workers()); | 1406 HandleMark hm; // Discard invalid handles created during gc |
1487 workers()->run_task(&rebuild_rs_task); | 1407 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); |
1488 set_par_threads(0); | 1408 } |
1489 assert(check_heap_region_claim_values( | 1409 |
1490 HeapRegion::RebuildRSClaimValue), "sanity check"); | 1410 assert(free_regions() == 0, "we should not have added any free regions"); |
1491 reset_heap_region_claim_values(); | 1411 rebuild_region_sets(false /* free_list_only */); |
1492 } else { | 1412 |
1493 RebuildRSOutOfRegionClosure rebuild_rs(this); | 1413 // Enqueue any discovered reference objects that have |
1494 heap_region_iterate(&rebuild_rs); | 1414 // not been removed from the discovered lists. |
1495 } | 1415 ref_processor_stw()->enqueue_discovered_references(); |
1496 | 1416 |
1497 if (G1Log::fine()) { | 1417 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
1498 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | 1418 |
1499 } | 1419 MemoryService::track_memory_usage(); |
1500 | 1420 |
1501 if (true) { // FIXME | 1421 verify_after_gc(); |
1502 MetaspaceGC::compute_new_size(); | 1422 |
1503 } | 1423 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); |
1504 | 1424 ref_processor_stw()->verify_no_references_recorded(); |
1505 // Start a new incremental collection set for the next pause | 1425 |
1506 assert(g1_policy()->collection_set() == NULL, "must be"); | 1426 // Delete metaspaces for unloaded class loaders and clean up loader_data graph |
1507 g1_policy()->start_incremental_cset_building(); | 1427 ClassLoaderDataGraph::purge(); |
1508 | 1428 |
1509 // Clear the _cset_fast_test bitmap in anticipation of adding | 1429 // Note: since we've just done a full GC, concurrent |
1510 // regions to the incremental collection set for the next | 1430 // marking is no longer active. Therefore we need not |
1511 // evacuation pause. | 1431 // re-enable reference discovery for the CM ref processor. |
1512 clear_cset_fast_test(); | 1432 // That will be done at the start of the next marking cycle. |
1513 | 1433 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); |
1514 init_mutator_alloc_region(); | 1434 ref_processor_cm()->verify_no_references_recorded(); |
1515 | 1435 |
1516 double end = os::elapsedTime(); | 1436 reset_gc_time_stamp(); |
1517 g1_policy()->record_full_collection_end(); | 1437 // Since everything potentially moved, we will clear all remembered |
1438 // sets, and clear all cards. Later we will rebuild remembered | |
1439 // sets. We will also reset the GC time stamps of the regions. | |
1440 clear_rsets_post_compaction(); | |
1441 check_gc_time_stamps(); | |
1442 | |
1443 // Resize the heap if necessary. | |
1444 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); | |
1445 | |
1446 if (_hr_printer.is_active()) { | |
1447 // We should do this after we potentially resize the heap so | |
1448 // that all the COMMIT / UNCOMMIT events are generated before | |
1449 // the end GC event. | |
1450 | |
1451 print_hrs_post_compaction(); | |
1452 _hr_printer.end_gc(true /* full */, (size_t) total_collections()); | |
1453 } | |
1454 | |
1455 if (_cg1r->use_cache()) { | |
1456 _cg1r->clear_and_record_card_counts(); | |
1457 _cg1r->clear_hot_cache(); | |
1458 } | |
1459 | |
1460 // Rebuild remembered sets of all regions. | |
1461 if (G1CollectedHeap::use_parallel_gc_threads()) { | |
1462 uint n_workers = | |
1463 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), | |
1464 workers()->active_workers(), | |
1465 Threads::number_of_non_daemon_threads()); | |
1466 assert(UseDynamicNumberOfGCThreads || | |
1467 n_workers == workers()->total_workers(), | |
1468 "If not dynamic should be using all the workers"); | |
1469 workers()->set_active_workers(n_workers); | |
1470 // Set parallel threads in the heap (_n_par_threads) only | |
1471 // before a parallel phase and always reset it to 0 after | |
1472 // the phase so that the number of parallel threads does | |
1473 // not get carried forward to a serial phase where there | |
1474 // may be code that is "possibly_parallel". | |
1475 set_par_threads(n_workers); | |
1476 | |
1477 ParRebuildRSTask rebuild_rs_task(this); | |
1478 assert(check_heap_region_claim_values( | |
1479 HeapRegion::InitialClaimValue), "sanity check"); | |
1480 assert(UseDynamicNumberOfGCThreads || | |
1481 workers()->active_workers() == workers()->total_workers(), | |
1482 "Unless dynamic should use total workers"); | |
1483 // Use the most recent number of active workers | |
1484 assert(workers()->active_workers() > 0, | |
1485 "Active workers not properly set"); | |
1486 set_par_threads(workers()->active_workers()); | |
1487 workers()->run_task(&rebuild_rs_task); | |
1488 set_par_threads(0); | |
1489 assert(check_heap_region_claim_values( | |
1490 HeapRegion::RebuildRSClaimValue), "sanity check"); | |
1491 reset_heap_region_claim_values(); | |
1492 } else { | |
1493 RebuildRSOutOfRegionClosure rebuild_rs(this); | |
1494 heap_region_iterate(&rebuild_rs); | |
1495 } | |
1496 | |
1497 if (true) { // FIXME | |
1498 MetaspaceGC::compute_new_size(); | |
1499 } | |
1518 | 1500 |
1519 #ifdef TRACESPINNING | 1501 #ifdef TRACESPINNING |
1520 ParallelTaskTerminator::print_termination_counts(); | 1502 ParallelTaskTerminator::print_termination_counts(); |
1521 #endif | 1503 #endif |
1522 | 1504 |
1523 gc_epilogue(true); | 1505 // Discard all rset updates |
1524 | 1506 JavaThread::dirty_card_queue_set().abandon_logs(); |
1525 // Discard all rset updates | 1507 assert(!G1DeferredRSUpdate |
1526 JavaThread::dirty_card_queue_set().abandon_logs(); | 1508 || (G1DeferredRSUpdate && |
1527 assert(!G1DeferredRSUpdate | 1509 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
1528 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); | 1510 |
1529 | 1511 _young_list->reset_sampled_info(); |
1530 _young_list->reset_sampled_info(); | 1512 // At this point there should be no regions in the |
1531 // At this point there should be no regions in the | 1513 // entire heap tagged as young. |
1532 // entire heap tagged as young. | 1514 assert(check_young_list_empty(true /* check_heap */), |
1533 assert( check_young_list_empty(true /* check_heap */), | 1515 "young list should be empty at this point"); |
1534 "young list should be empty at this point"); | 1516 |
1535 | 1517 // Update the number of full collections that have been completed. |
1536 // Update the number of full collections that have been completed. | 1518 increment_old_marking_cycles_completed(false /* concurrent */); |
1537 increment_old_marking_cycles_completed(false /* concurrent */); | 1519 |
1538 | 1520 _hrs.verify_optional(); |
1539 _hrs.verify_optional(); | 1521 verify_region_sets_optional(); |
1540 verify_region_sets_optional(); | 1522 |
1523 // Start a new incremental collection set for the next pause | |
1524 assert(g1_policy()->collection_set() == NULL, "must be"); | |
1525 g1_policy()->start_incremental_cset_building(); | |
1526 | |
1527 // Clear the _cset_fast_test bitmap in anticipation of adding | |
1528 // regions to the incremental collection set for the next | |
1529 // evacuation pause. | |
1530 clear_cset_fast_test(); | |
1531 | |
1532 init_mutator_alloc_region(); | |
1533 | |
1534 double end = os::elapsedTime(); | |
1535 g1_policy()->record_full_collection_end(); | |
1536 | |
1537 if (G1Log::fine()) { | |
1538 g1_policy()->print_heap_transition(); | |
1539 } | |
1540 | |
1541 // We must call G1MonitoringSupport::update_sizes() in the same scoping level | |
1542 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the | |
1543 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated | |
1544 // before any GC notifications are raised. | |
1545 g1mm()->update_sizes(); | |
1546 | |
1547 gc_epilogue(true); | |
1548 } | |
1549 | |
1550 if (G1Log::finer()) { | |
1551 g1_policy()->print_detailed_heap_transition(); | |
1552 } | |
1541 | 1553 |
1542 print_heap_after_gc(); | 1554 print_heap_after_gc(); |
1543 | 1555 |
1544 // We must call G1MonitoringSupport::update_sizes() in the same scoping level | 1556 post_full_gc_dump(); |
1545 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the | 1557 } |
1546 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated | |
1547 // before any GC notifications are raised. | |
1548 g1mm()->update_sizes(); | |
1549 } | |
1550 | |
1551 post_full_gc_dump(); | |
1552 | 1558 |
1553 return true; | 1559 return true; |
1554 } | 1560 } |
1555 | 1561 |
1556 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | 1562 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { |
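The hunk above leans on HotSpot's scoped "mutator" helpers at lines 1396-1399 of both revisions: ReferenceProcessorMTDiscoveryMutator temporarily forces single-threaded STW reference discovery and ReferenceProcessorIsAliveMutator temporarily clears the _is_alive_non_header field, each restoring the previous value when the full-GC scope exits. A generic sketch of that save-and-restore idiom, assuming nothing about the real classes beyond what the diff's comments state (ScopedFieldMutator is an illustrative name):

```cpp
// Save-and-restore guard: installs a value for the current scope and puts the
// original back in the destructor, even on early return.
template <typename T>
class ScopedFieldMutator {
  T& _field;
  T  _saved;
 public:
  ScopedFieldMutator(T& field, T scoped_value) : _field(field), _saved(field) {
    _field = scoped_value;                    // install the temporary value
  }
  ~ScopedFieldMutator() { _field = _saved; }  // restore on scope exit
};

// Usage mirroring the diff: force non-MT discovery for the collection's scope.
void full_gc_scope_example() {
  bool mt_discovery = true;
  {
    ScopedFieldMutator<bool> stw_rp_disc_ser(mt_discovery, false);
    // ... full collection runs with single-threaded discovery ...
  }  // mt_discovery is true again here
}
```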
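Similarly, the parallel remembered-set rebuild (lines 1461-1491 in both columns) follows the bracket pattern its comment describes: compute an active worker count via AdaptiveSizePolicy::calc_active_workers(), publish it through set_par_threads() just before the parallel phase, and reset it to 0 afterwards so "possibly_parallel" code on serial paths sees no workers. A toy sketch of that bracket; the stub heuristic and names here are illustrative, not the real AdaptiveSizePolicy logic:

```cpp
#include <algorithm>
#include <cstdio>

struct WorkGangStub {                // stand-in for the heap's worker gang
  unsigned total;
  unsigned active;
  void run_task(const char* name, unsigned n) {
    std::printf("running %s on %u workers\n", name, n);
  }
};

// Toy placeholder for AdaptiveSizePolicy::calc_active_workers(): scale with
// the mutator load, never beyond the gang size.
unsigned calc_active_workers_stub(unsigned total, unsigned active, unsigned non_daemon) {
  return std::min(total, std::max(active, non_daemon));
}

static int n_par_threads = 0;        // stand-in for the heap's _n_par_threads

void rebuild_remembered_sets(WorkGangStub& gang, unsigned non_daemon_threads) {
  unsigned n = calc_active_workers_stub(gang.total, gang.active, non_daemon_threads);
  gang.active = n;
  n_par_threads = (int)n;            // announce parallelism only for this phase
  gang.run_task("ParRebuildRS", n);
  n_par_threads = 0;                 // reset so serial phases see zero workers
}
```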
3836 // Preserving the old comment here if that helps the investigation: | 3842 // Preserving the old comment here if that helps the investigation: |
3837 // | 3843 // |
3838 // The elapsed time induced by the start time below deliberately elides | 3844 // The elapsed time induced by the start time below deliberately elides |
3839 // the possible verification above. | 3845 // the possible verification above. |
3840 double sample_start_time_sec = os::elapsedTime(); | 3846 double sample_start_time_sec = os::elapsedTime(); |
3841 size_t start_used_bytes = used(); | |
3842 | 3847 |
3843 #if YOUNG_LIST_VERBOSE | 3848 #if YOUNG_LIST_VERBOSE |
3844 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); | 3849 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
3845 _young_list->print(); | 3850 _young_list->print(); |
3846 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); | 3851 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
3847 #endif // YOUNG_LIST_VERBOSE | 3852 #endif // YOUNG_LIST_VERBOSE |
3848 | 3853 |
3849 g1_policy()->record_collection_pause_start(sample_start_time_sec, | 3854 g1_policy()->record_collection_pause_start(sample_start_time_sec); |
3850 start_used_bytes); | |
3851 | 3855 |
3852 double scan_wait_start = os::elapsedTime(); | 3856 double scan_wait_start = os::elapsedTime(); |
3853 // We have to wait until the CM threads finish scanning the | 3857 // We have to wait until the CM threads finish scanning the |
3854 // root regions as it's the only way to ensure that all the | 3858 // root regions as it's the only way to ensure that all the |
3855 // objects on them have been correctly scanned before we start | 3859 // objects on them have been correctly scanned before we start |
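Note on the second hunk: the start_used_bytes local (old line 3841) disappears and record_collection_pause_start() now takes only the start time (new line 3854), so the caller no longer samples used() itself; presumably the policy object takes its own sample, though the receiving side of the call is outside this excerpt. A toy before/after sketch with illustrative stand-ins (HeapStub, PolicyStub, and _used_at_start_bytes are not G1 names):

```cpp
#include <cstddef>

struct HeapStub {
  static std::size_t used() { return 1024; }    // placeholder usage sample
};

struct PolicyStub {
  std::size_t _used_at_start_bytes = 0;         // hypothetical field
  // After the change the policy takes only the timestamp and samples the heap
  // itself, so a call site cannot pass a stale or inconsistent value.
  void record_collection_pause_start(double /*start_time_sec*/) {
    _used_at_start_bytes = HeapStub::used();
  }
};
```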