comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 10405:f2110083203d
8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>
author | sla |
---|---|
date | Mon, 10 Jun 2013 11:30:51 +0200 |
parents | 3a4805ad0005 |
children | 836a62f43af9 71180a6e5080 |
comparison
10404:d0add7016434 | 10405:f2110083203d |
---|---|
36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" | 36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" |
37 #include "gc_implementation/g1/g1Log.hpp" | 37 #include "gc_implementation/g1/g1Log.hpp" |
38 #include "gc_implementation/g1/g1MarkSweep.hpp" | 38 #include "gc_implementation/g1/g1MarkSweep.hpp" |
39 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | 39 #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
40 #include "gc_implementation/g1/g1RemSet.inline.hpp" | 40 #include "gc_implementation/g1/g1RemSet.inline.hpp" |
41 #include "gc_implementation/g1/g1YCTypes.hpp" | |
41 #include "gc_implementation/g1/heapRegion.inline.hpp" | 42 #include "gc_implementation/g1/heapRegion.inline.hpp" |
42 #include "gc_implementation/g1/heapRegionRemSet.hpp" | 43 #include "gc_implementation/g1/heapRegionRemSet.hpp" |
43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | 44 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" |
44 #include "gc_implementation/g1/vm_operations_g1.hpp" | 45 #include "gc_implementation/g1/vm_operations_g1.hpp" |
46 #include "gc_implementation/shared/gcHeapSummary.hpp" | |
47 #include "gc_implementation/shared/gcTimer.hpp" | |
48 #include "gc_implementation/shared/gcTrace.hpp" | |
49 #include "gc_implementation/shared/gcTraceTime.hpp" | |
45 #include "gc_implementation/shared/isGCActiveMark.hpp" | 50 #include "gc_implementation/shared/isGCActiveMark.hpp" |
46 #include "memory/gcLocker.inline.hpp" | 51 #include "memory/gcLocker.inline.hpp" |
47 #include "memory/genOopClosures.inline.hpp" | 52 #include "memory/genOopClosures.inline.hpp" |
48 #include "memory/generationSpec.hpp" | 53 #include "memory/generationSpec.hpp" |
49 #include "memory/referenceProcessor.hpp" | 54 #include "memory/referenceProcessor.hpp" |
74 // | 79 // |
75 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism. | 80 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism. |
76 // The number of GC workers is passed to heap_region_par_iterate_chunked(). | 81 // The number of GC workers is passed to heap_region_par_iterate_chunked(). |
77 // It does use run_task() which sets _n_workers in the task. | 82 // It does use run_task() which sets _n_workers in the task. |
78 // G1ParTask executes g1_process_strong_roots() -> | 83 // G1ParTask executes g1_process_strong_roots() -> |
79 // SharedHeap::process_strong_roots() which calls eventuall to | 84 // SharedHeap::process_strong_roots() which calls eventually to |
80 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses | 85 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses |
81 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also | 86 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also |
82 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). | 87 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). |
83 // | 88 // |
84 | 89 |
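
The comment in this hunk describes how G1's verification and root-processing tasks split work among GC workers via chunked iteration and claim-based bookkeeping. As a rough standalone model (the real SubTasksDone / SequentialSubTasksDone are HotSpot-internal), each worker claims the next unclaimed chunk from a shared atomic counter until none remain:

```cpp
// Standalone model of claim-based work splitting; not HotSpot code.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<int> next_chunk{0};
const int total_chunks = 16;

void worker(int id) {
  for (;;) {
    int chunk = next_chunk.fetch_add(1);  // claim one chunk atomically
    if (chunk >= total_chunks) return;    // nothing left to claim
    std::printf("worker %d processes chunk %d\n", id, chunk);
  }
}

int main() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) workers.emplace_back(worker, i);
  for (auto& t : workers) t.join();
}
```
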
455 return hr != NULL && hr->in_collection_set(); | 460 return hr != NULL && hr->in_collection_set(); |
456 } | 461 } |
457 #endif | 462 #endif |
458 | 463 |
459 // Returns true if the reference points to an object that | 464 // Returns true if the reference points to an object that |
460 // can move in an incremental collecction. | 465 // can move in an incremental collection. |
461 bool G1CollectedHeap::is_scavengable(const void* p) { | 466 bool G1CollectedHeap::is_scavengable(const void* p) { |
462 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | 467 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
463 G1CollectorPolicy* g1p = g1h->g1_policy(); | 468 G1CollectorPolicy* g1p = g1h->g1_policy(); |
464 HeapRegion* hr = heap_region_containing(p); | 469 HeapRegion* hr = heap_region_containing(p); |
465 if (hr == NULL) { | 470 if (hr == NULL) { |
546 HR_FORMAT_PARAMS(res)); | 551 HR_FORMAT_PARAMS(res)); |
547 } | 552 } |
548 return res; | 553 return res; |
549 } | 554 } |
550 | 555 |
551 // Wait here until we get notifed either when (a) there are no | 556 // Wait here until we get notified either when (a) there are no |
552 // more free regions coming or (b) some regions have been moved on | 557 // more free regions coming or (b) some regions have been moved on |
553 // the secondary_free_list. | 558 // the secondary_free_list. |
554 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | 559 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); |
555 } | 560 } |
556 | 561 |
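
The loop above blocks on SecondaryFreeList_lock until notified that either more regions arrived or none are coming. A rough standalone analogue using a condition variable (names here are illustrative, not HotSpot's Monitor API):

```cpp
// Predicate-rechecking wait loop, analogous to the G1 wait above.
#include <condition_variable>
#include <mutex>

std::mutex secondary_free_list_lock;
std::condition_variable secondary_free_list_cv;
bool free_regions_coming = true;
int secondary_free_list_length = 0;

int wait_for_region() {
  std::unique_lock<std::mutex> lk(secondary_free_list_lock);
  // Re-check the predicate on every wakeup, exactly as the G1 loop does.
  while (secondary_free_list_length == 0 && free_regions_coming) {
    secondary_free_list_cv.wait(lk);
  }
  return secondary_free_list_length;  // 0 means no more regions are coming
}
```
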
621 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); | 626 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); |
622 | 627 |
623 uint first = G1_NULL_HRS_INDEX; | 628 uint first = G1_NULL_HRS_INDEX; |
624 if (num_regions == 1) { | 629 if (num_regions == 1) { |
625 // Only one region to allocate, no need to go through the slower | 630 // Only one region to allocate, no need to go through the slower |
626 // path. The caller will attempt the expasion if this fails, so | 631 // path. The caller will attempt the expansion if this fails, so |
627 // let's not try to expand here too. | 632 // let's not try to expand here too. |
628 HeapRegion* hr = new_region(word_size, false /* do_expand */); | 633 HeapRegion* hr = new_region(word_size, false /* do_expand */); |
629 if (hr != NULL) { | 634 if (hr != NULL) { |
630 first = hr->hrs_index(); | 635 first = hr->hrs_index(); |
631 } else { | 636 } else { |
686 HeapRegion* first_hr = region_at(first); | 691 HeapRegion* first_hr = region_at(first); |
687 // The header of the new object will be placed at the bottom of | 692 // The header of the new object will be placed at the bottom of |
688 // the first region. | 693 // the first region. |
689 HeapWord* new_obj = first_hr->bottom(); | 694 HeapWord* new_obj = first_hr->bottom(); |
690 // This will be the new end of the first region in the series that | 695 // This will be the new end of the first region in the series that |
691 // should also match the end of the last region in the seriers. | 696 // should also match the end of the last region in the series. |
692 HeapWord* new_end = new_obj + word_size_sum; | 697 HeapWord* new_end = new_obj + word_size_sum; |
693 // This will be the new top of the first region that will reflect | 698 // This will be the new top of the first region that will reflect |
694 // this allocation. | 699 // this allocation. |
695 HeapWord* new_top = new_obj + word_size; | 700 HeapWord* new_top = new_obj + word_size; |
696 | 701 |
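
The three addresses in this hunk are plain offsets from the first region's bottom. A worked example with illustrative sizes (word values stand in for HeapWord* addresses):

```cpp
// Worked example of the humongous-series address arithmetic above.
#include <cstdio>

typedef unsigned long HeapWord;  // stand-in for HotSpot's HeapWord*

int main() {
  HeapWord bottom        = 0x1000;  // first_hr->bottom()
  HeapWord word_size     = 600;     // requested object size
  HeapWord word_size_sum = 1024;    // regions needed * words per region

  HeapWord new_obj = bottom;                  // header at first region's bottom
  HeapWord new_end = new_obj + word_size_sum; // end of last region in series
  HeapWord new_top = new_obj + word_size;     // top reflecting the allocation

  std::printf("obj=%#lx top=%#lx end=%#lx\n", new_obj, new_top, new_end);
}
```
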
861 HeapWord* | 866 HeapWord* |
862 G1CollectedHeap::mem_allocate(size_t word_size, | 867 G1CollectedHeap::mem_allocate(size_t word_size, |
863 bool* gc_overhead_limit_was_exceeded) { | 868 bool* gc_overhead_limit_was_exceeded) { |
864 assert_heap_not_locked_and_not_at_safepoint(); | 869 assert_heap_not_locked_and_not_at_safepoint(); |
865 | 870 |
866 // Loop until the allocation is satisified, or unsatisfied after GC. | 871 // Loop until the allocation is satisfied, or unsatisfied after GC. |
867 for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { | 872 for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { |
868 unsigned int gc_count_before; | 873 unsigned int gc_count_before; |
869 | 874 |
870 HeapWord* result = NULL; | 875 HeapWord* result = NULL; |
871 if (!isHumongous(word_size)) { | 876 if (!isHumongous(word_size)) { |
1001 // then retry the allocation. | 1006 // then retry the allocation. |
1002 GC_locker::stall_until_clear(); | 1007 GC_locker::stall_until_clear(); |
1003 (*gclocker_retry_count_ret) += 1; | 1008 (*gclocker_retry_count_ret) += 1; |
1004 } | 1009 } |
1005 | 1010 |
1006 // We can reach here if we were unsuccessul in scheduling a | 1011 // We can reach here if we were unsuccessful in scheduling a |
1007 // collection (because another thread beat us to it) or if we were | 1012 // collection (because another thread beat us to it) or if we were |
1008 // stalled due to the GC locker. In either can we should retry the | 1013 // stalled due to the GC locker. In either can we should retry the |
1009 // allocation attempt in case another thread successfully | 1014 // allocation attempt in case another thread successfully |
1010 // performed a collection and reclaimed enough space. We do the | 1015 // performed a collection and reclaimed enough space. We do the |
1011 // first attempt (without holding the Heap_lock) here and the | 1016 // first attempt (without holding the Heap_lock) here and the |
1126 // then retry the allocation. | 1131 // then retry the allocation. |
1127 GC_locker::stall_until_clear(); | 1132 GC_locker::stall_until_clear(); |
1128 (*gclocker_retry_count_ret) += 1; | 1133 (*gclocker_retry_count_ret) += 1; |
1129 } | 1134 } |
1130 | 1135 |
1131 // We can reach here if we were unsuccessul in scheduling a | 1136 // We can reach here if we were unsuccessful in scheduling a |
1132 // collection (because another thread beat us to it) or if we were | 1137 // collection (because another thread beat us to it) or if we were |
1133 // stalled due to the GC locker. In either can we should retry the | 1138 // stalled due to the GC locker. In either can we should retry the |
1134 // allocation attempt in case another thread successfully | 1139 // allocation attempt in case another thread successfully |
1135 // performed a collection and reclaimed enough space. Give a | 1140 // performed a collection and reclaimed enough space. Give a |
1136 // warning if we seem to be looping forever. | 1141 // warning if we seem to be looping forever. |
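
The two hunks above describe the same stall-and-retry pattern: if the GC locker is active the thread stalls until it clears, bumps a retry counter, and re-attempts the allocation in case another thread's collection freed enough space. A standalone sketch of that loop; every function below is an illustrative stub, not a HotSpot API:

```cpp
// Stall-and-retry allocation loop; all stubs are illustrative.
#include <cstddef>
#include <cstdio>

static int fake_free_words = 0;

static bool gc_locker_is_active() { return fake_free_words == 0; }  // stub
static void stall_until_clear()   { fake_free_words = 1000; }       // stub: "a GC ran"
static void* try_allocate(size_t words) {                           // stub fast path
  if (fake_free_words >= (int)words) { fake_free_words -= (int)words; return &fake_free_words; }
  return nullptr;
}

void* allocate_with_retries(size_t word_size) {
  int gclocker_retry_count = 0;
  for (int try_count = 1; /* we'll return */; try_count++) {
    void* result = try_allocate(word_size);
    if (result != nullptr) return result;
    if (gc_locker_is_active()) {
      stall_until_clear();        // wait for JNI critical sections to drain
      gclocker_retry_count++;
    }
    // Otherwise another thread beat us to scheduling a collection; loop
    // and retry in case it reclaimed enough space.
    if (gclocker_retry_count > 2) return nullptr;  // avoid looping forever
  }
}

int main() {
  void* p = allocate_with_retries(64);
  std::printf("allocation %s\n", p ? "satisfied" : "unsatisfied after GC");
}
```
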
1296 | 1301 |
1297 if (GC_locker::check_active_before_gc()) { | 1302 if (GC_locker::check_active_before_gc()) { |
1298 return false; | 1303 return false; |
1299 } | 1304 } |
1300 | 1305 |
1306 STWGCTimer* gc_timer = G1MarkSweep::gc_timer(); | |
1307 gc_timer->register_gc_start(os::elapsed_counter()); | |
1308 | |
1309 SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer(); | |
1310 gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start()); | |
1311 | |
1301 SvcGCMarker sgcm(SvcGCMarker::FULL); | 1312 SvcGCMarker sgcm(SvcGCMarker::FULL); |
1302 ResourceMark rm; | 1313 ResourceMark rm; |
1303 | 1314 |
1304 print_heap_before_gc(); | 1315 print_heap_before_gc(); |
1316 trace_heap_before_gc(gc_tracer); | |
1305 | 1317 |
1306 size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); | 1318 size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); |
1307 | 1319 |
1308 HRSPhaseSetter x(HRSPhaseFullGC); | 1320 HRSPhaseSetter x(HRSPhaseFullGC); |
1309 verify_region_sets_optional(); | 1321 verify_region_sets_optional(); |
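
This hunk shows the heart of JEP 167 as applied to the full-GC path: a timer records raw start/end ticks while a tracer turns them into events bracketing the collection. A minimal mock of the call order, assuming nothing about the real STWGCTimer / SerialOldTracer internals:

```cpp
// Mock of the register/report bracketing this changeset adds around a GC.
#include <chrono>
#include <cstdio>

struct MockGCTimer {
  std::chrono::steady_clock::time_point start, end;
  void register_gc_start() { start = std::chrono::steady_clock::now(); }
  void register_gc_end()   { end   = std::chrono::steady_clock::now(); }
};

struct MockGCTracer {
  void report_gc_start() { std::puts("event: GC start"); }
  void report_gc_end(const MockGCTimer& t) {
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(t.end - t.start);
    std::printf("event: GC end, %lld ms\n", (long long)ms.count());
  }
};

int main() {
  MockGCTimer timer; MockGCTracer tracer;
  timer.register_gc_start();    // as in gc_timer->register_gc_start(...)
  tracer.report_gc_start();     // as in gc_tracer->report_gc_start(...)
  /* ... collection body ... */
  timer.register_gc_end();      // gc_timer->register_gc_end(...)
  tracer.report_gc_end(timer);  // gc_tracer->report_gc_end(...)
}
```
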
1320 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant"); | 1332 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant"); |
1321 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); | 1333 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); |
1322 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); | 1334 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); |
1323 | 1335 |
1324 { | 1336 { |
1325 TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); | 1337 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL); |
1326 TraceCollectorStats tcs(g1mm()->full_collection_counters()); | 1338 TraceCollectorStats tcs(g1mm()->full_collection_counters()); |
1327 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); | 1339 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); |
1328 | 1340 |
1329 double start = os::elapsedTime(); | 1341 double start = os::elapsedTime(); |
1330 g1_policy()->record_full_collection_start(); | 1342 g1_policy()->record_full_collection_start(); |
1349 | 1361 |
1350 assert(used() == recalculate_used(), "Should be equal"); | 1362 assert(used() == recalculate_used(), "Should be equal"); |
1351 | 1363 |
1352 verify_before_gc(); | 1364 verify_before_gc(); |
1353 | 1365 |
1354 pre_full_gc_dump(); | 1366 pre_full_gc_dump(gc_timer); |
1355 | 1367 |
1356 COMPILER2_PRESENT(DerivedPointerTable::clear()); | 1368 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
1357 | 1369 |
1358 // Disable discovery and empty the discovered lists | 1370 // Disable discovery and empty the discovered lists |
1359 // for the CM ref processor. | 1371 // for the CM ref processor. |
1431 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); | 1443 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); |
1432 ref_processor_cm()->verify_no_references_recorded(); | 1444 ref_processor_cm()->verify_no_references_recorded(); |
1433 | 1445 |
1434 reset_gc_time_stamp(); | 1446 reset_gc_time_stamp(); |
1435 // Since everything potentially moved, we will clear all remembered | 1447 // Since everything potentially moved, we will clear all remembered |
1436 // sets, and clear all cards. Later we will rebuild remebered | 1448 // sets, and clear all cards. Later we will rebuild remembered |
1437 // sets. We will also reset the GC time stamps of the regions. | 1449 // sets. We will also reset the GC time stamps of the regions. |
1438 clear_rsets_post_compaction(); | 1450 clear_rsets_post_compaction(); |
1439 check_gc_time_stamps(); | 1451 check_gc_time_stamps(); |
1440 | 1452 |
1441 // Resize the heap if necessary. | 1453 // Resize the heap if necessary. |
1551 if (G1Log::finer()) { | 1563 if (G1Log::finer()) { |
1552 g1_policy()->print_detailed_heap_transition(true /* full */); | 1564 g1_policy()->print_detailed_heap_transition(true /* full */); |
1553 } | 1565 } |
1554 | 1566 |
1555 print_heap_after_gc(); | 1567 print_heap_after_gc(); |
1556 | 1568 trace_heap_after_gc(gc_tracer); |
1557 post_full_gc_dump(); | 1569 |
1570 post_full_gc_dump(gc_timer); | |
1571 | |
1572 gc_timer->register_gc_end(os::elapsed_counter()); | |
1573 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); | |
1558 } | 1574 } |
1559 | 1575 |
1560 return true; | 1576 return true; |
1561 } | 1577 } |
1562 | 1578 |
1917 _is_alive_closure_stw(this), | 1933 _is_alive_closure_stw(this), |
1918 _ref_processor_cm(NULL), | 1934 _ref_processor_cm(NULL), |
1919 _ref_processor_stw(NULL), | 1935 _ref_processor_stw(NULL), |
1920 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | 1936 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), |
1921 _bot_shared(NULL), | 1937 _bot_shared(NULL), |
1922 _evac_failure_scan_stack(NULL) , | 1938 _evac_failure_scan_stack(NULL), |
1923 _mark_in_progress(false), | 1939 _mark_in_progress(false), |
1924 _cg1r(NULL), _summary_bytes_used(0), | 1940 _cg1r(NULL), _summary_bytes_used(0), |
1925 _g1mm(NULL), | 1941 _g1mm(NULL), |
1926 _refine_cte_cl(NULL), | 1942 _refine_cte_cl(NULL), |
1927 _full_collection(false), | 1943 _full_collection(false), |
1937 _old_plab_stats(OldPLABSize, PLABWeight), | 1953 _old_plab_stats(OldPLABSize, PLABWeight), |
1938 _expand_heap_after_alloc_failure(true), | 1954 _expand_heap_after_alloc_failure(true), |
1939 _surviving_young_words(NULL), | 1955 _surviving_young_words(NULL), |
1940 _old_marking_cycles_started(0), | 1956 _old_marking_cycles_started(0), |
1941 _old_marking_cycles_completed(0), | 1957 _old_marking_cycles_completed(0), |
1958 _concurrent_cycle_started(false), | |
1942 _in_cset_fast_test(NULL), | 1959 _in_cset_fast_test(NULL), |
1943 _in_cset_fast_test_base(NULL), | 1960 _in_cset_fast_test_base(NULL), |
1944 _dirty_cards_region_list(NULL), | 1961 _dirty_cards_region_list(NULL), |
1945 _worker_cset_start_region(NULL), | 1962 _worker_cset_start_region(NULL), |
1946 _worker_cset_start_region_time_stamp(NULL) { | 1963 _worker_cset_start_region_time_stamp(NULL), |
1947 _g1h = this; // To catch bugs. | 1964 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()), |
1965 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), | |
1966 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()), | |
1967 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) { | |
1968 | |
1969 _g1h = this; | |
1948 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | 1970 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { |
1949 vm_exit_during_initialization("Failed necessary allocation."); | 1971 vm_exit_during_initialization("Failed necessary allocation."); |
1950 } | 1972 } |
1951 | 1973 |
1952 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; | 1974 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
1957 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | 1979 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); |
1958 assert(n_rem_sets > 0, "Invariant."); | 1980 assert(n_rem_sets > 0, "Invariant."); |
1959 | 1981 |
1960 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); | 1982 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); |
1961 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); | 1983 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); |
1984 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC); | |
1962 | 1985 |
1963 for (int i = 0; i < n_queues; i++) { | 1986 for (int i = 0; i < n_queues; i++) { |
1964 RefToScanQueue* q = new RefToScanQueue(); | 1987 RefToScanQueue* q = new RefToScanQueue(); |
1965 q->initialize(); | 1988 q->initialize(); |
1966 _task_queues->register_queue(i, q); | 1989 _task_queues->register_queue(i, q); |
1967 } | 1990 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo(); |
1968 | 1991 } |
1969 clear_cset_start_regions(); | 1992 clear_cset_start_regions(); |
1970 | 1993 |
1971 // Initialize the G1EvacuationFailureALot counters and flags. | 1994 // Initialize the G1EvacuationFailureALot counters and flags. |
1972 NOT_PRODUCT(reset_evacuation_should_fail();) | 1995 NOT_PRODUCT(reset_evacuation_should_fail();) |
1973 | 1996 |
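
Two allocation idioms appear in this constructor hunk: `new (ResourceObj::C_HEAP, mtGC)` for the timers and tracers, and NEW_C_HEAP_ARRAY plus placement-new for the EvacuationFailedInfo array, since the raw array allocation runs no constructors. A standalone illustration of the latter (the struct is a stand-in):

```cpp
// Raw C-heap storage followed by placement-new, as in the hunk above.
#include <cstdlib>
#include <new>

struct EvacFailedInfo {         // stand-in for EvacuationFailedInfo
  size_t failed_count;
  EvacFailedInfo() : failed_count(0) {}
};

int main() {
  int n_queues = 4;
  // Constructor-free allocation, analogous to NEW_C_HEAP_ARRAY:
  EvacFailedInfo* arr = static_cast<EvacFailedInfo*>(
      std::malloc(sizeof(EvacFailedInfo) * n_queues));
  for (int i = 0; i < n_queues; i++) {
    ::new (&arr[i]) EvacFailedInfo();  // run each constructor in place
  }
  // ... use arr ...
  std::free(arr);  // trivial destructor, so free() alone is fine here
}
```
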
2023 | 2046 |
2024 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, | 2047 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, |
2025 HeapRegion::GrainBytes); | 2048 HeapRegion::GrainBytes); |
2026 | 2049 |
2027 // It is important to do this in a way such that concurrent readers can't | 2050 // It is important to do this in a way such that concurrent readers can't |
2028 // temporarily think somethings in the heap. (I've actually seen this | 2051 // temporarily think something is in the heap. (I've actually seen this |
2029 // happen in asserts: DLD.) | 2052 // happen in asserts: DLD.) |
2030 _reserved.set_word_size(0); | 2053 _reserved.set_word_size(0); |
2031 _reserved.set_start((HeapWord*)heap_rs.base()); | 2054 _reserved.set_start((HeapWord*)heap_rs.base()); |
2032 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | 2055 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); |
2033 | 2056 |
2460 _old_marking_cycles_completed += 1; | 2483 _old_marking_cycles_completed += 1; |
2461 | 2484 |
2462 // We need to clear the "in_progress" flag in the CM thread before | 2485 // We need to clear the "in_progress" flag in the CM thread before |
2463 // we wake up any waiters (especially when ExplicitInvokesConcurrent | 2486 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
2464 // is set) so that if a waiter requests another System.gc() it doesn't | 2487 // is set) so that if a waiter requests another System.gc() it doesn't |
2465 // incorrectly see that a marking cyle is still in progress. | 2488 // incorrectly see that a marking cycle is still in progress. |
2466 if (concurrent) { | 2489 if (concurrent) { |
2467 _cmThread->clear_in_progress(); | 2490 _cmThread->clear_in_progress(); |
2468 } | 2491 } |
2469 | 2492 |
2470 // This notify_all() will ensure that a thread that called | 2493 // This notify_all() will ensure that a thread that called |
2471 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) | 2494 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
2472 // and it's waiting for a full GC to finish will be woken up. It is | 2495 // and it's waiting for a full GC to finish will be woken up. It is |
2473 // waiting in VM_G1IncCollectionPause::doit_epilogue(). | 2496 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
2474 FullGCCount_lock->notify_all(); | 2497 FullGCCount_lock->notify_all(); |
2498 } | |
2499 | |
2500 void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) { | |
2501 _concurrent_cycle_started = true; | |
2502 _gc_timer_cm->register_gc_start(start_time); | |
2503 | |
2504 _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start()); | |
2505 trace_heap_before_gc(_gc_tracer_cm); | |
2506 } | |
2507 | |
2508 void G1CollectedHeap::register_concurrent_cycle_end() { | |
2509 if (_concurrent_cycle_started) { | |
2510 _gc_timer_cm->register_gc_end(os::elapsed_counter()); | |
2511 | |
2512 if (_cm->has_aborted()) { | |
2513 _gc_tracer_cm->report_concurrent_mode_failure(); | |
2514 } | |
2515 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); | |
2516 | |
2517 _concurrent_cycle_started = false; | |
2518 } | |
2519 } | |
2520 | |
2521 void G1CollectedHeap::trace_heap_after_concurrent_cycle() { | |
2522 if (_concurrent_cycle_started) { | |
2523 trace_heap_after_gc(_gc_tracer_cm); | |
2524 } | |
2525 } | |
2526 | |
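
The three new methods above gate concurrent-cycle event reporting on the `_concurrent_cycle_started` flag and surface an aborted marking cycle as a concurrent-mode-failure event. A condensed standalone model of that bookkeeping:

```cpp
// Flag-guarded concurrent-cycle event reporting, modeled on the hunk above.
#include <cstdio>

struct ConcurrentCycleTracker {
  bool started = false;
  void register_start() { started = true; std::puts("event: conc cycle start"); }
  void register_end(bool marking_aborted) {
    if (!started) return;  // nothing to close out
    if (marking_aborted) std::puts("event: concurrent mode failure");
    std::puts("event: conc cycle end");
    started = false;
  }
};

int main() {
  ConcurrentCycleTracker t;
  t.register_start();
  t.register_end(/* marking_aborted = */ true);
  t.register_end(false);  // no-op: no cycle in progress
}
```
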
2527 G1YCType G1CollectedHeap::yc_type() { | |
2528 bool is_young = g1_policy()->gcs_are_young(); | |
2529 bool is_initial_mark = g1_policy()->during_initial_mark_pause(); | |
2530 bool is_during_mark = mark_in_progress(); | |
2531 | |
2532 if (is_initial_mark) { | |
2533 return InitialMark; | |
2534 } else if (is_during_mark) { | |
2535 return DuringMark; | |
2536 } else if (is_young) { | |
2537 return Normal; | |
2538 } else { | |
2539 return Mixed; | |
2540 } | |
2475 } | 2541 } |
2476 | 2542 |
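
The new `yc_type()` above maps three policy flags to the G1YCType values introduced by g1YCTypes.hpp (included at the top of this change). Restated as a standalone function:

```cpp
// The yc_type() classification, as a standalone function over three flags.
enum G1YCType { Normal, InitialMark, DuringMark, Mixed };

G1YCType classify(bool is_young, bool is_initial_mark, bool is_during_mark) {
  if (is_initial_mark) return InitialMark;  // pause that starts marking
  if (is_during_mark)  return DuringMark;   // young pause while marking runs
  if (is_young)        return Normal;       // plain young collection
  return Mixed;                             // young plus some old regions
}
```
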
2477 void G1CollectedHeap::collect(GCCause::Cause cause) { | 2543 void G1CollectedHeap::collect(GCCause::Cause cause) { |
2478 assert_heap_not_locked(); | 2544 assert_heap_not_locked(); |
2479 | 2545 |
2674 if (chr->claim_value() == claim_value || | 2740 if (chr->claim_value() == claim_value || |
2675 !chr->continuesHumongous()) { | 2741 !chr->continuesHumongous()) { |
2676 break; | 2742 break; |
2677 } | 2743 } |
2678 | 2744 |
2679 // Noone should have claimed it directly. We can given | 2745 // No one should have claimed it directly. We can given |
2680 // that we claimed its "starts humongous" region. | 2746 // that we claimed its "starts humongous" region. |
2681 assert(chr->claim_value() != claim_value, "sanity"); | 2747 assert(chr->claim_value() != claim_value, "sanity"); |
2682 assert(chr->humongous_start_region() == r, "sanity"); | 2748 assert(chr->humongous_start_region() == r, "sanity"); |
2683 | 2749 |
2684 if (chr->claimHeapRegion(claim_value)) { | 2750 if (chr->claimHeapRegion(claim_value)) { |
2685 // we should always be able to claim it; noone else should | 2751 // we should always be able to claim it; no one else should |
2686 // be trying to claim this region | 2752 // be trying to claim this region |
2687 | 2753 |
2688 bool res2 = cl->doHeapRegion(chr); | 2754 bool res2 = cl->doHeapRegion(chr); |
2689 assert(!res2, "Should not abort"); | 2755 assert(!res2, "Should not abort"); |
2690 | 2756 |
2974 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | 3040 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { |
2975 // Return the remaining space in the cur alloc region, but not less than | 3041 // Return the remaining space in the cur alloc region, but not less than |
2976 // the min TLAB size. | 3042 // the min TLAB size. |
2977 | 3043 |
2978 // Also, this value can be at most the humongous object threshold, | 3044 // Also, this value can be at most the humongous object threshold, |
2979 // since we can't allow tlabs to grow big enough to accomodate | 3045 // since we can't allow tlabs to grow big enough to accommodate |
2980 // humongous objects. | 3046 // humongous objects. |
2981 | 3047 |
2982 HeapRegion* hr = _mutator_alloc_region.get(); | 3048 HeapRegion* hr = _mutator_alloc_region.get(); |
2983 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; | 3049 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
2984 if (hr == NULL) { | 3050 if (hr == NULL) { |
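
The cap comes from `_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2` in the constructor hunk earlier. A quick worked example, assuming a 1 MB region and 8-byte words:

```cpp
// Worked example of the TLAB size cap above; region size is assumed.
#include <cstdio>

int main() {
  const size_t wordSize   = 8;            // 64-bit HeapWord
  const size_t GrainBytes = 1024 * 1024;  // assumed region size
  const size_t GrainWords = GrainBytes / wordSize;
  // From the constructor hunk: the threshold is half a region.
  const size_t humongous_threshold_words = GrainWords / 2;
  const size_t max_tlab_bytes = humongous_threshold_words * wordSize;
  std::printf("max TLAB = %zu bytes\n", max_tlab_bytes);  // 524288
}
```
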
3741 | 3807 |
3742 if (GC_locker::check_active_before_gc()) { | 3808 if (GC_locker::check_active_before_gc()) { |
3743 return false; | 3809 return false; |
3744 } | 3810 } |
3745 | 3811 |
3812 _gc_timer_stw->register_gc_start(os::elapsed_counter()); | |
3813 | |
3814 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start()); | |
3815 | |
3746 SvcGCMarker sgcm(SvcGCMarker::MINOR); | 3816 SvcGCMarker sgcm(SvcGCMarker::MINOR); |
3747 ResourceMark rm; | 3817 ResourceMark rm; |
3748 | 3818 |
3749 print_heap_before_gc(); | 3819 print_heap_before_gc(); |
3820 trace_heap_before_gc(_gc_tracer_stw); | |
3750 | 3821 |
3751 HRSPhaseSetter x(HRSPhaseEvacuation); | 3822 HRSPhaseSetter x(HRSPhaseEvacuation); |
3752 verify_region_sets_optional(); | 3823 verify_region_sets_optional(); |
3753 verify_dirty_young_regions(); | 3824 verify_dirty_young_regions(); |
3754 | 3825 |
3769 // the CM thread, the flag's value in the policy has been reset. | 3840 // the CM thread, the flag's value in the policy has been reset. |
3770 bool should_start_conc_mark = g1_policy()->during_initial_mark_pause(); | 3841 bool should_start_conc_mark = g1_policy()->during_initial_mark_pause(); |
3771 | 3842 |
3772 // Inner scope for scope based logging, timers, and stats collection | 3843 // Inner scope for scope based logging, timers, and stats collection |
3773 { | 3844 { |
3845 EvacuationInfo evacuation_info; | |
3846 | |
3774 if (g1_policy()->during_initial_mark_pause()) { | 3847 if (g1_policy()->during_initial_mark_pause()) { |
3775 // We are about to start a marking cycle, so we increment the | 3848 // We are about to start a marking cycle, so we increment the |
3776 // full collection counter. | 3849 // full collection counter. |
3777 increment_old_marking_cycles_started(); | 3850 increment_old_marking_cycles_started(); |
3778 } | 3851 register_concurrent_cycle_start(_gc_timer_stw->gc_start()); |
3852 } | |
3853 | |
3854 _gc_tracer_stw->report_yc_type(yc_type()); | |
3855 | |
3779 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); | 3856 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); |
3780 | 3857 |
3781 int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? | 3858 int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? |
3782 workers()->active_workers() : 1); | 3859 workers()->active_workers() : 1); |
3783 double pause_start_sec = os::elapsedTime(); | 3860 double pause_start_sec = os::elapsedTime(); |
3883 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); | 3960 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
3884 _young_list->print(); | 3961 _young_list->print(); |
3885 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); | 3962 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
3886 #endif // YOUNG_LIST_VERBOSE | 3963 #endif // YOUNG_LIST_VERBOSE |
3887 | 3964 |
3888 g1_policy()->finalize_cset(target_pause_time_ms); | 3965 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); |
3889 | 3966 |
3890 _cm->note_start_of_gc(); | 3967 _cm->note_start_of_gc(); |
3891 // We should not verify the per-thread SATB buffers given that | 3968 // We should not verify the per-thread SATB buffers given that |
3892 // we have not filtered them yet (we'll do so during the | 3969 // we have not filtered them yet (we'll do so during the |
3893 // GC). We also call this after finalize_cset() to | 3970 // GC). We also call this after finalize_cset() to |
3919 #endif // ASSERT | 3996 #endif // ASSERT |
3920 | 3997 |
3921 setup_surviving_young_words(); | 3998 setup_surviving_young_words(); |
3922 | 3999 |
3923 // Initialize the GC alloc regions. | 4000 // Initialize the GC alloc regions. |
3924 init_gc_alloc_regions(); | 4001 init_gc_alloc_regions(evacuation_info); |
3925 | 4002 |
3926 // Actually do the work... | 4003 // Actually do the work... |
3927 evacuate_collection_set(); | 4004 evacuate_collection_set(evacuation_info); |
3928 | 4005 |
3929 // We do this to mainly verify the per-thread SATB buffers | 4006 // We do this to mainly verify the per-thread SATB buffers |
3930 // (which have been filtered by now) since we didn't verify | 4007 // (which have been filtered by now) since we didn't verify |
3931 // them earlier. No point in re-checking the stacks / enqueued | 4008 // them earlier. No point in re-checking the stacks / enqueued |
3932 // buffers given that the CSet has not changed since last time | 4009 // buffers given that the CSet has not changed since last time |
3934 _cm->verify_no_cset_oops(false /* verify_stacks */, | 4011 _cm->verify_no_cset_oops(false /* verify_stacks */, |
3935 false /* verify_enqueued_buffers */, | 4012 false /* verify_enqueued_buffers */, |
3936 true /* verify_thread_buffers */, | 4013 true /* verify_thread_buffers */, |
3937 true /* verify_fingers */); | 4014 true /* verify_fingers */); |
3938 | 4015 |
3939 free_collection_set(g1_policy()->collection_set()); | 4016 free_collection_set(g1_policy()->collection_set(), evacuation_info); |
3940 g1_policy()->clear_collection_set(); | 4017 g1_policy()->clear_collection_set(); |
3941 | 4018 |
3942 cleanup_surviving_young_words(); | 4019 cleanup_surviving_young_words(); |
3943 | 4020 |
3944 // Start a new incremental collection set for the next pause. | 4021 // Start a new incremental collection set for the next pause. |
3962 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); | 4039 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3963 _young_list->print(); | 4040 _young_list->print(); |
3964 #endif // YOUNG_LIST_VERBOSE | 4041 #endif // YOUNG_LIST_VERBOSE |
3965 | 4042 |
3966 g1_policy()->record_survivor_regions(_young_list->survivor_length(), | 4043 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
3967 _young_list->first_survivor_region(), | 4044 _young_list->first_survivor_region(), |
3968 _young_list->last_survivor_region()); | 4045 _young_list->last_survivor_region()); |
3969 | 4046 |
3970 _young_list->reset_auxilary_lists(); | 4047 _young_list->reset_auxilary_lists(); |
3971 | 4048 |
3972 if (evacuation_failed()) { | 4049 if (evacuation_failed()) { |
3973 _summary_bytes_used = recalculate_used(); | 4050 _summary_bytes_used = recalculate_used(); |
4051 uint n_queues = MAX2((int)ParallelGCThreads, 1); | |
4052 for (uint i = 0; i < n_queues; i++) { | |
4053 if (_evacuation_failed_info_array[i].has_failed()) { | |
4054 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); | |
4055 } | |
4056 } | |
3974 } else { | 4057 } else { |
3975 // The "used" of the the collection set have already been subtracted | 4058 // The "used" of the the collection set have already been subtracted |
3976 // when they were freed. Add in the bytes evacuated. | 4059 // when they were freed. Add in the bytes evacuated. |
3977 _summary_bytes_used += g1_policy()->bytes_copied_during_gc(); | 4060 _summary_bytes_used += g1_policy()->bytes_copied_during_gc(); |
3978 } | 4061 } |
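
This hunk replaces a single evacuation-failure flag with per-queue EvacuationFailedInfo records: each GC worker registers its copy failures locally, and after the pause only queues that actually failed are reported. A small illustrative model (field names assumed):

```cpp
// Per-worker failure records, reported only when has_failed() is true.
#include <cstdio>

struct EvacFailedInfo {
  size_t first_size = 0, total_size = 0, count = 0;
  void register_copy_failure(size_t sz) {
    if (count == 0) first_size = sz;  // remember the first failed copy
    total_size += sz; count++;
  }
  bool has_failed() const { return count > 0; }
};

int main() {
  const int n_queues = 4;
  EvacFailedInfo info[n_queues];
  info[2].register_copy_failure(128);  // pretend worker 2 failed one copy
  for (int i = 0; i < n_queues; i++) {
    if (info[i].has_failed())          // report only failing queues
      std::printf("queue %d: %zu failed copies, %zu words\n",
                  i, info[i].count, info[i].total_size);
  }
}
```
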
4011 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); | 4094 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
4012 } | 4095 } |
4013 } | 4096 } |
4014 } | 4097 } |
4015 | 4098 |
4016 // We redo the verificaiton but now wrt to the new CSet which | 4099 // We redo the verification but now wrt to the new CSet which |
4017 // has just got initialized after the previous CSet was freed. | 4100 // has just got initialized after the previous CSet was freed. |
4018 _cm->verify_no_cset_oops(true /* verify_stacks */, | 4101 _cm->verify_no_cset_oops(true /* verify_stacks */, |
4019 true /* verify_enqueued_buffers */, | 4102 true /* verify_enqueued_buffers */, |
4020 true /* verify_thread_buffers */, | 4103 true /* verify_thread_buffers */, |
4021 true /* verify_fingers */); | 4104 true /* verify_fingers */); |
4024 // This timing is only used by the ergonomics to handle our pause target. | 4107 // This timing is only used by the ergonomics to handle our pause target. |
4025 // It is unclear why this should not include the full pause. We will | 4108 // It is unclear why this should not include the full pause. We will |
4026 // investigate this in CR 7178365. | 4109 // investigate this in CR 7178365. |
4027 double sample_end_time_sec = os::elapsedTime(); | 4110 double sample_end_time_sec = os::elapsedTime(); |
4028 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; | 4111 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; |
4029 g1_policy()->record_collection_pause_end(pause_time_ms); | 4112 g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info); |
4030 | 4113 |
4031 MemoryService::track_memory_usage(); | 4114 MemoryService::track_memory_usage(); |
4032 | 4115 |
4033 // In prepare_for_verify() below we'll need to scan the deferred | 4116 // In prepare_for_verify() below we'll need to scan the deferred |
4034 // update buffers to bring the RSets up-to-date if | 4117 // update buffers to bring the RSets up-to-date if |
4091 | 4174 |
4092 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); | 4175 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
4093 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | 4176 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); |
4094 | 4177 |
4095 print_heap_after_gc(); | 4178 print_heap_after_gc(); |
4179 trace_heap_after_gc(_gc_tracer_stw); | |
4096 | 4180 |
4097 // We must call G1MonitoringSupport::update_sizes() in the same scoping level | 4181 // We must call G1MonitoringSupport::update_sizes() in the same scoping level |
4098 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the | 4182 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the |
4099 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated | 4183 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated |
4100 // before any GC notifications are raised. | 4184 // before any GC notifications are raised. |
4101 g1mm()->update_sizes(); | 4185 g1mm()->update_sizes(); |
4102 } | 4186 |
4103 | 4187 _gc_tracer_stw->report_evacuation_info(&evacuation_info); |
4188 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold()); | |
4189 _gc_timer_stw->register_gc_end(os::elapsed_counter()); | |
4190 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions()); | |
4191 } | |
4104 // It should now be safe to tell the concurrent mark thread to start | 4192 // It should now be safe to tell the concurrent mark thread to start |
4105 // without its logging output interfering with the logging output | 4193 // without its logging output interfering with the logging output |
4106 // that came from the pause. | 4194 // that came from the pause. |
4107 | 4195 |
4108 if (should_start_conc_mark) { | 4196 if (should_start_conc_mark) { |
4150 void G1CollectedHeap::release_mutator_alloc_region() { | 4238 void G1CollectedHeap::release_mutator_alloc_region() { |
4151 _mutator_alloc_region.release(); | 4239 _mutator_alloc_region.release(); |
4152 assert(_mutator_alloc_region.get() == NULL, "post-condition"); | 4240 assert(_mutator_alloc_region.get() == NULL, "post-condition"); |
4153 } | 4241 } |
4154 | 4242 |
4155 void G1CollectedHeap::init_gc_alloc_regions() { | 4243 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) { |
4156 assert_at_safepoint(true /* should_be_vm_thread */); | 4244 assert_at_safepoint(true /* should_be_vm_thread */); |
4157 | 4245 |
4158 _survivor_gc_alloc_region.init(); | 4246 _survivor_gc_alloc_region.init(); |
4159 _old_gc_alloc_region.init(); | 4247 _old_gc_alloc_region.init(); |
4160 HeapRegion* retained_region = _retained_old_gc_alloc_region; | 4248 HeapRegion* retained_region = _retained_old_gc_alloc_region; |
4165 // b) it's already full (no point in using it), | 4253 // b) it's already full (no point in using it), |
4166 // c) it's empty (this means that it was emptied during | 4254 // c) it's empty (this means that it was emptied during |
4167 // a cleanup and it should be on the free list now), or | 4255 // a cleanup and it should be on the free list now), or |
4168 // d) it's humongous (this means that it was emptied | 4256 // d) it's humongous (this means that it was emptied |
4169 // during a cleanup and was added to the free list, but | 4257 // during a cleanup and was added to the free list, but |
4170 // has been subseqently used to allocate a humongous | 4258 // has been subsequently used to allocate a humongous |
4171 // object that may be less than the region size). | 4259 // object that may be less than the region size). |
4172 if (retained_region != NULL && | 4260 if (retained_region != NULL && |
4173 !retained_region->in_collection_set() && | 4261 !retained_region->in_collection_set() && |
4174 !(retained_region->top() == retained_region->end()) && | 4262 !(retained_region->top() == retained_region->end()) && |
4175 !retained_region->is_empty() && | 4263 !retained_region->is_empty() && |
4182 _old_set.remove(retained_region); | 4270 _old_set.remove(retained_region); |
4183 bool during_im = g1_policy()->during_initial_mark_pause(); | 4271 bool during_im = g1_policy()->during_initial_mark_pause(); |
4184 retained_region->note_start_of_copying(during_im); | 4272 retained_region->note_start_of_copying(during_im); |
4185 _old_gc_alloc_region.set(retained_region); | 4273 _old_gc_alloc_region.set(retained_region); |
4186 _hr_printer.reuse(retained_region); | 4274 _hr_printer.reuse(retained_region); |
4187 } | 4275 evacuation_info.set_alloc_regions_used_before(retained_region->used()); |
4188 } | 4276 } |
4189 | 4277 } |
4190 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) { | 4278 |
4279 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) { | |
4280 evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() + | |
4281 _old_gc_alloc_region.count()); | |
4191 _survivor_gc_alloc_region.release(); | 4282 _survivor_gc_alloc_region.release(); |
4192 // If we have an old GC alloc region to release, we'll save it in | 4283 // If we have an old GC alloc region to release, we'll save it in |
4193 // _retained_old_gc_alloc_region. If we don't | 4284 // _retained_old_gc_alloc_region. If we don't |
4194 // _retained_old_gc_alloc_region will become NULL. This is what we | 4285 // _retained_old_gc_alloc_region will become NULL. This is what we |
4195 // want either way so no reason to check explicitly for either | 4286 // want either way so no reason to check explicitly for either |
4268 obj->oop_iterate_backwards(_evac_failure_closure); | 4359 obj->oop_iterate_backwards(_evac_failure_closure); |
4269 } | 4360 } |
4270 } | 4361 } |
4271 | 4362 |
4272 oop | 4363 oop |
4273 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | 4364 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, |
4274 oop old) { | 4365 oop old) { |
4275 assert(obj_in_cs(old), | 4366 assert(obj_in_cs(old), |
4276 err_msg("obj: "PTR_FORMAT" should still be in the CSet", | 4367 err_msg("obj: "PTR_FORMAT" should still be in the CSet", |
4277 (HeapWord*) old)); | 4368 (HeapWord*) old)); |
4278 markOop m = old->mark(); | 4369 markOop m = old->mark(); |
4279 oop forward_ptr = old->forward_to_atomic(old); | 4370 oop forward_ptr = old->forward_to_atomic(old); |
4280 if (forward_ptr == NULL) { | 4371 if (forward_ptr == NULL) { |
4281 // Forward-to-self succeeded. | 4372 // Forward-to-self succeeded. |
4282 | 4373 assert(_par_scan_state != NULL, "par scan state"); |
4374 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4375 uint queue_num = _par_scan_state->queue_num(); | |
4376 | |
4377 _evacuation_failed = true; | |
4378 _evacuation_failed_info_array[queue_num].register_copy_failure(old->size()); | |
4283 if (_evac_failure_closure != cl) { | 4379 if (_evac_failure_closure != cl) { |
4284 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | 4380 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); |
4285 assert(!_drain_in_progress, | 4381 assert(!_drain_in_progress, |
4286 "Should only be true while someone holds the lock."); | 4382 "Should only be true while someone holds the lock."); |
4287 // Set the global evac-failure closure to the current thread's. | 4383 // Set the global evac-failure closure to the current thread's. |
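
In this hunk handle_evacuation_failure_par now takes the per-thread scan state so the failure can be charged to the right queue; the forward-to-self CAS decides which thread owns the failure. A standalone model of that race, using std::atomic in place of the markOop machinery:

```cpp
// Forward-to-self race: the first CAS wins and handles the failure.
#include <atomic>
#include <cstdio>

struct Obj {
  std::atomic<Obj*> forwardee{nullptr};
  // Returns nullptr if we installed 'to' ourselves, else the prior winner.
  Obj* forward_to_atomic(Obj* to) {
    Obj* expected = nullptr;
    if (forwardee.compare_exchange_strong(expected, to)) return nullptr;
    return expected;
  }
};

int main() {
  Obj old_obj;
  Obj* prior = old_obj.forward_to_atomic(&old_obj);  // forward to self
  if (prior == nullptr)
    std::puts("forward-to-self succeeded: this thread handles the failure");
  else
    std::puts("another thread already installed a forwarding pointer");
}
```
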
4308 return forward_ptr; | 4404 return forward_ptr; |
4309 } | 4405 } |
4310 } | 4406 } |
4311 | 4407 |
4312 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | 4408 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { |
4313 set_evacuation_failed(true); | |
4314 | |
4315 preserve_mark_if_necessary(old, m); | 4409 preserve_mark_if_necessary(old, m); |
4316 | 4410 |
4317 HeapRegion* r = heap_region_containing(old); | 4411 HeapRegion* r = heap_region_containing(old); |
4318 if (!r->evacuation_failed()) { | 4412 if (!r->evacuation_failed()) { |
4319 r->set_evacuation_failed(true); | 4413 r->set_evacuation_failed(true); |
4559 #endif // !PRODUCT | 4653 #endif // !PRODUCT |
4560 | 4654 |
4561 if (obj_ptr == NULL) { | 4655 if (obj_ptr == NULL) { |
4562 // This will either forward-to-self, or detect that someone else has | 4656 // This will either forward-to-self, or detect that someone else has |
4563 // installed a forwarding pointer. | 4657 // installed a forwarding pointer. |
4564 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | 4658 return _g1->handle_evacuation_failure_par(_par_scan_state, old); |
4565 return _g1->handle_evacuation_failure_par(cl, old); | |
4566 } | 4659 } |
4567 | 4660 |
4568 oop obj = oop(obj_ptr); | 4661 oop obj = oop(obj_ptr); |
4569 | 4662 |
4570 // We're going to allocate linearly, so might as well prefetch ahead. | 4663 // We're going to allocate linearly, so might as well prefetch ahead. |
5164 // on the PSS queue. When the queue is drained (after each | 5257 // on the PSS queue. When the queue is drained (after each |
5165 // phase of reference processing) the object and it's followers | 5258 // phase of reference processing) the object and it's followers |
5166 // will be copied, the reference field set to point to the | 5259 // will be copied, the reference field set to point to the |
5167 // new location, and the RSet updated. Otherwise we need to | 5260 // new location, and the RSet updated. Otherwise we need to |
5168 // use the the non-heap or metadata closures directly to copy | 5261 // use the the non-heap or metadata closures directly to copy |
5169 // the refernt object and update the pointer, while avoiding | 5262 // the referent object and update the pointer, while avoiding |
5170 // updating the RSet. | 5263 // updating the RSet. |
5171 | 5264 |
5172 if (_g1h->is_in_g1_reserved(p)) { | 5265 if (_g1h->is_in_g1_reserved(p)) { |
5173 _par_scan_state->push_on_queue(p); | 5266 _par_scan_state->push_on_queue(p); |
5174 } else { | 5267 } else { |
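
The comment above explains the dispatch: referents inside the G1 reserved heap are deferred onto the per-thread PSS queue, while anything else is handled immediately by the non-heap or metadata closures. A standalone sketch of that test (bounds and names are illustrative):

```cpp
// In-heap referents are queued; off-heap referents are copied directly.
#include <cstdint>
#include <cstdio>

const std::uintptr_t heap_base = 0x100000;  // assumed reserved range
const std::uintptr_t heap_end  = 0x200000;

bool is_in_g1_reserved(std::uintptr_t p) {
  return p >= heap_base && p < heap_end;
}

void handle_referent(std::uintptr_t p) {
  if (is_in_g1_reserved(p))
    std::puts("push_on_queue(p)  // copied when the PSS queue drains");
  else
    std::puts("copy closure applied directly; RSet update skipped");
}

int main() {
  handle_referent(heap_base + 16);  // in-heap referent
  handle_referent(0x10);            // non-heap referent
}
```
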
5332 virtual void work(uint worker_id) { | 5425 virtual void work(uint worker_id) { |
5333 _enq_task.work(worker_id); | 5426 _enq_task.work(worker_id); |
5334 } | 5427 } |
5335 }; | 5428 }; |
5336 | 5429 |
5337 // Driver routine for parallel reference enqueing. | 5430 // Driver routine for parallel reference enqueueing. |
5338 // Creates an instance of the ref enqueueing gang | 5431 // Creates an instance of the ref enqueueing gang |
5339 // task and has the worker threads execute it. | 5432 // task and has the worker threads execute it. |
5340 | 5433 |
5341 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) { | 5434 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) { |
5342 assert(_workers != NULL, "Need parallel worker threads."); | 5435 assert(_workers != NULL, "Need parallel worker threads."); |
5461 // But some of the referents, that are in the collection set, that these | 5554 // But some of the referents, that are in the collection set, that these |
5462 // reference objects point to may not have been copied: the STW ref | 5555 // reference objects point to may not have been copied: the STW ref |
5463 // processor would have seen that the reference object had already | 5556 // processor would have seen that the reference object had already |
5464 // been 'discovered' and would have skipped discovering the reference, | 5557 // been 'discovered' and would have skipped discovering the reference, |
5465 // but would not have treated the reference object as a regular oop. | 5558 // but would not have treated the reference object as a regular oop. |
5466 // As a reult the copy closure would not have been applied to the | 5559 // As a result the copy closure would not have been applied to the |
5467 // referent object. | 5560 // referent object. |
5468 // | 5561 // |
5469 // We need to explicitly copy these referent objects - the references | 5562 // We need to explicitly copy these referent objects - the references |
5470 // will be processed at the end of remarking. | 5563 // will be processed at the end of remarking. |
5471 // | 5564 // |
5537 G1STWDrainQueueClosure drain_queue(this, &pss); | 5630 G1STWDrainQueueClosure drain_queue(this, &pss); |
5538 | 5631 |
5539 // Setup the soft refs policy... | 5632 // Setup the soft refs policy... |
5540 rp->setup_policy(false); | 5633 rp->setup_policy(false); |
5541 | 5634 |
5635 ReferenceProcessorStats stats; | |
5542 if (!rp->processing_is_mt()) { | 5636 if (!rp->processing_is_mt()) { |
5543 // Serial reference processing... | 5637 // Serial reference processing... |
5544 rp->process_discovered_references(&is_alive, | 5638 stats = rp->process_discovered_references(&is_alive, |
5545 &keep_alive, | 5639 &keep_alive, |
5546 &drain_queue, | 5640 &drain_queue, |
5547 NULL); | 5641 NULL, |
5642 _gc_timer_stw); | |
5548 } else { | 5643 } else { |
5549 // Parallel reference processing | 5644 // Parallel reference processing |
5550 assert(rp->num_q() == no_of_gc_workers, "sanity"); | 5645 assert(rp->num_q() == no_of_gc_workers, "sanity"); |
5551 assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); | 5646 assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); |
5552 | 5647 |
5553 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers); | 5648 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers); |
5554 rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor); | 5649 stats = rp->process_discovered_references(&is_alive, |
5555 } | 5650 &keep_alive, |
5556 | 5651 &drain_queue, |
5652 &par_task_executor, | |
5653 _gc_timer_stw); | |
5654 } | |
5655 | |
5656 _gc_tracer_stw->report_gc_reference_stats(stats); | |
5557 // We have completed copying any necessary live referent objects | 5657 // We have completed copying any necessary live referent objects |
5558 // (that were not copied during the actual pause) so we can | 5658 // (that were not copied during the actual pause) so we can |
5559 // retire any active alloc buffers | 5659 // retire any active alloc buffers |
5560 pss.retire_alloc_buffers(); | 5660 pss.retire_alloc_buffers(); |
5561 assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); | 5661 assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); |
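
The change above has process_discovered_references() return a ReferenceProcessorStats value on both the serial and parallel paths, which the STW tracer then turns into an event. A sketch of that flow; the struct fields are illustrative, not the real ReferenceProcessorStats layout:

```cpp
// Stats returned from reference processing, then reported as an event.
#include <cstdio>

struct RefProcStats {  // stand-in for ReferenceProcessorStats
  size_t soft, weak, final_refs, phantom;
};

RefProcStats process_discovered_references(/* closures, executor, timer */) {
  return RefProcStats{2, 17, 3, 1};  // pretend-processed reference counts
}

void report_gc_reference_stats(const RefProcStats& s) {
  std::printf("event: refs soft=%zu weak=%zu final=%zu phantom=%zu\n",
              s.soft, s.weak, s.final_refs, s.phantom);
}

int main() {
  RefProcStats stats = process_discovered_references();
  report_gc_reference_stats(stats);  // as _gc_tracer_stw->report_gc_reference_stats(stats)
}
```
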
5575 // the pending list. | 5675 // the pending list. |
5576 if (!rp->processing_is_mt()) { | 5676 if (!rp->processing_is_mt()) { |
5577 // Serial reference processing... | 5677 // Serial reference processing... |
5578 rp->enqueue_discovered_references(); | 5678 rp->enqueue_discovered_references(); |
5579 } else { | 5679 } else { |
5580 // Parallel reference enqueuing | 5680 // Parallel reference enqueueing |
5581 | 5681 |
5582 assert(no_of_gc_workers == workers()->active_workers(), | 5682 assert(no_of_gc_workers == workers()->active_workers(), |
5583 "Need to reset active workers"); | 5683 "Need to reset active workers"); |
5584 assert(rp->num_q() == no_of_gc_workers, "sanity"); | 5684 assert(rp->num_q() == no_of_gc_workers, "sanity"); |
5585 assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); | 5685 assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); |
5592 assert(!rp->discovery_enabled(), "should have been disabled"); | 5692 assert(!rp->discovery_enabled(), "should have been disabled"); |
5593 | 5693 |
5594 // FIXME | 5694 // FIXME |
5595 // CM's reference processing also cleans up the string and symbol tables. | 5695 // CM's reference processing also cleans up the string and symbol tables. |
5596 // Should we do that here also? We could, but it is a serial operation | 5696 // Should we do that here also? We could, but it is a serial operation |
5597 // and could signicantly increase the pause time. | 5697 // and could significantly increase the pause time. |
5598 | 5698 |
5599 double ref_enq_time = os::elapsedTime() - ref_enq_start; | 5699 double ref_enq_time = os::elapsedTime() - ref_enq_start; |
5600 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0); | 5700 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0); |
5601 } | 5701 } |
5602 | 5702 |
5603 void G1CollectedHeap::evacuate_collection_set() { | 5703 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) { |
5604 _expand_heap_after_alloc_failure = true; | 5704 _expand_heap_after_alloc_failure = true; |
5605 set_evacuation_failed(false); | 5705 _evacuation_failed = false; |
5606 | 5706 |
5607 // Should G1EvacuationFailureALot be in effect for this GC? | 5707 // Should G1EvacuationFailureALot be in effect for this GC? |
5608 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();) | 5708 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();) |
5609 | 5709 |
5610 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | 5710 g1_rem_set()->prepare_for_oops_into_collection_set_do(); |
5689 G1STWIsAliveClosure is_alive(this); | 5789 G1STWIsAliveClosure is_alive(this); |
5690 G1KeepAliveClosure keep_alive(this); | 5790 G1KeepAliveClosure keep_alive(this); |
5691 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | 5791 JNIHandles::weak_oops_do(&is_alive, &keep_alive); |
5692 } | 5792 } |
5693 | 5793 |
5694 release_gc_alloc_regions(n_workers); | 5794 release_gc_alloc_regions(n_workers, evacuation_info); |
5695 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); | 5795 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
5696 | 5796 |
5697 // Reset and re-enable the hot card cache. | 5797 // Reset and re-enable the hot card cache. |
5698 // Note the counts for the cards in the regions in the | 5798 // Note the counts for the cards in the regions in the |
5699 // collection set are reset when the collection set is freed. | 5799 // collection set are reset when the collection set is freed. |
5712 } | 5812 } |
5713 | 5813 |
5714 // Enqueue any remaining references remaining on the STW | 5814 // Enqueue any remaining references remaining on the STW |
5715 // reference processor's discovered lists. We need to do | 5815 // reference processor's discovered lists. We need to do |
5716 // this after the card table is cleaned (and verified) as | 5816 // this after the card table is cleaned (and verified) as |
5717 // the act of enqueuing entries on to the pending list | 5817 // the act of enqueueing entries on to the pending list |
5718 // will log these updates (and dirty their associated | 5818 // will log these updates (and dirty their associated |
5719 // cards). We need these updates logged to update any | 5819 // cards). We need these updates logged to update any |
5720 // RSets. | 5820 // RSets. |
5721 enqueue_discovered_references(n_workers); | 5821 enqueue_discovered_references(n_workers); |
5722 | 5822 |
5940 | 6040 |
5941 double elapsed = os::elapsedTime() - start; | 6041 double elapsed = os::elapsedTime() - start; |
5942 g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0); | 6042 g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0); |
5943 } | 6043 } |
5944 | 6044 |
5945 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | 6045 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) { |
5946 size_t pre_used = 0; | 6046 size_t pre_used = 0; |
5947 FreeRegionList local_free_list("Local List for CSet Freeing"); | 6047 FreeRegionList local_free_list("Local List for CSet Freeing"); |
5948 | 6048 |
5949 double young_time_ms = 0.0; | 6049 double young_time_ms = 0.0; |
5950 double non_young_time_ms = 0.0; | 6050 double non_young_time_ms = 0.0; |
6026 } | 6126 } |
6027 cur->set_not_young(); | 6127 cur->set_not_young(); |
6028 cur->set_evacuation_failed(false); | 6128 cur->set_evacuation_failed(false); |
6029 // The region is now considered to be old. | 6129 // The region is now considered to be old. |
6030 _old_set.add(cur); | 6130 _old_set.add(cur); |
6131 evacuation_info.increment_collectionset_used_after(cur->used()); | |
6031 } | 6132 } |
6032 cur = next; | 6133 cur = next; |
6033 } | 6134 } |
6034 | 6135 |
6136 evacuation_info.set_regions_freed(local_free_list.length()); | |
6035 policy->record_max_rs_lengths(rs_lengths); | 6137 policy->record_max_rs_lengths(rs_lengths); |
6036 policy->cset_regions_freed(); | 6138 policy->cset_regions_freed(); |
6037 | 6139 |
6038 double end_sec = os::elapsedTime(); | 6140 double end_sec = os::elapsedTime(); |
6039 double elapsed_ms = (end_sec - start_sec) * 1000.0; | 6141 double elapsed_ms = (end_sec - start_sec) * 1000.0; |
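
The free_collection_set hunks above fill in EvacuationInfo as the collection set is torn down: regions that failed evacuation stay live as old regions and contribute their used bytes, while fully evacuated regions are counted as freed. A sketch of that accumulation; the method names mirror the diff, the region model is illustrative:

```cpp
// EvacuationInfo accumulation while freeing the collection set.
#include <cstdio>
#include <vector>

struct EvacuationInfo {
  size_t collectionset_used_after = 0;
  size_t regions_freed = 0;
  void increment_collectionset_used_after(size_t used) { collectionset_used_after += used; }
  void set_regions_freed(size_t n) { regions_freed = n; }
};

int main() {
  struct Region { size_t used; bool evac_failed; };
  std::vector<Region> cset = {{4096, false}, {8192, true}, {0, false}};

  EvacuationInfo info;
  size_t freed = 0;
  for (const Region& r : cset) {
    if (r.evac_failed) info.increment_collectionset_used_after(r.used);
    else freed++;  // fully evacuated regions go back on the free list
  }
  info.set_regions_freed(freed);
  std::printf("freed=%zu used_after=%zu\n", info.regions_freed,
              info.collectionset_used_after);
}
```
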