comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 12355:cefad50507d8

Merge with hs25-b53
author Gilles Duboscq <duboscq@ssw.jku.at>
date Fri, 11 Oct 2013 10:38:03 +0200
parents d0aeaf72c7bd 798522662fcd
children 096c224171c4
comparison: 12058:ccb4f2af2319 (left column) vs 12355:cefad50507d8 (right column)
21 * questions. 21 * questions.
22 * 22 *
23 */ 23 */
24 24
25 #include "precompiled.hpp" 25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
26 #include "code/icBuffer.hpp" 27 #include "code/icBuffer.hpp"
27 #include "gc_implementation/g1/bufferingOopClosure.hpp" 28 #include "gc_implementation/g1/bufferingOopClosure.hpp"
28 #include "gc_implementation/g1/concurrentG1Refine.hpp" 29 #include "gc_implementation/g1/concurrentG1Refine.hpp"
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" 30 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" 31 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
122 G1CollectedHeap* _g1h; 123 G1CollectedHeap* _g1h;
123 CardTableModRefBS* _ctbs; 124 CardTableModRefBS* _ctbs;
124 int _histo[256]; 125 int _histo[256];
125 public: 126 public:
126 ClearLoggedCardTableEntryClosure() : 127 ClearLoggedCardTableEntryClosure() :
127 _calls(0) 128 _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
128 { 129 {
129 _g1h = G1CollectedHeap::heap();
130 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
131 for (int i = 0; i < 256; i++) _histo[i] = 0; 130 for (int i = 0; i < 256; i++) _histo[i] = 0;
132 } 131 }
133 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 132 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
134 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 133 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
135 _calls++; 134 _calls++;
155 int _calls; 154 int _calls;
156 G1CollectedHeap* _g1h; 155 G1CollectedHeap* _g1h;
157 CardTableModRefBS* _ctbs; 156 CardTableModRefBS* _ctbs;
158 public: 157 public:
159 RedirtyLoggedCardTableEntryClosure() : 158 RedirtyLoggedCardTableEntryClosure() :
160 _calls(0) 159 _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
161 { 160
162 _g1h = G1CollectedHeap::heap();
163 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
164 }
165 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 161 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
166 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 162 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
167 _calls++; 163 _calls++;
168 *card_ptr = 0; 164 *card_ptr = 0;
169 } 165 }
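
Both logging-card-table closures above drop their constructor bodies: the heap and card table fields move into the member initializer list, and the card table now comes from the typed g1_barrier_set() accessor rather than a cast of barrier_set(). A minimal sketch of the same refactor, using simplified stand-in types (Heap, CardTable and ClearCardsClosure are illustrative names, not HotSpot classes):

    #include <cstring>

    struct CardTable { };                                // stand-in for CardTableModRefBS

    struct Heap {
      static Heap* heap() { static Heap h; return &h; }  // singleton accessor, like G1CollectedHeap::heap()
      CardTable* card_table() { return &_ct; }           // typed accessor, analogous to g1_barrier_set()
    private:
      CardTable _ct;
    };

    class ClearCardsClosure {
      int        _calls;
      Heap*      _heap;                                  // declared before _ct, so it is initialized first
      CardTable* _ct;
      int        _histo[256];
    public:
      // Before: fields assigned inside the constructor body, with a cast.
      // After: everything set in the member initializer list.
      ClearCardsClosure()
        : _calls(0), _heap(Heap::heap()), _ct(_heap->card_table()) {
        std::memset(_histo, 0, sizeof(_histo));          // same zeroing the original loop performs
      }
      int calls() const { return _calls; }
    };

    int main() { ClearCardsClosure cl; return cl.calls(); }

Members are initialized in declaration order, so listing the heap field before the card table field is what makes _ctbs(_g1h->g1_barrier_set()) safe in the real constructors as well.
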
475 } 471 }
476 } 472 }
477 473
478 void G1CollectedHeap::check_ct_logs_at_safepoint() { 474 void G1CollectedHeap::check_ct_logs_at_safepoint() {
479 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 475 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
480 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); 476 CardTableModRefBS* ct_bs = g1_barrier_set();
481 477
482 // Count the dirty cards at the start. 478 // Count the dirty cards at the start.
483 CountNonCleanMemRegionClosure count1(this); 479 CountNonCleanMemRegionClosure count1(this);
484 ct_bs->mod_card_iterate(&count1); 480 ct_bs->mod_card_iterate(&count1);
485 int orig_count = count1.n(); 481 int orig_count = count1.n();
978 } 974 }
979 } 975 }
980 976
981 if (should_try_gc) { 977 if (should_try_gc) {
982 bool succeeded; 978 bool succeeded;
983 result = do_collection_pause(word_size, gc_count_before, &succeeded); 979 result = do_collection_pause(word_size, gc_count_before, &succeeded,
980 GCCause::_g1_inc_collection_pause);
984 if (result != NULL) { 981 if (result != NULL) {
985 assert(succeeded, "only way to get back a non-NULL result"); 982 assert(succeeded, "only way to get back a non-NULL result");
986 return result; 983 return result;
987 } 984 }
988 985
1103 // If we failed to allocate the humongous object, we should try to 1100 // If we failed to allocate the humongous object, we should try to
1104 // do a collection pause (if we're allowed) in case it reclaims 1101 // do a collection pause (if we're allowed) in case it reclaims
1105 // enough space for the allocation to succeed after the pause. 1102 // enough space for the allocation to succeed after the pause.
1106 1103
1107 bool succeeded; 1104 bool succeeded;
1108 result = do_collection_pause(word_size, gc_count_before, &succeeded); 1105 result = do_collection_pause(word_size, gc_count_before, &succeeded,
1106 GCCause::_g1_humongous_allocation);
1109 if (result != NULL) { 1107 if (result != NULL) {
1110 assert(succeeded, "only way to get back a non-NULL result"); 1108 assert(succeeded, "only way to get back a non-NULL result");
1111 return result; 1109 return result;
1112 } 1110 }
1113 1111
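
In both slow allocation paths the pause request now carries an explicit cause, _g1_inc_collection_pause for a regular allocation failure and _g1_humongous_allocation for a failed humongous allocation, instead of do_collection_pause hard-coding the former. A hedged sketch of threading such a cause enum through the call chain (the enum values and helper names here are illustrative only):

    #include <cstddef>
    #include <cstdio>

    // Illustrative cause enum; GCCause::Cause in HotSpot has many more values.
    enum class Cause { IncCollectionPause, HumongousAllocation };

    // Stand-in for the VM operation that records why the pause was requested.
    static void execute_pause(Cause cause) {
      std::printf("pause requested, cause=%d\n", static_cast<int>(cause));
    }

    // The caller decides the cause; the pause path just forwards it.
    static void* do_collection_pause(std::size_t word_size, bool* succeeded, Cause cause) {
      (void)word_size;
      execute_pause(cause);
      *succeeded = true;
      return nullptr;                       // the real code retries the allocation afterwards
    }

    int main() {
      bool ok = false;
      do_collection_pause(16, &ok, Cause::IncCollectionPause);        // ordinary allocation failure
      do_collection_pause(1u << 20, &ok, Cause::HumongousAllocation); // humongous allocation failure
      return ok ? 0 : 1;
    }
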
1174 class PostMCRemSetClearClosure: public HeapRegionClosure { 1172 class PostMCRemSetClearClosure: public HeapRegionClosure {
1175 G1CollectedHeap* _g1h; 1173 G1CollectedHeap* _g1h;
1176 ModRefBarrierSet* _mr_bs; 1174 ModRefBarrierSet* _mr_bs;
1177 public: 1175 public:
1178 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) : 1176 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1179 _g1h(g1h), _mr_bs(mr_bs) { } 1177 _g1h(g1h), _mr_bs(mr_bs) {}
1178
1180 bool doHeapRegion(HeapRegion* r) { 1179 bool doHeapRegion(HeapRegion* r) {
1180 HeapRegionRemSet* hrrs = r->rem_set();
1181
1181 if (r->continuesHumongous()) { 1182 if (r->continuesHumongous()) {
1183 // We'll assert that the strong code root list and RSet is empty
1184 assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1185 assert(hrrs->occupied() == 0, "RSet should be empty");
1182 return false; 1186 return false;
1183 } 1187 }
1188
1184 _g1h->reset_gc_time_stamps(r); 1189 _g1h->reset_gc_time_stamps(r);
1185 HeapRegionRemSet* hrrs = r->rem_set(); 1190 hrrs->clear();
1186 if (hrrs != NULL) hrrs->clear();
1187 // You might think here that we could clear just the cards 1191 // You might think here that we could clear just the cards
1188 // corresponding to the used region. But no: if we leave a dirty card 1192 // corresponding to the used region. But no: if we leave a dirty card
1189 // in a region we might allocate into, then it would prevent that card 1193 // in a region we might allocate into, then it would prevent that card
1190 // from being enqueued, and cause it to be missed. 1194 // from being enqueued, and cause it to be missed.
1191 // Re: the performance cost: we shouldn't be doing full GC anyway! 1195 // Re: the performance cost: we shouldn't be doing full GC anyway!
1192 _mr_bs->clear(MemRegion(r->bottom(), r->end())); 1196 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1197
1193 return false; 1198 return false;
1194 } 1199 }
1195 }; 1200 };
1196 1201
1197 void G1CollectedHeap::clear_rsets_post_compaction() { 1202 void G1CollectedHeap::clear_rsets_post_compaction() {
1198 PostMCRemSetClearClosure rs_clear(this, mr_bs()); 1203 PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1199 heap_region_iterate(&rs_clear); 1204 heap_region_iterate(&rs_clear);
1200 } 1205 }
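
The rewritten closure fetches the region's remembered set once, asserts that both the strong code root list and the RSet are empty for "continues humongous" regions before skipping them, and otherwise clears the RSet unconditionally (the old NULL check is gone); clear_rsets_post_compaction now passes the typed g1_barrier_set(). A minimal sketch of the skip-continuation-regions pattern, with stand-in Region/RemSet types that are not HotSpot's:

    #include <cassert>
    #include <vector>

    // Illustrative stand-ins; not HotSpot's HeapRegion/HeapRegionRemSet.
    struct RemSet {
      int occupied = 0;
      void clear() { occupied = 0; }
    };

    struct Region {
      bool continues_humongous = false;     // tail region of a humongous object
      RemSet rset;
    };

    struct ClearRemSets {
      // Returning false keeps the iteration going, mirroring HeapRegionClosure::doHeapRegion.
      bool do_region(Region& r) {
        RemSet& rs = r.rset;
        if (r.continues_humongous) {
          assert(rs.occupied == 0 && "RSet of a continuation region should be empty");
          return false;                     // nothing to clear; skip the region
        }
        rs.clear();                         // clear the remembered set of every normal region
        return false;
      }
    };

    int main() {
      std::vector<Region> heap(4);
      heap[2].continues_humongous = true;
      ClearRemSets cl;
      for (Region& r : heap) cl.do_region(r);
      return 0;
    }
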
1201 1206
1202 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { 1207 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1203 G1CollectedHeap* _g1h; 1208 G1CollectedHeap* _g1h;
1267 void G1CollectedHeap::print_hrs_post_compaction() { 1272 void G1CollectedHeap::print_hrs_post_compaction() {
1268 PostCompactionPrinterClosure cl(hr_printer()); 1273 PostCompactionPrinterClosure cl(hr_printer());
1269 heap_region_iterate(&cl); 1274 heap_region_iterate(&cl);
1270 } 1275 }
1271 1276
1272 double G1CollectedHeap::verify(bool guard, const char* msg) {
1273 double verify_time_ms = 0.0;
1274
1275 if (guard && total_collections() >= VerifyGCStartAt) {
1276 double verify_start = os::elapsedTime();
1277 HandleMark hm; // Discard invalid handles created during verification
1278 prepare_for_verify();
1279 Universe::verify(VerifyOption_G1UsePrevMarking, msg);
1280 verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
1281 }
1282
1283 return verify_time_ms;
1284 }
1285
1286 void G1CollectedHeap::verify_before_gc() {
1287 double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
1288 g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
1289 }
1290
1291 void G1CollectedHeap::verify_after_gc() {
1292 double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
1293 g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
1294 }
1295
1296 bool G1CollectedHeap::do_collection(bool explicit_gc, 1277 bool G1CollectedHeap::do_collection(bool explicit_gc,
1297 bool clear_all_soft_refs, 1278 bool clear_all_soft_refs,
1298 size_t word_size) { 1279 size_t word_size) {
1299 assert_at_safepoint(true /* should_be_vm_thread */); 1280 assert_at_safepoint(true /* should_be_vm_thread */);
1300 1281
1431 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); 1412 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1432 ref_processor_stw()->verify_no_references_recorded(); 1413 ref_processor_stw()->verify_no_references_recorded();
1433 1414
1434 // Delete metaspaces for unloaded class loaders and clean up loader_data graph 1415 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1435 ClassLoaderDataGraph::purge(); 1416 ClassLoaderDataGraph::purge();
1436 MetaspaceAux::verify_metrics(); 1417 MetaspaceAux::verify_metrics();
1437 1418
1438 // Note: since we've just done a full GC, concurrent 1419 // Note: since we've just done a full GC, concurrent
1439 // marking is no longer active. Therefore we need not 1420 // marking is no longer active. Therefore we need not
1440 // re-enable reference discovery for the CM ref processor. 1421 // re-enable reference discovery for the CM ref processor.
1441 // That will be done at the start of the next marking cycle. 1422 // That will be done at the start of the next marking cycle.
1502 } else { 1483 } else {
1503 RebuildRSOutOfRegionClosure rebuild_rs(this); 1484 RebuildRSOutOfRegionClosure rebuild_rs(this);
1504 heap_region_iterate(&rebuild_rs); 1485 heap_region_iterate(&rebuild_rs);
1505 } 1486 }
1506 1487
1488 // Rebuild the strong code root lists for each region
1489 rebuild_strong_code_roots();
1490
1507 if (true) { // FIXME 1491 if (true) { // FIXME
1508 MetaspaceGC::compute_new_size(); 1492 MetaspaceGC::compute_new_size();
1509 } 1493 }
1510 1494
1511 #ifdef TRACESPINNING 1495 #ifdef TRACESPINNING
1786 // Tell the hot card cache about the update 1770 // Tell the hot card cache about the update
1787 _cg1r->hot_card_cache()->resize_card_counts(capacity()); 1771 _cg1r->hot_card_cache()->resize_card_counts(capacity());
1788 } 1772 }
1789 1773
1790 bool G1CollectedHeap::expand(size_t expand_bytes) { 1774 bool G1CollectedHeap::expand(size_t expand_bytes) {
1791 size_t old_mem_size = _g1_storage.committed_size();
1792 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); 1775 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1793 aligned_expand_bytes = align_size_up(aligned_expand_bytes, 1776 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1794 HeapRegion::GrainBytes); 1777 HeapRegion::GrainBytes);
1795 ergo_verbose2(ErgoHeapSizing, 1778 ergo_verbose2(ErgoHeapSizing,
1796 "expand the heap", 1779 "expand the heap",
1797 ergo_format_byte("requested expansion amount") 1780 ergo_format_byte("requested expansion amount")
1798 ergo_format_byte("attempted expansion amount"), 1781 ergo_format_byte("attempted expansion amount"),
1799 expand_bytes, aligned_expand_bytes); 1782 expand_bytes, aligned_expand_bytes);
1783
1784 if (_g1_storage.uncommitted_size() == 0) {
1785 ergo_verbose0(ErgoHeapSizing,
1786 "did not expand the heap",
1787 ergo_format_reason("heap already fully expanded"));
1788 return false;
1789 }
1800 1790
1801 // First commit the memory. 1791 // First commit the memory.
1802 HeapWord* old_end = (HeapWord*) _g1_storage.high(); 1792 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1803 bool successful = _g1_storage.expand_by(aligned_expand_bytes); 1793 bool successful = _g1_storage.expand_by(aligned_expand_bytes);
1804 if (successful) { 1794 if (successful) {
1854 } 1844 }
1855 return successful; 1845 return successful;
1856 } 1846 }
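
expand() now returns early, with an ergonomics message, when the virtual space has no uncommitted bytes left instead of attempting a pointless commit, and the unused old_mem_size locals disappear both here and in shrink_helper(). A small sketch of that guard, assuming a simplified virtual-space stand-in:

    #include <cstddef>
    #include <cstdio>

    // Stand-in for the committed/uncommitted bookkeeping of a reserved space.
    struct VirtualSpace {
      std::size_t reserved;
      std::size_t committed;
      std::size_t uncommitted() const { return reserved - committed; }
      bool expand_by(std::size_t bytes) {
        if (bytes > uncommitted()) return false;
        committed += bytes;
        return true;
      }
    };

    static bool expand(VirtualSpace& vs, std::size_t bytes) {
      if (vs.uncommitted() == 0) {
        std::puts("did not expand the heap: already fully expanded");
        return false;                       // early return, as in the new code
      }
      return vs.expand_by(bytes);           // only now attempt the commit
    }

    int main() {
      VirtualSpace vs{1024, 1024};          // fully committed already
      return expand(vs, 64) ? 1 : 0;        // expect the early return
    }
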
1857 1847
1858 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { 1848 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1859 size_t old_mem_size = _g1_storage.committed_size();
1860 size_t aligned_shrink_bytes = 1849 size_t aligned_shrink_bytes =
1861 ReservedSpace::page_align_size_down(shrink_bytes); 1850 ReservedSpace::page_align_size_down(shrink_bytes);
1862 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, 1851 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1863 HeapRegion::GrainBytes); 1852 HeapRegion::GrainBytes);
1864 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); 1853 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
2017 // HeapWordSize). 2006 // HeapWordSize).
2018 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); 2007 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
2019 2008
2020 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); 2009 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
2021 size_t max_byte_size = collector_policy()->max_heap_byte_size(); 2010 size_t max_byte_size = collector_policy()->max_heap_byte_size();
2011 size_t heap_alignment = collector_policy()->max_alignment();
2022 2012
2023 // Ensure that the sizes are properly aligned. 2013 // Ensure that the sizes are properly aligned.
2024 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); 2014 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
2025 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); 2015 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2016 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
2026 2017
2027 _cg1r = new ConcurrentG1Refine(this); 2018 _cg1r = new ConcurrentG1Refine(this);
2028 2019
2029 // Reserve the maximum. 2020 // Reserve the maximum.
2030 2021
2037 // base of the reserved heap may end up differing from the 2028 // base of the reserved heap may end up differing from the
2038 // address that was requested (i.e. the preferred heap base). 2029 // address that was requested (i.e. the preferred heap base).
2039 // If this happens then we could end up using a non-optimal 2030 // If this happens then we could end up using a non-optimal
2040 // compressed oops mode. 2031 // compressed oops mode.
2041 2032
2042 // Since max_byte_size is aligned to the size of a heap region (checked
2043 // above).
2044 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2045
2046 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, 2033 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2047 HeapRegion::GrainBytes); 2034 heap_alignment);
2048 2035
2049 // It is important to do this in a way such that concurrent readers can't 2036 // It is important to do this in a way such that concurrent readers can't
2050 // temporarily think something is in the heap. (I've actually seen this 2037 // temporarily think something is in the heap. (I've actually seen this
2051 // happen in asserts: DLD.) 2038 // happen in asserts: DLD.)
2052 _reserved.set_word_size(0); 2039 _reserved.set_word_size(0);
2056 _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes); 2043 _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2057 2044
2058 // Create the gen rem set (and barrier set) for the entire reserved region. 2045 // Create the gen rem set (and barrier set) for the entire reserved region.
2059 _rem_set = collector_policy()->create_rem_set(_reserved, 2); 2046 _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2060 set_barrier_set(rem_set()->bs()); 2047 set_barrier_set(rem_set()->bs());
2061 if (barrier_set()->is_a(BarrierSet::ModRef)) { 2048 if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
2062 _mr_bs = (ModRefBarrierSet*)_barrier_set; 2049 vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
2063 } else {
2064 vm_exit_during_initialization("G1 requires a mod ref bs.");
2065 return JNI_ENOMEM; 2050 return JNI_ENOMEM;
2066 } 2051 }
2067 2052
2068 // Also create a G1 rem set. 2053 // Also create a G1 rem set.
2069 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { 2054 _g1_rem_set = new G1RemSet(this, g1_barrier_set());
2070 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
2071 } else {
2072 vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
2073 return JNI_ENOMEM;
2074 }
2075 2055
2076 // Carve out the G1 part of the heap. 2056 // Carve out the G1 part of the heap.
2077 2057
2078 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); 2058 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
2079 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), 2059 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
2080 g1_rs.size()/HeapWordSize); 2060 g1_rs.size()/HeapWordSize);
2081 2061
2082 _g1_storage.initialize(g1_rs, 0); 2062 _g1_storage.initialize(g1_rs, 0);
2083 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); 2063 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2084 _hrs.initialize((HeapWord*) _g1_reserved.start(), 2064 _hrs.initialize((HeapWord*) _g1_reserved.start(),
2085 (HeapWord*) _g1_reserved.end(), 2065 (HeapWord*) _g1_reserved.end());
2086 _expansion_regions); 2066 assert(_hrs.max_length() == _expansion_regions,
2067 err_msg("max length: %u expansion regions: %u",
2068 _hrs.max_length(), _expansion_regions));
2087 2069
2088 // Do later initialization work for concurrent refinement. 2070 // Do later initialization work for concurrent refinement.
2089 _cg1r->init(); 2071 _cg1r->init();
2090 2072
2091 // 6843694 - ensure that the maximum region index can fit 2073 // 6843694 - ensure that the maximum region index can fit
2200 // Do create of the monitoring and management support so that 2182 // Do create of the monitoring and management support so that
2201 // values in the heap have been properly initialized. 2183 // values in the heap have been properly initialized.
2202 _g1mm = new G1MonitoringSupport(this); 2184 _g1mm = new G1MonitoringSupport(this);
2203 2185
2204 return JNI_OK; 2186 return JNI_OK;
2187 }
2188
2189 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2190 return HeapRegion::max_region_size();
2205 } 2191 }
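
Heap initialization now pulls a heap alignment from the collector policy, checks the maximum heap size against both the region size and that alignment, reserves the heap with the policy alignment, refuses to start unless the barrier set is the G1 SATB logging card table, builds the G1 remembered set directly from g1_barrier_set(), and reports conservative_max_heap_alignment() as the maximum region size. A hedged sketch of the double alignment check (the constants and helper names are illustrative, not HotSpot's):

    #include <cassert>
    #include <cstddef>

    // Illustrative constants; HotSpot derives both from ergonomics and flags.
    constexpr std::size_t kRegionBytes   = std::size_t(1) << 20;  // stand-in for HeapRegion::GrainBytes
    constexpr std::size_t kHeapAlignment = std::size_t(4) << 20;  // stand-in for max_alignment()

    static bool is_aligned(std::size_t value, std::size_t alignment) {
      return alignment != 0 && value % alignment == 0;
    }

    // Mirrors the pair of Universe::check_alignment calls: the maximum heap size
    // must be a multiple of the region size and of the (possibly larger) alignment
    // used when reserving the heap.
    static void check_heap_sizes(std::size_t init_bytes, std::size_t max_bytes) {
      assert(is_aligned(init_bytes, kRegionBytes)   && "initial size not region-aligned");
      assert(is_aligned(max_bytes,  kRegionBytes)   && "max size not region-aligned");
      assert(is_aligned(max_bytes,  kHeapAlignment) && "max size not heap-aligned");
    }

    int main() {
      check_heap_sizes(std::size_t(64) << 20, std::size_t(256) << 20);
      return 0;
    }
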
2206 2192
2207 void G1CollectedHeap::ref_processing_init() { 2193 void G1CollectedHeap::ref_processing_init() {
2208 // Reference processing in G1 currently works as follows: 2194 // Reference processing in G1 currently works as follows:
2209 // 2195 //
2514 trace_heap_before_gc(_gc_tracer_cm); 2500 trace_heap_before_gc(_gc_tracer_cm);
2515 } 2501 }
2516 2502
2517 void G1CollectedHeap::register_concurrent_cycle_end() { 2503 void G1CollectedHeap::register_concurrent_cycle_end() {
2518 if (_concurrent_cycle_started) { 2504 if (_concurrent_cycle_started) {
2519 _gc_timer_cm->register_gc_end(os::elapsed_counter());
2520
2521 if (_cm->has_aborted()) { 2505 if (_cm->has_aborted()) {
2522 _gc_tracer_cm->report_concurrent_mode_failure(); 2506 _gc_tracer_cm->report_concurrent_mode_failure();
2523 } 2507 }
2508
2509 _gc_timer_cm->register_gc_end(os::elapsed_counter());
2524 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); 2510 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2525 2511
2526 _concurrent_cycle_started = false; 2512 _concurrent_cycle_started = false;
2527 } 2513 }
2528 } 2514 }
3116 case VerifyOption_G1UseMarkWord: return "NONE"; 3102 case VerifyOption_G1UseMarkWord: return "NONE";
3117 default: ShouldNotReachHere(); 3103 default: ShouldNotReachHere();
3118 } 3104 }
3119 return NULL; // keep some compilers happy 3105 return NULL; // keep some compilers happy
3120 } 3106 }
3107
3108 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3109 // pass it as the perm_blk to SharedHeap::process_strong_roots.
3110 // When process_strong_roots stop calling perm_blk->younger_refs_iterate
3111 // we can change this closure to extend the simpler OopClosure.
3112 class VerifyRootsClosure: public OopsInGenClosure {
3113 private:
3114 G1CollectedHeap* _g1h;
3115 VerifyOption _vo;
3116 bool _failures;
3117 public:
3118 // _vo == UsePrevMarking -> use "prev" marking information,
3119 // _vo == UseNextMarking -> use "next" marking information,
3120 // _vo == UseMarkWord -> use mark word from object header.
3121 VerifyRootsClosure(VerifyOption vo) :
3122 _g1h(G1CollectedHeap::heap()),
3123 _vo(vo),
3124 _failures(false) { }
3125
3126 bool failures() { return _failures; }
3127
3128 template <class T> void do_oop_nv(T* p) {
3129 T heap_oop = oopDesc::load_heap_oop(p);
3130 if (!oopDesc::is_null(heap_oop)) {
3131 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3132 if (_g1h->is_obj_dead_cond(obj, _vo)) {
3133 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3134 "points to dead obj "PTR_FORMAT, p, (void*) obj);
3135 if (_vo == VerifyOption_G1UseMarkWord) {
3136 gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3137 }
3138 obj->print_on(gclog_or_tty);
3139 _failures = true;
3140 }
3141 }
3142 }
3143
3144 void do_oop(oop* p) { do_oop_nv(p); }
3145 void do_oop(narrowOop* p) { do_oop_nv(p); }
3146 };
3147
3148 class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
3149 G1CollectedHeap* _g1h;
3150 OopClosure* _root_cl;
3151 nmethod* _nm;
3152 VerifyOption _vo;
3153 bool _failures;
3154
3155 template <class T> void do_oop_work(T* p) {
3156 // First verify that this root is live
3157 _root_cl->do_oop(p);
3158
3159 if (!G1VerifyHeapRegionCodeRoots) {
3160 // We're not verifying the code roots attached to heap region.
3161 return;
3162 }
3163
3164 // Don't check the code roots during marking verification in a full GC
3165 if (_vo == VerifyOption_G1UseMarkWord) {
3166 return;
3167 }
3168
3169 // Now verify that the current nmethod (which contains p) is
3170 // in the code root list of the heap region containing the
3171 // object referenced by p.
3172
3173 T heap_oop = oopDesc::load_heap_oop(p);
3174 if (!oopDesc::is_null(heap_oop)) {
3175 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3176
3177 // Now fetch the region containing the object
3178 HeapRegion* hr = _g1h->heap_region_containing(obj);
3179 HeapRegionRemSet* hrrs = hr->rem_set();
3180 // Verify that the strong code root list for this region
3181 // contains the nmethod
3182 if (!hrrs->strong_code_roots_list_contains(_nm)) {
3183 gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
3184 "from nmethod "PTR_FORMAT" not in strong "
3185 "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
3186 p, _nm, hr->bottom(), hr->end());
3187 _failures = true;
3188 }
3189 }
3190 }
3191
3192 public:
3193 G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
3194 _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
3195
3196 void do_oop(oop* p) { do_oop_work(p); }
3197 void do_oop(narrowOop* p) { do_oop_work(p); }
3198
3199 void set_nmethod(nmethod* nm) { _nm = nm; }
3200 bool failures() { return _failures; }
3201 };
3202
3203 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
3204 G1VerifyCodeRootOopClosure* _oop_cl;
3205
3206 public:
3207 G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
3208 _oop_cl(oop_cl) {}
3209
3210 void do_code_blob(CodeBlob* cb) {
3211 nmethod* nm = cb->as_nmethod_or_null();
3212 if (nm != NULL) {
3213 _oop_cl->set_nmethod(nm);
3214 nm->oops_do(_oop_cl);
3215 }
3216 }
3217 };
3218
3219 class YoungRefCounterClosure : public OopClosure {
3220 G1CollectedHeap* _g1h;
3221 int _count;
3222 public:
3223 YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3224 void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
3225 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3226
3227 int count() { return _count; }
3228 void reset_count() { _count = 0; };
3229 };
3230
3231 class VerifyKlassClosure: public KlassClosure {
3232 YoungRefCounterClosure _young_ref_counter_closure;
3233 OopClosure *_oop_closure;
3234 public:
3235 VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3236 void do_klass(Klass* k) {
3237 k->oops_do(_oop_closure);
3238
3239 _young_ref_counter_closure.reset_count();
3240 k->oops_do(&_young_ref_counter_closure);
3241 if (_young_ref_counter_closure.count() > 0) {
3242 guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
3243 }
3244 }
3245 };
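
The verification closures moved up here also gain code-root checking: G1VerifyCodeRootOopClosure first delegates each root to the ordinary root verifier and then, unless code-root verification is disabled or a mark-word pass is running, confirms that the region containing the referenced object lists the current nmethod among its strong code roots; G1VerifyCodeRootBlobClosure supplies that nmethod before walking its oops. A simplified sketch of the "set the nmethod, then walk its oops" pairing, with stand-in types rather than HotSpot's:

    #include <cstdio>
    #include <set>
    #include <vector>

    // Illustrative stand-ins; not HotSpot's nmethod/HeapRegion types.
    struct Nmethod { std::vector<void**> oop_slots; };
    struct Region  { std::set<Nmethod*> strong_code_roots; };

    static Region* region_containing(void* /*obj*/) { static Region r; return &r; }

    struct VerifyCodeRootOops {
      Nmethod* nm = nullptr;               // set by the blob closure before each walk
      bool failures = false;

      void do_oop(void** p) {
        if (*p == nullptr) return;
        Region* hr = region_containing(*p);
        if (hr->strong_code_roots.count(nm) == 0) {
          std::printf("code root %p from nmethod %p missing from region list\n",
                      (void*)p, (void*)nm);
          failures = true;                 // record and keep scanning, like the real closure
        }
      }
    };

    struct VerifyCodeRootBlobs {
      VerifyCodeRootOops* oop_cl;
      void do_nmethod(Nmethod* nm) {
        oop_cl->nm = nm;                   // establish context, then visit every oop slot
        for (void** p : nm->oop_slots) oop_cl->do_oop(p);
      }
    };

    int main() {
      int obj = 0; void* ref = &obj;
      Nmethod nm; nm.oop_slots.push_back(&ref);
      VerifyCodeRootOops oops;
      VerifyCodeRootBlobs blobs{&oops};
      blobs.do_nmethod(&nm);               // expect a failure: the region list is empty
      return oops.failures ? 0 : 1;
    }
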
3121 3246
3122 class VerifyLivenessOopClosure: public OopClosure { 3247 class VerifyLivenessOopClosure: public OopClosure {
3123 G1CollectedHeap* _g1h; 3248 G1CollectedHeap* _g1h;
3124 VerifyOption _vo; 3249 VerifyOption _vo;
3125 public: 3250 public:
3250 } 3375 }
3251 return false; // stop the region iteration if we hit a failure 3376 return false; // stop the region iteration if we hit a failure
3252 } 3377 }
3253 }; 3378 };
3254 3379
3255 class YoungRefCounterClosure : public OopClosure { 3380 // This is the task used for parallel verification of the heap regions
3256 G1CollectedHeap* _g1h;
3257 int _count;
3258 public:
3259 YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3260 void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
3261 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3262
3263 int count() { return _count; }
3264 void reset_count() { _count = 0; };
3265 };
3266
3267 class VerifyKlassClosure: public KlassClosure {
3268 YoungRefCounterClosure _young_ref_counter_closure;
3269 OopClosure *_oop_closure;
3270 public:
3271 VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3272 void do_klass(Klass* k) {
3273 k->oops_do(_oop_closure);
3274
3275 _young_ref_counter_closure.reset_count();
3276 k->oops_do(&_young_ref_counter_closure);
3277 if (_young_ref_counter_closure.count() > 0) {
3278 guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
3279 }
3280 }
3281 };
3282
3283 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3284 // pass it as the perm_blk to SharedHeap::process_strong_roots.
3285 // When process_strong_roots stop calling perm_blk->younger_refs_iterate
3286 // we can change this closure to extend the simpler OopClosure.
3287 class VerifyRootsClosure: public OopsInGenClosure {
3288 private:
3289 G1CollectedHeap* _g1h;
3290 VerifyOption _vo;
3291 bool _failures;
3292 public:
3293 // _vo == UsePrevMarking -> use "prev" marking information,
3294 // _vo == UseNextMarking -> use "next" marking information,
3295 // _vo == UseMarkWord -> use mark word from object header.
3296 VerifyRootsClosure(VerifyOption vo) :
3297 _g1h(G1CollectedHeap::heap()),
3298 _vo(vo),
3299 _failures(false) { }
3300
3301 bool failures() { return _failures; }
3302
3303 template <class T> void do_oop_nv(T* p) {
3304 T heap_oop = oopDesc::load_heap_oop(p);
3305 if (!oopDesc::is_null(heap_oop)) {
3306 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3307 if (_g1h->is_obj_dead_cond(obj, _vo)) {
3308 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3309 "points to dead obj "PTR_FORMAT, p, (void*) obj);
3310 if (_vo == VerifyOption_G1UseMarkWord) {
3311 gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3312 }
3313 obj->print_on(gclog_or_tty);
3314 _failures = true;
3315 }
3316 }
3317 }
3318
3319 void do_oop(oop* p) { do_oop_nv(p); }
3320 void do_oop(narrowOop* p) { do_oop_nv(p); }
3321 };
3322
3323 // This is the task used for parallel heap verification.
3324 3381
3325 class G1ParVerifyTask: public AbstractGangTask { 3382 class G1ParVerifyTask: public AbstractGangTask {
3326 private: 3383 private:
3327 G1CollectedHeap* _g1h; 3384 G1CollectedHeap* _g1h;
3328 VerifyOption _vo; 3385 VerifyOption _vo;
3352 _failures = true; 3409 _failures = true;
3353 } 3410 }
3354 } 3411 }
3355 }; 3412 };
3356 3413
3357 void G1CollectedHeap::verify(bool silent) { 3414 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3358 verify(silent, VerifyOption_G1UsePrevMarking);
3359 }
3360
3361 void G1CollectedHeap::verify(bool silent,
3362 VerifyOption vo) {
3363 if (SafepointSynchronize::is_at_safepoint()) { 3415 if (SafepointSynchronize::is_at_safepoint()) {
3416 assert(Thread::current()->is_VM_thread(),
3417 "Expected to be executed serially by the VM thread at this point");
3418
3364 if (!silent) { gclog_or_tty->print("Roots "); } 3419 if (!silent) { gclog_or_tty->print("Roots "); }
3365 VerifyRootsClosure rootsCl(vo); 3420 VerifyRootsClosure rootsCl(vo);
3366 3421 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3367 assert(Thread::current()->is_VM_thread(), 3422 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3368 "Expected to be executed serially by the VM thread at this point");
3369
3370 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
3371 VerifyKlassClosure klassCl(this, &rootsCl); 3423 VerifyKlassClosure klassCl(this, &rootsCl);
3372 3424
3373 // We apply the relevant closures to all the oops in the 3425 // We apply the relevant closures to all the oops in the
3374 // system dictionary, the string table and the code cache. 3426 // system dictionary, the string table and the code cache.
3375 const int so = SO_AllClasses | SO_Strings | SO_CodeCache; 3427 const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3384 &rootsCl, 3436 &rootsCl,
3385 &blobsCl, 3437 &blobsCl,
3386 &klassCl 3438 &klassCl
3387 ); 3439 );
3388 3440
3389 bool failures = rootsCl.failures(); 3441 bool failures = rootsCl.failures() || codeRootsCl.failures();
3390 3442
3391 if (vo != VerifyOption_G1UseMarkWord) { 3443 if (vo != VerifyOption_G1UseMarkWord) {
3392 // If we're verifying during a full GC then the region sets 3444 // If we're verifying during a full GC then the region sets
3393 // will have been torn down at the start of the GC. Therefore 3445 // will have been torn down at the start of the GC. Therefore
3394 // verifying the region sets will fail. So we only verify 3446 // verifying the region sets will fail. So we only verify
3451 guarantee(!failures, "there should not have been any failures"); 3503 guarantee(!failures, "there should not have been any failures");
3452 } else { 3504 } else {
3453 if (!silent) 3505 if (!silent)
3454 gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) "); 3506 gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) ");
3455 } 3507 }
3508 }
3509
3510 void G1CollectedHeap::verify(bool silent) {
3511 verify(silent, VerifyOption_G1UsePrevMarking);
3512 }
3513
3514 double G1CollectedHeap::verify(bool guard, const char* msg) {
3515 double verify_time_ms = 0.0;
3516
3517 if (guard && total_collections() >= VerifyGCStartAt) {
3518 double verify_start = os::elapsedTime();
3519 HandleMark hm; // Discard invalid handles created during verification
3520 prepare_for_verify();
3521 Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3522 verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3523 }
3524
3525 return verify_time_ms;
3526 }
3527
3528 void G1CollectedHeap::verify_before_gc() {
3529 double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
3530 g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3531 }
3532
3533 void G1CollectedHeap::verify_after_gc() {
3534 double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3535 g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3456 } 3536 }
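
The timed verify(guard, msg) helper and its verify_before_gc/verify_after_gc wrappers are simply relocated from earlier in the file; their behavior is unchanged: skip unless the flag is set and enough collections have run, otherwise time a prev-marking verification and hand the milliseconds to the phase times. A sketch of the guard-and-time pattern with illustrative names:

    #include <chrono>
    #include <cstdio>

    // Stand-ins for the verification flags and for the verification pass itself.
    static bool VerifyBeforeGC    = true;
    static int  VerifyGCStartAt   = 0;
    static int  total_collections = 3;

    static void run_verification(const char* msg) { std::printf("%s heap verified\n", msg); }

    // Returns the time spent verifying, or 0.0 when the guard says "skip".
    static double timed_verify(bool guard, const char* msg) {
      double verify_time_ms = 0.0;
      if (guard && total_collections >= VerifyGCStartAt) {
        auto start = std::chrono::steady_clock::now();
        run_verification(msg);
        auto end = std::chrono::steady_clock::now();
        verify_time_ms = std::chrono::duration<double, std::milli>(end - start).count();
      }
      return verify_time_ms;
    }

    int main() {
      double ms = timed_verify(VerifyBeforeGC, "VerifyBeforeGC:");
      std::printf("verification took %.3f ms\n", ms);  // handed to the phase times in the real code
      return 0;
    }
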
3457 3537
3458 class PrintRegionClosure: public HeapRegionClosure { 3538 class PrintRegionClosure: public HeapRegionClosure {
3459 outputStream* _st; 3539 outputStream* _st;
3460 public: 3540 public:
3602 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { 3682 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3603 // always_do_update_barrier = false; 3683 // always_do_update_barrier = false;
3604 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); 3684 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3605 // Fill TLAB's and such 3685 // Fill TLAB's and such
3606 ensure_parsability(true); 3686 ensure_parsability(true);
3687
3688 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3689 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3690 g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3691 }
3607 } 3692 }
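
gc_prologue() now prints the same periodic remembered-set summary that the epilogue already produced, labeled "Before GC RS summary", and the epilogue's call gains an "After GC RS summary" header; both are gated on G1SummarizeRSetStats with a non-zero period dividing the collection count. A tiny sketch of that modulo gate (the two flag names are real, everything else is illustrative):

    #include <cstdio>

    static bool G1SummarizeRSetStats       = true;
    static int  G1SummarizeRSetStatsPeriod = 4;

    static void print_summary(const char* header, int gc) {
      std::printf("%s (collection %d)\n", header, gc);
    }

    // Print a summary every Nth collection, both before and after the pause.
    static void maybe_summarize(int total_collections, const char* header) {
      if (G1SummarizeRSetStats && G1SummarizeRSetStatsPeriod > 0 &&
          total_collections % G1SummarizeRSetStatsPeriod == 0) {
        print_summary(header, total_collections);
      }
    }

    int main() {
      for (int gc = 0; gc < 10; gc++) {
        maybe_summarize(gc, "Before GC RS summary");
        // ... the pause itself would run here; the epilogue uses (total_collections() - 1)
        //     because the counter has already been bumped by then ...
        maybe_summarize(gc, "After GC RS summary");
      }
      return 0;
    }
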
3608 3693
3609 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { 3694 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
3610 3695
3611 if (G1SummarizeRSetStats && 3696 if (G1SummarizeRSetStats &&
3612 (G1SummarizeRSetStatsPeriod > 0) && 3697 (G1SummarizeRSetStatsPeriod > 0) &&
3613 // we are at the end of the GC. Total collections has already been increased. 3698 // we are at the end of the GC. Total collections has already been increased.
3614 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) { 3699 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3615 g1_rem_set()->print_periodic_summary_info(); 3700 g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3616 } 3701 }
3617 3702
3618 // FIXME: what is this about? 3703 // FIXME: what is this about?
3619 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" 3704 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3620 // is set. 3705 // is set.
3627 Universe::update_heap_info_at_gc(); 3712 Universe::update_heap_info_at_gc();
3628 } 3713 }
3629 3714
3630 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, 3715 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3631 unsigned int gc_count_before, 3716 unsigned int gc_count_before,
3632 bool* succeeded) { 3717 bool* succeeded,
3718 GCCause::Cause gc_cause) {
3633 assert_heap_not_locked_and_not_at_safepoint(); 3719 assert_heap_not_locked_and_not_at_safepoint();
3634 g1_policy()->record_stop_world_start(); 3720 g1_policy()->record_stop_world_start();
3635 VM_G1IncCollectionPause op(gc_count_before, 3721 VM_G1IncCollectionPause op(gc_count_before,
3636 word_size, 3722 word_size,
3637 false, /* should_initiate_conc_mark */ 3723 false, /* should_initiate_conc_mark */
3638 g1_policy()->max_pause_time_ms(), 3724 g1_policy()->max_pause_time_ms(),
3639 GCCause::_g1_inc_collection_pause); 3725 gc_cause);
3640 VMThread::execute(&op); 3726 VMThread::execute(&op);
3641 3727
3642 HeapWord* result = op.result(); 3728 HeapWord* result = op.result();
3643 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); 3729 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3644 assert(result == NULL || ret_succeeded, 3730 assert(result == NULL || ret_succeeded,
3874 // get entries from the secondary_free_list. 3960 // get entries from the secondary_free_list.
3875 if (!G1StressConcRegionFreeing) { 3961 if (!G1StressConcRegionFreeing) {
3876 append_secondary_free_list_if_not_empty_with_lock(); 3962 append_secondary_free_list_if_not_empty_with_lock();
3877 } 3963 }
3878 3964
3879 assert(check_young_list_well_formed(), 3965 assert(check_young_list_well_formed(), "young list should be well formed");
3880 "young list should be well formed"); 3966 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3967 "sanity check");
3881 3968
3882 // Don't dynamically change the number of GC threads this early. A value of 3969 // Don't dynamically change the number of GC threads this early. A value of
3883 // 0 is used to indicate serial work. When parallel work is done, 3970 // 0 is used to indicate serial work. When parallel work is done,
3884 // it will be set. 3971 // it will be set.
3885 3972
4469 4556
4470 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num) 4557 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
4471 : _g1h(g1h), 4558 : _g1h(g1h),
4472 _refs(g1h->task_queue(queue_num)), 4559 _refs(g1h->task_queue(queue_num)),
4473 _dcq(&g1h->dirty_card_queue_set()), 4560 _dcq(&g1h->dirty_card_queue_set()),
4474 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), 4561 _ct_bs(g1h->g1_barrier_set()),
4475 _g1_rem(g1h->g1_rem_set()), 4562 _g1_rem(g1h->g1_rem_set()),
4476 _hash_seed(17), _queue_num(queue_num), 4563 _hash_seed(17), _queue_num(queue_num),
4477 _term_attempts(0), 4564 _term_attempts(0),
4478 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), 4565 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4479 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), 4566 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4536 assert(ref != NULL, "invariant"); 4623 assert(ref != NULL, "invariant");
4537 assert(UseCompressedOops, "sanity"); 4624 assert(UseCompressedOops, "sanity");
4538 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); 4625 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
4539 oop p = oopDesc::load_decode_heap_oop(ref); 4626 oop p = oopDesc::load_decode_heap_oop(ref);
4540 assert(_g1h->is_in_g1_reserved(p), 4627 assert(_g1h->is_in_g1_reserved(p),
4541 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); 4628 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4542 return true; 4629 return true;
4543 } 4630 }
4544 4631
4545 bool G1ParScanThreadState::verify_ref(oop* ref) const { 4632 bool G1ParScanThreadState::verify_ref(oop* ref) const {
4546 assert(ref != NULL, "invariant"); 4633 assert(ref != NULL, "invariant");
4547 if (has_partial_array_mask(ref)) { 4634 if (has_partial_array_mask(ref)) {
4548 // Must be in the collection set--it's already been copied. 4635 // Must be in the collection set--it's already been copied.
4549 oop p = clear_partial_array_mask(ref); 4636 oop p = clear_partial_array_mask(ref);
4550 assert(_g1h->obj_in_cs(p), 4637 assert(_g1h->obj_in_cs(p),
4551 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); 4638 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4552 } else { 4639 } else {
4553 oop p = oopDesc::load_decode_heap_oop(ref); 4640 oop p = oopDesc::load_decode_heap_oop(ref);
4554 assert(_g1h->is_in_g1_reserved(p), 4641 assert(_g1h->is_in_g1_reserved(p),
4555 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); 4642 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4556 } 4643 }
4557 return true; 4644 return true;
4558 } 4645 }
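
The assertion messages now cast the oop to (void *) rather than intptr_t before handing it to PTR_FORMAT, so the value is passed as a pointer instead of being squeezed through an integer type, and the scan-state constructor above switches to the typed g1_barrier_set(). A quick illustration of why the pointer cast is the portable spelling for printf-style formats:

    #include <cstdio>

    struct Object { int payload; };

    int main() {
      Object obj{42};
      Object* p = &obj;

      // %p expects a void*; passing an integer type (or an unconverted Object*)
      // is formally mismatched, so casting to (void*) is the portable spelling,
      // which is what the updated err_msg calls do.
      std::printf("ref=%p p=%p\n", (void*)&p, (void*)p);
      return obj.payload == 42 ? 0 : 1;
    }
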
4559 4646
4560 bool G1ParScanThreadState::verify_task(StarTask ref) const { 4647 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4995 scan_klasses_cl = &scan_mark_klasses_cl_s; 5082 scan_klasses_cl = &scan_mark_klasses_cl_s;
4996 } 5083 }
4997 5084
4998 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); 5085 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4999 5086
5000 int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; 5087 // Don't scan the scavengable methods in the code cache as part
5088 // of strong root scanning. The code roots that point into a
5089 // region in the collection set are scanned when we scan the
5090 // region's RSet.
5091 int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
5001 5092
5002 pss.start_strong_roots(); 5093 pss.start_strong_roots();
5003 _g1h->g1_process_strong_roots(/* is scavenging */ true, 5094 _g1h->g1_process_strong_roots(/* is scavenging */ true,
5004 SharedHeap::ScanningOption(so), 5095 SharedHeap::ScanningOption(so),
5005 scan_root_cl, 5096 scan_root_cl,
5037 } 5128 }
5038 }; 5129 };
5039 5130
5040 // *** Common G1 Evacuation Stuff 5131 // *** Common G1 Evacuation Stuff
5041 5132
5042 // Closures that support the filtering of CodeBlobs scanned during
5043 // external root scanning.
5044
5045 // Closure applied to reference fields in code blobs (specifically nmethods)
5046 // to determine whether an nmethod contains references that point into
5047 // the collection set. Used as a predicate when walking code roots so
5048 // that only nmethods that point into the collection set are added to the
5049 // 'marked' list.
5050
5051 class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
5052
5053 class G1PointsIntoCSOopClosure : public OopClosure {
5054 G1CollectedHeap* _g1;
5055 bool _points_into_cs;
5056 public:
5057 G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
5058 _g1(g1), _points_into_cs(false) { }
5059
5060 bool points_into_cs() const { return _points_into_cs; }
5061
5062 template <class T>
5063 void do_oop_nv(T* p) {
5064 if (!_points_into_cs) {
5065 T heap_oop = oopDesc::load_heap_oop(p);
5066 if (!oopDesc::is_null(heap_oop) &&
5067 _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
5068 _points_into_cs = true;
5069 }
5070 }
5071 }
5072
5073 virtual void do_oop(oop* p) { do_oop_nv(p); }
5074 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
5075 };
5076
5077 G1CollectedHeap* _g1;
5078
5079 public:
5080 G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
5081 CodeBlobToOopClosure(cl, true), _g1(g1) { }
5082
5083 virtual void do_code_blob(CodeBlob* cb) {
5084 nmethod* nm = cb->as_nmethod_or_null();
5085 if (nm != NULL && !(nm->test_oops_do_mark())) {
5086 G1PointsIntoCSOopClosure predicate_cl(_g1);
5087 nm->oops_do(&predicate_cl);
5088
5089 if (predicate_cl.points_into_cs()) {
5090 // At least one of the reference fields or the oop relocations
5091 // in the nmethod points into the collection set. We have to
5092 // 'mark' this nmethod.
5093 // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
5094 // or MarkingCodeBlobClosure::do_code_blob() change.
5095 if (!nm->test_set_oops_do_mark()) {
5096 do_newly_marked_nmethod(nm);
5097 }
5098 }
5099 }
5100 }
5101 };
5102
5103 // This method is run in a GC worker. 5133 // This method is run in a GC worker.
5104 5134
5105 void 5135 void
5106 G1CollectedHeap:: 5136 G1CollectedHeap::
5107 g1_process_strong_roots(bool is_scavenging, 5137 g1_process_strong_roots(bool is_scavenging,
5115 double ext_roots_start = os::elapsedTime(); 5145 double ext_roots_start = os::elapsedTime();
5116 double closure_app_time_sec = 0.0; 5146 double closure_app_time_sec = 0.0;
5117 5147
5118 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); 5148 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5119 5149
5120 // Walk the code cache w/o buffering, because StarTask cannot handle 5150 assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
5121 // unaligned oop locations. 5151 // Walk the code cache/strong code roots w/o buffering, because StarTask
5122 G1FilteredCodeBlobToOopClosure eager_scan_cs_code_roots(this, scan_non_heap_roots); 5152 // cannot handle unaligned oop locations.
5123 5153 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
5124 // Scan all code roots from stack
5125 CodeBlobToOopClosure eager_scan_all_code_roots(scan_non_heap_roots, true);
5126 CodeBlobToOopClosure* blobs = &eager_scan_cs_code_roots;
5127 if (UseNewCode && g1_policy()->during_initial_mark_pause()) {
5128 // during initial-mark we need to take care to follow all code roots
5129 blobs = &eager_scan_all_code_roots;
5130 }
5131 5154
5132 process_strong_roots(false, // no scoping; this is parallel code 5155 process_strong_roots(false, // no scoping; this is parallel code
5133 is_scavenging, so, 5156 is_scavenging, so,
5134 &buf_scan_non_heap_roots, 5157 &buf_scan_non_heap_roots,
5135 blobs, 5158 &eager_scan_code_roots,
5136 scan_klasses 5159 scan_klasses
5137 ); 5160 );
5138 5161
5139 // Now the CM ref_processor roots. 5162 // Now the CM ref_processor roots.
5140 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { 5163 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
5170 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0; 5193 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
5171 } 5194 }
5172 } 5195 }
5173 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms); 5196 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
5174 5197
5198 // If this is an initial mark pause, and we're not scanning
5199 // the entire code cache, we need to mark the oops in the
5200 // strong code root lists for the regions that are not in
5201 // the collection set.
5202 // Note all threads participate in this set of root tasks.
5203 double mark_strong_code_roots_ms = 0.0;
5204 if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
5205 double mark_strong_roots_start = os::elapsedTime();
5206 mark_strong_code_roots(worker_i);
5207 mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
5208 }
5209 g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
5210
5175 // Now scan the complement of the collection set. 5211 // Now scan the complement of the collection set.
5176 if (scan_rs != NULL) { 5212 if (scan_rs != NULL) {
5177 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); 5213 g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
5178 } 5214 }
5179 _process_strong_tasks->all_tasks_completed(); 5215 _process_strong_tasks->all_tasks_completed();
5180 } 5216 }
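
Within g1_process_strong_roots the hand-rolled G1FilteredCodeBlobToOopClosure (which pre-scanned every nmethod to decide whether it pointed into the collection set) is gone: external roots now use a plain marking CodeBlobToOopClosure, still unbuffered because StarTask cannot hold unaligned oop locations; collection-set code roots are instead reached through the per-region remembered sets via oops_into_collection_set_do; and on initial-mark pauses the strong code roots of regions outside the collection set are marked in a separate timed step. Per the comment later in this file, a marking walk visits each nmethod at most once per scan; the sketch below models that idea with a plain visited flag on a stand-in nmethod type:

    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for an nmethod carrying a "visited during this scan" mark.
    struct Nmethod {
      bool marked = false;
      std::vector<int*> oop_slots;
    };

    struct OopVisitor {
      int visited = 0;
      void do_oop(int* p) { if (p != nullptr) visited++; }
    };

    // Marking walk: claim the nmethod first so later walkers skip it.
    static void scan_code_blob(Nmethod& nm, OopVisitor& cl) {
      if (nm.marked) return;               // already claimed by another root scan
      nm.marked = true;
      for (int* p : nm.oop_slots) cl.do_oop(p);
    }

    int main() {
      int a = 1, b = 2;
      Nmethod nm;
      nm.oop_slots = {&a, &b};
      OopVisitor cl;
      scan_code_blob(nm, cl);              // visits both slots
      scan_code_blob(nm, cl);              // second walk is a no-op
      std::printf("oops visited: %d\n", cl.visited);
      return cl.visited == 2 ? 0 : 1;
    }
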
5181 5217
5182 void 5218 void
5790 // objects (and their reachable sub-graphs) that were 5826 // objects (and their reachable sub-graphs) that were
5791 // not copied during the pause. 5827 // not copied during the pause.
5792 process_discovered_references(n_workers); 5828 process_discovered_references(n_workers);
5793 5829
5794 // Weak root processing. 5830 // Weak root processing.
5795 // Note: when JSR 292 is enabled and code blobs can contain
5796 // non-perm oops then we will need to process the code blobs
5797 // here too.
5798 { 5831 {
5799 G1STWIsAliveClosure is_alive(this); 5832 G1STWIsAliveClosure is_alive(this);
5800 G1KeepAliveClosure keep_alive(this); 5833 G1KeepAliveClosure keep_alive(this);
5801 JNIHandles::weak_oops_do(&is_alive, &keep_alive); 5834 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5802 } 5835 }
5807 // Reset and re-enable the hot card cache. 5840 // Reset and re-enable the hot card cache.
5808 // Note the counts for the cards in the regions in the 5841 // Note the counts for the cards in the regions in the
5809 // collection set are reset when the collection set is freed. 5842 // collection set are reset when the collection set is freed.
5810 hot_card_cache->reset_hot_cache(); 5843 hot_card_cache->reset_hot_cache();
5811 hot_card_cache->set_use_cache(true); 5844 hot_card_cache->set_use_cache(true);
5845
5846 // Migrate the strong code roots attached to each region in
5847 // the collection set. Ideally we would like to do this
5848 // after we have finished the scanning/evacuation of the
5849 // strong code roots for a particular heap region.
5850 migrate_strong_code_roots();
5851
5852 if (g1_policy()->during_initial_mark_pause()) {
5853 // Reset the claim values set during marking the strong code roots
5854 reset_heap_region_claim_values();
5855 }
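
After evacuation the pause now migrates the strong code root lists of the collection-set regions, so the entries follow the copied objects into their new regions, and an initial-mark pause additionally resets the claim values used while marking code roots. A sketch of the timed collection-set walk with a hypothetical per-region migrate hook (Region and migrate_strong_code_roots here are stand-ins, not HotSpot's types):

    #include <chrono>
    #include <cstdio>
    #include <vector>

    // Illustrative stand-in: each collection-set region knows how to move its own
    // strong code root entries to the regions its objects were evacuated into.
    struct Region {
      int migrated = 0;
      void migrate_strong_code_roots() { migrated++; }
    };

    static double migrate_all(std::vector<Region>& collection_set) {
      auto start = std::chrono::steady_clock::now();
      for (Region& r : collection_set) {
        r.migrate_strong_code_roots();     // per-region work, done once copying has finished
      }
      auto end = std::chrono::steady_clock::now();
      return std::chrono::duration<double, std::milli>(end - start).count();
    }

    int main() {
      std::vector<Region> cset(8);
      double ms = migrate_all(cset);       // recorded as the code root migration phase time
      std::printf("migrated %zu regions in %.3f ms\n", cset.size(), ms);
      return 0;
    }
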
5812 5856
5813 finalize_for_evac_failure(); 5857 finalize_for_evac_failure();
5814 5858
5815 if (evacuation_failed()) { 5859 if (evacuation_failed()) {
5816 remove_self_forwarding_pointers(); 5860 remove_self_forwarding_pointers();
5941 _humongous_set.update_from_proxy(humongous_proxy_set); 5985 _humongous_set.update_from_proxy(humongous_proxy_set);
5942 } 5986 }
5943 } 5987 }
5944 5988
5945 class G1ParCleanupCTTask : public AbstractGangTask { 5989 class G1ParCleanupCTTask : public AbstractGangTask {
5946 CardTableModRefBS* _ct_bs; 5990 G1SATBCardTableModRefBS* _ct_bs;
5947 G1CollectedHeap* _g1h; 5991 G1CollectedHeap* _g1h;
5948 HeapRegion* volatile _su_head; 5992 HeapRegion* volatile _su_head;
5949 public: 5993 public:
5950 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, 5994 G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
5951 G1CollectedHeap* g1h) : 5995 G1CollectedHeap* g1h) :
5952 AbstractGangTask("G1 Par Cleanup CT Task"), 5996 AbstractGangTask("G1 Par Cleanup CT Task"),
5953 _ct_bs(ct_bs), _g1h(g1h) { } 5997 _ct_bs(ct_bs), _g1h(g1h) { }
5954 5998
5955 void work(uint worker_id) { 5999 void work(uint worker_id) {
5968 }; 6012 };
5969 6013
5970 #ifndef PRODUCT 6014 #ifndef PRODUCT
5971 class G1VerifyCardTableCleanup: public HeapRegionClosure { 6015 class G1VerifyCardTableCleanup: public HeapRegionClosure {
5972 G1CollectedHeap* _g1h; 6016 G1CollectedHeap* _g1h;
5973 CardTableModRefBS* _ct_bs; 6017 G1SATBCardTableModRefBS* _ct_bs;
5974 public: 6018 public:
5975 G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs) 6019 G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
5976 : _g1h(g1h), _ct_bs(ct_bs) { } 6020 : _g1h(g1h), _ct_bs(ct_bs) { }
5977 virtual bool doHeapRegion(HeapRegion* r) { 6021 virtual bool doHeapRegion(HeapRegion* r) {
5978 if (r->is_survivor()) { 6022 if (r->is_survivor()) {
5979 _g1h->verify_dirty_region(r); 6023 _g1h->verify_dirty_region(r);
5980 } else { 6024 } else {
5984 } 6028 }
5985 }; 6029 };
5986 6030
5987 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) { 6031 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
5988 // All of the region should be clean. 6032 // All of the region should be clean.
5989 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); 6033 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5990 MemRegion mr(hr->bottom(), hr->end()); 6034 MemRegion mr(hr->bottom(), hr->end());
5991 ct_bs->verify_not_dirty_region(mr); 6035 ct_bs->verify_not_dirty_region(mr);
5992 } 6036 }
5993 6037
5994 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) { 6038 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
5997 // retires each region and replaces it with a new one will do a 6041 // retires each region and replaces it with a new one will do a
5998 // maximal allocation to fill in [pre_dummy_top(),end()] but will 6042 // maximal allocation to fill in [pre_dummy_top(),end()] but will
5999 // not dirty that area (one less thing to have to do while holding 6043 // not dirty that area (one less thing to have to do while holding
6000 // a lock). So we can only verify that [bottom(),pre_dummy_top()] 6044 // a lock). So we can only verify that [bottom(),pre_dummy_top()]
6001 // is dirty. 6045 // is dirty.
6002 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); 6046 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6003 MemRegion mr(hr->bottom(), hr->pre_dummy_top()); 6047 MemRegion mr(hr->bottom(), hr->pre_dummy_top());
6004 ct_bs->verify_dirty_region(mr); 6048 ct_bs->verify_dirty_region(mr);
6005 } 6049 }
6006 6050
6007 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { 6051 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
6008 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); 6052 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6009 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { 6053 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
6010 verify_dirty_region(hr); 6054 verify_dirty_region(hr);
6011 } 6055 }
6012 } 6056 }
6013 6057
6015 verify_dirty_young_list(_young_list->first_region()); 6059 verify_dirty_young_list(_young_list->first_region());
6016 } 6060 }
6017 #endif 6061 #endif
6018 6062
6019 void G1CollectedHeap::cleanUpCardTable() { 6063 void G1CollectedHeap::cleanUpCardTable() {
6020 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); 6064 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6021 double start = os::elapsedTime(); 6065 double start = os::elapsedTime();
6022 6066
6023 { 6067 {
6024 // Iterate over the dirty cards region list. 6068 // Iterate over the dirty cards region list.
6025 G1ParCleanupCTTask cleanup_task(ct_bs, this); 6069 G1ParCleanupCTTask cleanup_task(ct_bs, this);
6604 6648
6605 _old_set.verify_end(); 6649 _old_set.verify_end();
6606 _humongous_set.verify_end(); 6650 _humongous_set.verify_end();
6607 _free_list.verify_end(); 6651 _free_list.verify_end();
6608 } 6652 }
6653
6654 // Optimized nmethod scanning
6655
6656 class RegisterNMethodOopClosure: public OopClosure {
6657 G1CollectedHeap* _g1h;
6658 nmethod* _nm;
6659
6660 template <class T> void do_oop_work(T* p) {
6661 T heap_oop = oopDesc::load_heap_oop(p);
6662 if (!oopDesc::is_null(heap_oop)) {
6663 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6664 HeapRegion* hr = _g1h->heap_region_containing(obj);
6665 assert(!hr->isHumongous(), "code root in humongous region?");
6666
6667 // HeapRegion::add_strong_code_root() avoids adding duplicate
6668 // entries but having duplicates is OK since we "mark" nmethods
6669 // as visited when we scan the strong code root lists during the GC.
6670 hr->add_strong_code_root(_nm);
6671 assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?");
6672 }
6673 }
6674
6675 public:
6676 RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6677 _g1h(g1h), _nm(nm) {}
6678
6679 void do_oop(oop* p) { do_oop_work(p); }
6680 void do_oop(narrowOop* p) { do_oop_work(p); }
6681 };
6682
6683 class UnregisterNMethodOopClosure: public OopClosure {
6684 G1CollectedHeap* _g1h;
6685 nmethod* _nm;
6686
6687 template <class T> void do_oop_work(T* p) {
6688 T heap_oop = oopDesc::load_heap_oop(p);
6689 if (!oopDesc::is_null(heap_oop)) {
6690 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6691 HeapRegion* hr = _g1h->heap_region_containing(obj);
6692 assert(!hr->isHumongous(), "code root in humongous region?");
6693 hr->remove_strong_code_root(_nm);
6694 assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?");
6695 }
6696 }
6697
6698 public:
6699 UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6700 _g1h(g1h), _nm(nm) {}
6701
6702 void do_oop(oop* p) { do_oop_work(p); }
6703 void do_oop(narrowOop* p) { do_oop_work(p); }
6704 };
6705
6706 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6707 CollectedHeap::register_nmethod(nm);
6708
6709 guarantee(nm != NULL, "sanity");
6710 RegisterNMethodOopClosure reg_cl(this, nm);
6711 nm->oops_do(&reg_cl);
6712 }
6713
6714 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
6715 CollectedHeap::unregister_nmethod(nm);
6716
6717 guarantee(nm != NULL, "sanity");
6718 UnregisterNMethodOopClosure reg_cl(this, nm);
6719 nm->oops_do(&reg_cl, true);
6720 }
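
register_nmethod()/unregister_nmethod() walk every oop embedded in the nmethod and add or remove the nmethod on the strong code root list of the region containing each referenced object; per the comment above, add_strong_code_root() skips duplicates, and duplicates would be tolerated anyway because scanning marks nmethods as visited. A simplified sketch of that bookkeeping with stand-in types (region lookup is collapsed into a precomputed list of referenced regions):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Stand-ins; not HotSpot's HeapRegion/nmethod. Region lookup is collapsed into
    // a precomputed list of the regions referenced by the nmethod's embedded oops.
    struct Nmethod;
    struct Region  { std::vector<Nmethod*> strong_code_roots; };
    struct Nmethod { std::vector<Region*> referenced_regions; };

    static void register_nmethod(Nmethod* nm) {
      for (Region* hr : nm->referenced_regions) {
        auto& roots = hr->strong_code_roots;
        if (std::find(roots.begin(), roots.end(), nm) == roots.end()) {
          roots.push_back(nm);             // add_strong_code_root skips duplicates as well
        }
      }
    }

    static void unregister_nmethod(Nmethod* nm) {
      for (Region* hr : nm->referenced_regions) {
        auto& roots = hr->strong_code_roots;
        roots.erase(std::remove(roots.begin(), roots.end(), nm), roots.end());
      }
    }

    int main() {
      Region r1, r2;
      Nmethod nm;
      nm.referenced_regions = {&r1, &r2, &r1};   // two oops into r1, one into r2
      register_nmethod(&nm);
      assert(r1.strong_code_roots.size() == 1 && r2.strong_code_roots.size() == 1);
      unregister_nmethod(&nm);
      assert(r1.strong_code_roots.empty() && r2.strong_code_roots.empty());
      return 0;
    }
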
6721
6722 class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
6723 public:
6724 bool doHeapRegion(HeapRegion *hr) {
6725 assert(!hr->isHumongous(), "humongous region in collection set?");
6726 hr->migrate_strong_code_roots();
6727 return false;
6728 }
6729 };
6730
6731 void G1CollectedHeap::migrate_strong_code_roots() {
6732 MigrateCodeRootsHeapRegionClosure cl;
6733 double migrate_start = os::elapsedTime();
6734 collection_set_iterate(&cl);
6735 double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
6736 g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
6737 }
6738
6739 // Mark all the code roots that point into regions *not* in the
6740 // collection set.
6741 //
6742 // Note we do not want to use a "marking" CodeBlobToOopClosure while
6743 // walking the code roots lists of regions not in the collection
6744 // set. Suppose we have an nmethod (M) that points to objects in two
6745 // separate regions - one in the collection set (R1) and one not (R2).
6746 // Using a "marking" CodeBlobToOopClosure here would result in "marking"
6747 // nmethod M when walking the code roots for R1. When we come to scan
6748 // the code roots for R2, we would see that M is already marked and it
6749 // would be skipped and the objects in R2 that are referenced from M
6750 // would not be evacuated.
6751
6752 class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
6753
6754 class MarkStrongCodeRootOopClosure: public OopClosure {
6755 ConcurrentMark* _cm;
6756 HeapRegion* _hr;
6757 uint _worker_id;
6758
6759 template <class T> void do_oop_work(T* p) {
6760 T heap_oop = oopDesc::load_heap_oop(p);
6761 if (!oopDesc::is_null(heap_oop)) {
6762 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6763 // Only mark objects in the region (which is assumed
6764 // to be not in the collection set).
6765 if (_hr->is_in(obj)) {
6766 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
6767 }
6768 }
6769 }
6770
6771 public:
6772 MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
6773 _cm(cm), _hr(hr), _worker_id(worker_id) {
6774 assert(!_hr->in_collection_set(), "sanity");
6775 }
6776
6777 void do_oop(narrowOop* p) { do_oop_work(p); }
6778 void do_oop(oop* p) { do_oop_work(p); }
6779 };
6780
6781 MarkStrongCodeRootOopClosure _oop_cl;
6782
6783 public:
6784 MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
6785 _oop_cl(cm, hr, worker_id) {}
6786
6787 void do_code_blob(CodeBlob* cb) {
6788 nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
6789 if (nm != NULL) {
6790 nm->oops_do(&_oop_cl);
6791 }
6792 }
6793 };
6794
6795 class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
6796 G1CollectedHeap* _g1h;
6797 uint _worker_id;
6798
6799 public:
6800 MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
6801 _g1h(g1h), _worker_id(worker_id) {}
6802
6803 bool doHeapRegion(HeapRegion *hr) {
6804 HeapRegionRemSet* hrrs = hr->rem_set();
6805 if (hr->isHumongous()) {
6806 // Code roots should never be attached to a humongous region
6807 assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
6808 return false;
6809 }
6810
6811 if (hr->in_collection_set()) {
6812 // Don't mark code roots into regions in the collection set here.
6813 // They will be marked when we scan them.
6814 return false;
6815 }
6816
6817 MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
6818 hr->strong_code_roots_do(&cb_cl);
6819 return false;
6820 }
6821 };
6822
6823 void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
6824 MarkStrongCodeRootsHRClosure cl(this, worker_id);
6825 if (G1CollectedHeap::use_parallel_gc_threads()) {
6826 heap_region_par_iterate_chunked(&cl,
6827 worker_id,
6828 workers()->active_workers(),
6829 HeapRegion::ParMarkRootClaimValue);
6830 } else {
6831 heap_region_iterate(&cl);
6832 }
6833 }
6834
6835 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
6836 G1CollectedHeap* _g1h;
6837
6838 public:
6839 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
6840 _g1h(g1h) {}
6841
6842 void do_code_blob(CodeBlob* cb) {
6843 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
6844 if (nm == NULL) {
6845 return;
6846 }
6847
6848 if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) {
6849 _g1h->register_nmethod(nm);
6850 }
6851 }
6852 };
6853
6854 void G1CollectedHeap::rebuild_strong_code_roots() {
6855 RebuildStrongCodeRootClosure blob_cl(this);
6856 CodeCache::blobs_do(&blob_cl);
6857 }