comparison: src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 6948:e522a00b91aa

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/ after NPG - C++ build works

author    Doug Simon <doug.simon@oracle.com>
date      Mon, 12 Nov 2012 23:14:12 +0100
parents   04155d9c8c76
children  0f80645e9c26
--- src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  6711:ae13cc658b80
+++ src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  6948:e522a00b91aa
@@ -461,11 +461,12 @@
 bool G1CollectedHeap::is_scavengable(const void* p) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1p = g1h->g1_policy();
   HeapRegion* hr = heap_region_containing(p);
   if (hr == NULL) {
-    // perm gen (or null)
+    // null
+    assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
     return false;
   } else {
     return !hr->isHumongous();
   }
 }
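The assert added on the new side encodes the post-NPG invariant: with the permanent generation gone, heap_region_containing() can return NULL only when the pointer itself is NULL, and only humongous regions are excluded from scavenging. A standalone sketch of the predicate, using stand-in types rather than HotSpot's:

    #include <cassert>
    #include <cstddef>

    struct HeapRegion { bool humongous; };

    // Stand-in for heap_region_containing(): returns the covering region,
    // or NULL only when p itself is NULL (the post-NPG invariant).
    static HeapRegion* heap_region_containing(const void* p, HeapRegion* r) {
      return p == NULL ? NULL : r;
    }

    // An object is scavengable (may be moved by an evacuation pause)
    // unless it lives in a humongous region, which G1 does not evacuate.
    static bool is_scavengable(const void* p, HeapRegion* r) {
      HeapRegion* hr = heap_region_containing(p, r);
      if (hr == NULL) {
        assert(p == NULL);
        return false;
      }
      return !hr->humongous;
    }

    int main() {
      HeapRegion normal = { false }, huge = { true };
      int obj = 0;
      return (is_scavengable(&obj, &normal) && !is_scavengable(&obj, &huge)
              && !is_scavengable(NULL, &normal)) ? 0 : 1;
    }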
@@ -1283,10 +1284,12 @@
   SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;

   print_heap_before_gc();

+  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+
   HRSPhaseSetter x(HRSPhaseFullGC);
   verify_region_sets_optional();

   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                           collector_policy()->should_clear_all_soft_refs();
@@ -1399,10 +1402,13 @@

   verify_after_gc();

   assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
   ref_processor_stw()->verify_no_references_recorded();
+
+  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+  ClassLoaderDataGraph::purge();

   // Note: since we've just done a full GC, concurrent
   // marking is no longer active. Therefore we need not
   // re-enable reference discovery for the CM ref processor.
   // That will be done at the start of the next marking cycle.
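ClassLoaderDataGraph::purge() reclaims the metaspaces of class loaders that died during the full GC by walking the loader-data graph and unlinking dead entries. A self-contained sketch of that unlink-and-free pattern over an intrusive list (illustrative types, not the HotSpot API):

    #include <cstddef>
    #include <cstdio>

    struct LoaderData {
      bool        alive;   // decided by the preceding marking/unloading phase
      LoaderData* next;
    };

    // Unlink and delete every dead node, keeping survivors chained.
    // The real purge() additionally returns each dead loader's metaspace
    // chunks to the metaspace free lists.
    static void purge(LoaderData** head) {
      LoaderData** link = head;
      while (*link != NULL) {
        LoaderData* cur = *link;
        if (cur->alive) {
          link = &cur->next;
        } else {
          *link = cur->next;   // splice the dead node out
          delete cur;
        }
      }
    }

    int main() {
      LoaderData* c = new LoaderData{true,  NULL};
      LoaderData* b = new LoaderData{false, c};
      LoaderData* a = new LoaderData{true,  b};
      purge(&a);
      for (LoaderData* p = a; p != NULL; p = p->next) std::printf("alive\n");
      return 0;
    }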
@@ -1473,12 +1479,11 @@
   if (G1Log::fine()) {
     print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
   }

   if (true) { // FIXME
-    // Ask the permanent generation to adjust size for full collections
-    perm()->compute_new_size();
+    MetaspaceGC::compute_new_size();
   }

   // Start a new incremental collection set for the next pause
   assert(g1_policy()->collection_set() == NULL, "must be");
   g1_policy()->start_incremental_cset_building();
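The epilogue now resizes the class-metadata space via MetaspaceGC::compute_new_size() rather than asking a permanent generation. Such policies typically move a high-water mark relative to post-GC usage; a hedged sketch of the idea (the 40% headroom figure is assumed, not HotSpot's actual policy):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Illustrative high-water-mark policy, NOT HotSpot's actual code:
    // keep roughly `headroom` fraction of free space above current use,
    // clamped to a configured floor.
    static size_t compute_new_size(size_t used, size_t floor_bytes) {
      const double headroom = 0.40;   // assumed free-ratio target
      size_t want = (size_t)(used * (1.0 + headroom));
      return std::max(want, floor_bytes);
    }

    int main() {
      std::printf("%zu\n", compute_new_size(80u << 20, 21u << 20));
      return 0;
    }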
@@ -1947,18 +1952,10 @@

   // Initialize the G1EvacuationFailureALot counters and flags.
   NOT_PRODUCT(reset_evacuation_should_fail();)

   guarantee(_task_queues != NULL, "task_queues allocation failure.");
-#ifdef SPARC
-  // Issue a stern warning, but allow use for experimentation and debugging.
-  if (VM_Version::is_sun4v() && UseMemSetInBOT) {
-    assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
-    warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
-            " on sun4v; please understand that you are using at your own risk!");
-  }
-#endif
 }

 jint G1CollectedHeap::initialize() {
   CollectedHeap::pre_initialize();
   os::enable_vtime();
@@ -1988,12 +1985,10 @@
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");

   _cg1r = new ConcurrentG1Refine();

   // Reserve the maximum.
-  PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
-  // Includes the perm-gen.

   // When compressed oops are enabled, the preferred heap base
   // is calculated by subtracting the requested size from the
   // 32Gb boundary and using the result as the base address for
   // heap reservation. If the requested size is not aligned to
@@ -2003,48 +1998,15 @@
   // address that was requested (i.e. the preferred heap base).
   // If this happens then we could end up using a non-optimal
   // compressed oops mode.

   // Since max_byte_size is aligned to the size of a heap region (checked
-  // above), we also need to align the perm gen size as it might not be.
-  const size_t total_reserved = max_byte_size +
-                                align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
-  Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");
-
-  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
-
-  ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
-                            UseLargePages, addr);
-
-  if (UseCompressedOops) {
-    if (addr != NULL && !heap_rs.is_reserved()) {
-      // Failed to reserve at specified address - the requested memory
-      // region is taken already, for example, by 'java' launcher.
-      // Try again to reserver heap higher.
-      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
-
-      ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
-                                 UseLargePages, addr);
-
-      if (addr != NULL && !heap_rs0.is_reserved()) {
-        // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
-        assert(addr == NULL, "");
-
-        ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
-                                   UseLargePages, addr);
-        heap_rs = heap_rs1;
-      } else {
-        heap_rs = heap_rs0;
-      }
-    }
-  }
-
-  if (!heap_rs.is_reserved()) {
-    vm_exit_during_initialization("Could not reserve enough space for object heap");
-    return JNI_ENOMEM;
-  }
+  // above).
+  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
+
+  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
+                                                 HeapRegion::GrainBytes);

   // It is important to do this in a way such that concurrent readers can't
   // temporarily think somethings in the heap. (I've actually seen this
   // happen in asserts: DLD.)
   _reserved.set_word_size(0);
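The deleted retry ladder tried three compressed-oops layouts in turn: unscaled (heap below the 4GB boundary), zero-based (below 32GB), and heap-based; after this change that policy is hidden inside Universe::reserve_heap(). The base-address arithmetic the surrounding comment describes, in a minimal sketch (constants only, no actual reservation):

    #include <cstdint>
    #include <cstdio>

    // With 8-byte object alignment, a 32-bit narrow oop shifted left by 3
    // can address 2^32 * 8 = 32GB; an unshifted one can address 4GB.
    static const uint64_t kUnscaledMax  = UINT64_C(4)  << 30;  // 4GB
    static const uint64_t kZeroBasedMax = UINT64_C(32) << 30;  // 32GB

    // Preferred base: subtract the request from the boundary so the heap
    // ends exactly at it, maximizing the chance the cheaper encoding fits.
    static uint64_t preferred_base(uint64_t heap_size) {
      if (heap_size <= kUnscaledMax)  return kUnscaledMax - heap_size;
      if (heap_size <= kZeroBasedMax) return kZeroBasedMax - heap_size;
      return 0;  // no preference: fall back to heap-based narrow oops
    }

    int main() {
      // A 20GB heap would be placed at 12GB so it ends at the 32GB boundary.
      std::printf("%#llx\n", (unsigned long long)preferred_base(UINT64_C(20) << 30));
      return 0;
    }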
@@ -2074,13 +2036,10 @@
   // Carve out the G1 part of the heap.

   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
                            g1_rs.size()/HeapWordSize);
-  ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
-
-  _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());

   _g1_storage.initialize(g1_rs, 0);
   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
   _hrs.initialize((HeapWord*) _g1_reserved.start(),
                   (HeapWord*) _g1_reserved.end(),
@@ -2490,25 +2449,10 @@
   // and it's waiting for a full GC to finish will be woken up. It is
   // waiting in VM_G1IncCollectionPause::doit_epilogue().
   FullGCCount_lock->notify_all();
 }

-void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-  GCCauseSetter gcs(this, cause);
-  switch (cause) {
-    case GCCause::_heap_inspection:
-    case GCCause::_heap_dump: {
-      HandleMark hm;
-      do_full_collection(false);         // don't clear all soft refs
-      break;
-    }
-    default: // XXX FIX ME
-      ShouldNotReachHere(); // Unexpected use of this function
-  }
-}
-
 void G1CollectedHeap::collect(GCCause::Cause cause) {
   assert_heap_not_locked();

   unsigned int gc_count_before;
   unsigned int old_marking_count_before;
@@ -2578,47 +2522,41 @@
     // heap_region_containing_raw() should successfully
     // return the containing region.
     HeapRegion* hr = heap_region_containing_raw(p);
     return hr->is_in(p);
   } else {
-    return _perm_gen->as_gen()->is_in(p);
+    return false;
   }
 }

 // Iteration functions.

 // Iterates an OopClosure over all ref-containing fields of objects
 // within a HeapRegion.

 class IterateOopClosureRegionClosure: public HeapRegionClosure {
   MemRegion _mr;
-  OopClosure* _cl;
+  ExtendedOopClosure* _cl;
 public:
-  IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
+  IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
     : _mr(mr), _cl(cl) {}
   bool doHeapRegion(HeapRegion* r) {
     if (!r->continuesHumongous()) {
       r->oop_iterate(_cl);
     }
     return false;
   }
 };

-void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
+void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
   IterateOopClosureRegionClosure blk(_g1_committed, cl);
   heap_region_iterate(&blk);
-  if (do_perm) {
-    perm_gen()->oop_iterate(cl);
-  }
 }

-void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
+void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   IterateOopClosureRegionClosure blk(mr, cl);
   heap_region_iterate(&blk);
-  if (do_perm) {
-    perm_gen()->oop_iterate(cl);
-  }
 }

 // Iterates an ObjectClosure over all objects within a HeapRegion.

 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
@@ -2631,16 +2569,13 @@
     }
     return false;
   }
 };

-void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
+void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
   IterateObjectClosureRegionClosure blk(cl);
   heap_region_iterate(&blk);
-  if (do_perm) {
-    perm_gen()->object_iterate(cl);
-  }
 }

 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
   // FIXME: is this right?
   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
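All the do_perm parameters disappear because class metadata no longer lives in the Java heap; the iterators visit heap regions only, and the oop walkers move to ExtendedOopClosure. The region-closure-wrapping-an-object-closure layering used above, in a self-contained miniature (hypothetical names):

    #include <cstdio>
    #include <vector>

    struct Object { int payload; };
    struct Region { std::vector<Object> objects; bool humongous_tail; };

    struct ObjectClosure { virtual void do_object(Object* o) = 0; virtual ~ObjectClosure() {} };
    struct RegionClosure { virtual bool do_region(Region* r) = 0; virtual ~RegionClosure() {} };

    // Adapter: apply an ObjectClosure to every object of every region,
    // skipping continuation regions of humongous objects, in the spirit of
    // IterateObjectClosureRegionClosure above (a sketch, not HotSpot code).
    struct IterateObjects : RegionClosure {
      ObjectClosure* cl;
      explicit IterateObjects(ObjectClosure* c) : cl(c) {}
      bool do_region(Region* r) {
        if (!r->humongous_tail) {
          for (size_t i = 0; i < r->objects.size(); i++) cl->do_object(&r->objects[i]);
        }
        return false;  // false == keep iterating over regions
      }
    };

    struct Printer : ObjectClosure {
      void do_object(Object* o) { std::printf("%d\n", o->payload); }
    };

    int main() {
      Region r; r.humongous_tail = false; r.objects.push_back(Object{7});
      Printer p; IterateObjects it(&p);
      it.do_region(&r);
      return 0;
    }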
@@ -2981,12 +2916,10 @@
 }


 Space* G1CollectedHeap::space_containing(const void* addr) const {
   Space* res = heap_region_containing(addr);
-  if (res == NULL)
-    res = perm_gen()->space_containing(addr);
   return res;
 }

 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
   Space* sp = space_containing(addr);
@@ -3137,11 +3070,11 @@
     // since the last marking.
     if (_vo == VerifyOption_G1UseMarkWord) {
       guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
     }

-    o->oop_iterate(&isLive);
+    o->oop_iterate_no_header(&isLive);
     if (!_hr->obj_allocated_since_prev_marking(o)) {
       size_t obj_size = o->size();    // Make sure we don't overflow
       _live_bytes += (obj_size * HeapWordSize);
     }
   }
@@ -3224,10 +3157,42 @@
       }
       return false; // stop the region iteration if we hit a failure
     }
 };

+class YoungRefCounterClosure : public OopClosure {
+  G1CollectedHeap* _g1h;
+  int              _count;
+ public:
+  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
+  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
+  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+  int count() { return _count; }
+  void reset_count() { _count = 0; };
+};
+
+class VerifyKlassClosure: public KlassClosure {
+  YoungRefCounterClosure _young_ref_counter_closure;
+  OopClosure *_oop_closure;
+ public:
+  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
+  void do_klass(Klass* k) {
+    k->oops_do(_oop_closure);
+
+    _young_ref_counter_closure.reset_count();
+    k->oops_do(&_young_ref_counter_closure);
+    if (_young_ref_counter_closure.count() > 0) {
+      guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
+    }
+  }
+};
+
+// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
+// pass it as the perm_blk to SharedHeap::process_strong_roots.
+// When process_strong_roots stop calling perm_blk->younger_refs_iterate
+// we can change this closure to extend the simpler OopClosure.
 class VerifyRootsClosure: public OopsInGenClosure {
 private:
   G1CollectedHeap* _g1h;
   VerifyOption     _vo;
   bool             _failures;
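VerifyKlassClosure checks that the per-klass dirty bit is conservative: if walking a Klass's embedded oops finds any reference into the young generation, has_modified_oops() must already be set, otherwise a write barrier was missed. The same invariant check in standalone form (illustrative types):

    #include <cassert>
    #include <vector>

    struct Obj { bool young; };

    struct Klass {
      bool modified_oops;       // "dirty" bit set by the klass write barrier
      std::vector<Obj*> refs;   // oops embedded in the class metadata
    };

    // Verification pass: count young-gen references reachable from the
    // klass; any young ref without the dirty bit means a missed barrier.
    static void verify_klass(const Klass& k) {
      int young = 0;
      for (size_t i = 0; i < k.refs.size(); i++)
        if (k.refs[i]->young) young++;
      if (young > 0) assert(k.modified_oops && "has young refs but is not dirty");
    }

    int main() {
      Obj o = { true };
      Klass k; k.modified_oops = true; k.refs.push_back(&o);
      verify_klass(k);  // passes: young ref present and klass is dirty
      return 0;
    }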
@@ -3301,43 +3266,35 @@
 }

 void G1CollectedHeap::verify(bool silent,
                              VerifyOption vo) {
   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
-    if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
+    if (!silent) { gclog_or_tty->print("Roots "); }
     VerifyRootsClosure rootsCl(vo);

     assert(Thread::current()->is_VM_thread(),
       "Expected to be executed serially by the VM thread at this point");

     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
+    VerifyKlassClosure klassCl(this, &rootsCl);

     // We apply the relevant closures to all the oops in the
     // system dictionary, the string table and the code cache.
     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;

+    // Need cleared claim bits for the strong roots processing
+    ClassLoaderDataGraph::clear_claimed_marks();
+
     process_strong_roots(true,      // activate StrongRootsScope
-                         true,      // we set "collecting perm gen" to true,
-                                    // so we don't reset the dirty cards in the perm gen.
+                         false,     // we set "is scavenging" to false,
+                                    // so we don't reset the dirty cards.
                          ScanningOption(so),  // roots scanning options
                          &rootsCl,
                          &blobsCl,
-                         &rootsCl);
-
-    // If we're verifying after the marking phase of a Full GC then we can't
-    // treat the perm gen as roots into the G1 heap. Some of the objects in
-    // the perm gen may be dead and hence not marked. If one of these dead
-    // objects is considered to be a root then we may end up with a false
-    // "Root location <x> points to dead ob <y>" failure.
-    if (vo != VerifyOption_G1UseMarkWord) {
-      // Since we used "collecting_perm_gen" == true above, we will not have
-      // checked the refs from perm into the G1-collected heap. We check those
-      // references explicitly below. Whether the relevant cards are dirty
-      // is checked further below in the rem set verification.
-      if (!silent) { gclog_or_tty->print("Permgen roots "); }
-      perm_gen()->oop_iterate(&rootsCl);
-    }
+                         &klassCl
+                         );
+
     bool failures = rootsCl.failures();

     if (vo != VerifyOption_G1UseMarkWord) {
       // If we're verifying during a full GC then the region sets
       // will have been torn down at the start of the GC. Therefore
@@ -3429,11 +3386,11 @@
             (size_t) young_regions * HeapRegion::GrainBytes / K);
   uint survivor_regions = g1_policy()->recorded_survivor_regions();
   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
   st->cr();
-  perm()->as_gen()->print_on(st);
+  MetaspaceAux::print_on(st);
 }

 void G1CollectedHeap::print_extended_on(outputStream* st) const {
   print_on(st);

@@ -3454,11 +3411,10 @@
   }
   _cmThread->print_on(st);
   st->cr();
   _cm->print_worker_threads_on(st);
   _cg1r->print_worker_threads_on(st);
-  st->cr();
 }

 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->threads_do(tc);
@@ -3697,10 +3653,47 @@
     task_queue(i)->stats.reset();
   }
 }
 #endif // TASKQUEUE_STATS

+void G1CollectedHeap::log_gc_header() {
+  if (!G1Log::fine()) {
+    return;
+  }
+
+  gclog_or_tty->date_stamp(PrintGCDateStamps);
+  gclog_or_tty->stamp(PrintGCTimeStamps);
+
+  GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
+    .append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
+    .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
+
+  gclog_or_tty->print("[%s", (const char*)gc_cause_str);
+}
+
+void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
+  if (!G1Log::fine()) {
+    return;
+  }
+
+  if (G1Log::finer()) {
+    if (evacuation_failed()) {
+      gclog_or_tty->print(" (to-space exhausted)");
+    }
+    gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
+    g1_policy()->phase_times()->note_gc_end();
+    g1_policy()->phase_times()->print(pause_time_sec);
+    g1_policy()->print_detailed_heap_transition();
+  } else {
+    if (evacuation_failed()) {
+      gclog_or_tty->print("--");
+    }
+    g1_policy()->print_heap_transition();
+    gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
+  }
+}
+
 bool
 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   assert_at_safepoint(true /* should_be_vm_thread */);
   guarantee(!is_gc_active(), "collection is not reentrant");

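The new log_gc_header()/log_gc_footer() pair pulls the per-pause log assembly out of do_collection_pause_at_safepoint() (see the deleted inline block further down), so the header prints before the pause body runs and both verbosity variants of the footer live in one place. The shape of that refactoring in miniature:

    #include <cstdio>

    static bool log_fine = true, log_finer = false;

    static void log_gc_header(const char* cause) {
      if (!log_fine) return;
      std::printf("[%s", cause);   // line opened before the pause body...
    }

    static void log_gc_footer(double pause_sec, bool evac_failed) {
      if (!log_fine) return;
      if (log_finer) {
        if (evac_failed) std::printf(" (to-space exhausted)");
        std::printf(", %3.7f secs]\n", pause_sec);  // ...and closed after it
      } else {
        if (evac_failed) std::printf("--");
        std::printf(", %3.7f secs]\n", pause_sec);
      }
    }

    int main() {
      log_gc_header("GC pause (young)");
      // ... evacuation pause would run here ...
      log_gc_footer(0.0123456, false);
      return 0;
    }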
@@ -3739,21 +3732,17 @@
   if (g1_policy()->during_initial_mark_pause()) {
     // We are about to start a marking cycle, so we increment the
     // full collection counter.
     increment_old_marking_cycles_started();
   }
-  // if the log level is "finer" is on, we'll print long statistics information
-  // in the collector policy code, so let's not print this as the output
-  // is messy if we do.
-  gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);

   int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                                 workers()->active_workers() : 1);
   double pause_start_sec = os::elapsedTime();
   g1_policy()->phase_times()->note_gc_start(active_workers);
-  bool initial_mark_gc = g1_policy()->during_initial_mark_pause();
+  log_gc_header();

   TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
   TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());

   // If the secondary_free_list is not empty, append it to the
@@ -3847,11 +3836,10 @@
 #endif // YOUNG_LIST_VERBOSE

     if (g1_policy()->during_initial_mark_pause()) {
       concurrent_mark()->checkpointRootsInitialPre();
     }
-    perm_gen()->save_marks();

 #if YOUNG_LIST_VERBOSE
     gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
     _young_list->print();
     g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
@@ -4047,39 +4035,11 @@
       ParallelTaskTerminator::print_termination_counts();
 #endif

       gc_epilogue(false);

-      if (G1Log::fine()) {
-        if (PrintGCTimeStamps) {
-          gclog_or_tty->stamp();
-          gclog_or_tty->print(": ");
-        }
-
-        GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
-          .append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
-          .append(initial_mark_gc ? " (initial-mark)" : "");
-
-        double pause_time_sec = os::elapsedTime() - pause_start_sec;
-
-        if (G1Log::finer()) {
-          if (evacuation_failed()) {
-            gc_cause_str.append(" (to-space exhausted)");
-          }
-          gclog_or_tty->print_cr("[%s, %3.7f secs]", (const char*)gc_cause_str, pause_time_sec);
-          g1_policy()->phase_times()->note_gc_end();
-          g1_policy()->phase_times()->print(pause_time_sec);
-          g1_policy()->print_detailed_heap_transition();
-        } else {
-          if (evacuation_failed()) {
-            gc_cause_str.append("--");
-          }
-          gclog_or_tty->print("[%s", (const char*)gc_cause_str);
-          g1_policy()->print_heap_transition();
-          gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
-        }
-      }
+      log_gc_footer(os::elapsedTime() - pause_start_sec);
     }

     // It is not yet to safe to tell the concurrent mark to
     // start as we have some optional output below. We don't want the
     // output from the concurrent mark thread interfering with this
@@ -4190,22 +4150,22 @@
     _old_gc_alloc_region.set(retained_region);
     _hr_printer.reuse(retained_region);
   }
 }

-void G1CollectedHeap::release_gc_alloc_regions() {
+void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
   _survivor_gc_alloc_region.release();
   // If we have an old GC alloc region to release, we'll save it in
   // _retained_old_gc_alloc_region. If we don't
   // _retained_old_gc_alloc_region will become NULL. This is what we
   // want either way so no reason to check explicitly for either
   // condition.
   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();

   if (ResizePLAB) {
-    _survivor_plab_stats.adjust_desired_plab_sz();
-    _old_plab_stats.adjust_desired_plab_sz();
+    _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+    _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
   }
 }

 void G1CollectedHeap::abandon_gc_alloc_regions() {
   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
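adjust_desired_plab_sz() now takes the worker count explicitly instead of querying the heap, so PLAB statistics are normalized by the number of workers that actually ran the pause. A hedged sketch of why the count matters when turning pause-wide totals into a per-worker buffer size (HotSpot's PLABStats applies filtering and weighting on top of this):

    #include <cstddef>
    #include <cstdio>

    // Illustrative only: average the net PLAB allocation of the last pause
    // over the workers that ran it to get a per-worker size target.
    static size_t desired_plab_words(size_t allocated_words,
                                     size_t wasted_words,
                                     unsigned gc_workers) {
      if (gc_workers == 0) gc_workers = 1;
      size_t useful = allocated_words - wasted_words;
      return useful / gc_workers;
    }

    int main() {
      std::printf("%zu words/worker\n", desired_plab_words(1u << 20, 1u << 14, 8));
      return 0;
    }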
@@ -4640,10 +4600,17 @@
     obj = forward_ptr;
   }
   return obj;
 }

+template <class T>
+void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
+  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
+    _scanned_klass->record_modified_oops();
+  }
+}
+
 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
 template <class T>
 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
 ::do_oop_work(T* p) {
   oop obj = oopDesc::load_decode_heap_oop(p);
@@ -4669,10 +4636,12 @@
     }

     // When scanning the RS, we only care about objs in CS.
     if (barrier == G1BarrierRS) {
       _par_scan_state->update_rs(_from, p, _worker_id);
+    } else if (barrier == G1BarrierKlass) {
+      do_klass_barrier(p, forwardee);
     }
   } else {
     // The object is not in collection set. If we're a root scanning
     // closure during an initial mark pause (i.e. do_mark_object will
     // be true) then attempt to mark the object.
@@ -4797,10 +4766,36 @@
     } while (!offer_termination());

     pss->retire_alloc_buffers();
   }

+class G1KlassScanClosure : public KlassClosure {
+ G1ParCopyHelper* _closure;
+ bool             _process_only_dirty;
+ int              _count;
+ public:
+  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
+      : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
+  void do_klass(Klass* klass) {
+    // If the klass has not been dirtied we know that there's
+    // no references into  the young gen and we can skip it.
+   if (!_process_only_dirty || klass->has_modified_oops()) {
+      // Clean the klass since we're going to scavenge all the metadata.
+      klass->clear_modified_oops();
+
+      // Tell the closure that this klass is the Klass to scavenge
+      // and is the one to dirty if oops are left pointing into the young gen.
+      _closure->set_scanned_klass(klass);
+
+      klass->oops_do(_closure);
+
+      _closure->set_scanned_klass(NULL);
+    }
+    _count++;
+  }
+};
+
 class G1ParTask : public AbstractGangTask {
 protected:
   G1CollectedHeap*       _g1h;
   RefToScanQueueSet      *_queues;
   ParallelTaskTerminator _terminator;
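G1KlassScanClosure is the metadata analogue of dirty-card scanning: in a young collection (process_only_dirty == true) only klasses flagged by the klass write barrier are scanned, and the flag is cleared before scanning so the barrier can re-dirty the klass if its oops still point into the young gen after copying. The mark/clear/scan cycle in standalone form (illustrative):

    #include <cstdio>
    #include <vector>

    struct Klass {
      bool dirty;                // set by the klass write barrier
      std::vector<int*> oops;
    };

    static void scan_oops(Klass* k) { /* visit k->oops, copy/update them */ }

    // Young-collection pass: skip clean klasses, clear-then-scan dirty ones.
    // If scanning leaves an oop pointing at a young object, the copy path
    // re-runs the barrier and sets k->dirty again (not shown here).
    static void scan_klasses(std::vector<Klass*>& all, bool only_dirty) {
      for (size_t i = 0; i < all.size(); i++) {
        Klass* k = all[i];
        if (only_dirty && !k->dirty) continue;  // provably no young refs
        k->dirty = false;                       // clean before scavenging
        scan_oops(k);
      }
    }

    int main() {
      Klass a = { true, {} }, b = { false, {} };
      std::vector<Klass*> all; all.push_back(&a); all.push_back(&b);
      scan_klasses(all, /* only_dirty = */ true);
      std::printf("a.dirty=%d b.dirty=%d\n", a.dirty, b.dirty);
      return 0;
    }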
@@ -4864,32 +4859,38 @@
       pss.set_evac_closure(&scan_evac_cl);
       pss.set_evac_failure_closure(&evac_failure_cl);
       pss.set_partial_scan_closure(&partial_scan_cl);

       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
-      G1ParScanPermClosure           only_scan_perm_cl(_g1h, &pss, rp);
+      G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);

       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
-      G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss, rp);
+      G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
+
+      bool only_young = _g1h->g1_policy()->gcs_are_young();
+      G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
+      G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);

       OopClosure*                    scan_root_cl = &only_scan_root_cl;
-      OopsInHeapRegionClosure*       scan_perm_cl = &only_scan_perm_cl;
+      G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;

       if (_g1h->g1_policy()->during_initial_mark_pause()) {
         // We also need to mark copied objects.
         scan_root_cl = &scan_mark_root_cl;
-        scan_perm_cl = &scan_mark_perm_cl;
+        scan_klasses_cl = &scan_mark_klasses_cl_s;
       }

       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);

+      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+
       pss.start_strong_roots();
-      _g1h->g1_process_strong_roots(/* not collecting perm */ false,
-                                    SharedHeap::SO_AllClasses,
+      _g1h->g1_process_strong_roots(/* is scavenging */ true,
+                                    SharedHeap::ScanningOption(so),
                                     scan_root_cl,
                                     &push_heap_rs_cl,
-                                    scan_perm_cl,
+                                    scan_klasses_cl,
                                     worker_id);
       pss.end_strong_roots();

       {
         double start = os::elapsedTime();
@@ -4985,34 +4986,33 @@

 // This method is run in a GC worker.

 void
 G1CollectedHeap::
-g1_process_strong_roots(bool collecting_perm_gen,
+g1_process_strong_roots(bool is_scavenging,
                         ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
-                        OopsInGenClosure* scan_perm,
+                        G1KlassScanClosure* scan_klasses,
                         int worker_i) {

-  // First scan the strong roots, including the perm gen.
+  // First scan the strong roots
   double ext_roots_start = os::elapsedTime();
   double closure_app_time_sec = 0.0;

   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
-  BufferingOopsInGenClosure buf_scan_perm(scan_perm);
-  buf_scan_perm.set_generation(perm_gen());

   // Walk the code cache w/o buffering, because StarTask cannot handle
   // unaligned oop locations.
   G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);

   process_strong_roots(false, // no scoping; this is parallel code
-                       collecting_perm_gen, so,
+                       is_scavenging, so,
                        &buf_scan_non_heap_roots,
                        &eager_scan_code_roots,
-                       &buf_scan_perm);
+                       scan_klasses
+                       );

   // Now the CM ref_processor roots.
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
     // We need to treat the discovered reference lists of the
     // concurrent mark ref processor as roots and keep entries
@@ -5021,14 +5021,13 @@
     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
   }

   // Finish up any enqueued closure apps (attributed as object copy time).
   buf_scan_non_heap_roots.done();
-  buf_scan_perm.done();

-  double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
-                             buf_scan_non_heap_roots.closure_app_seconds();
+  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
+
   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);

   double ext_root_time_ms =
     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;

@@ -5051,11 +5050,10 @@

   // Now scan the complement of the collection set.
   if (scan_rs != NULL) {
     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
   }
-
   _process_strong_tasks->all_tasks_completed();
 }

 void
 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
@@ -5111,21 +5109,21 @@
 // and different queues.

 class G1CopyingKeepAliveClosure: public OopClosure {
   G1CollectedHeap*         _g1h;
   OopClosure*              _copy_non_heap_obj_cl;
-  OopsInHeapRegionClosure* _copy_perm_obj_cl;
+  OopsInHeapRegionClosure* _copy_metadata_obj_cl;
   G1ParScanThreadState*    _par_scan_state;

 public:
   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
                             OopClosure* non_heap_obj_cl,
-                            OopsInHeapRegionClosure* perm_obj_cl,
+                            OopsInHeapRegionClosure* metadata_obj_cl,
                             G1ParScanThreadState* pss):
     _g1h(g1h),
     _copy_non_heap_obj_cl(non_heap_obj_cl),
-    _copy_perm_obj_cl(perm_obj_cl),
+    _copy_metadata_obj_cl(metadata_obj_cl),
     _par_scan_state(pss)
   {}

   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
@@ -5146,26 +5144,24 @@
       // If the reference field is in the G1 heap then we can push
       // on the PSS queue. When the queue is drained (after each
       // phase of reference processing) the object and it's followers
       // will be copied, the reference field set to point to the
       // new location, and the RSet updated. Otherwise we need to
-      // use the the non-heap or perm closures directly to copy
+      // use the the non-heap or metadata closures directly to copy
       // the refernt object and update the pointer, while avoiding
       // updating the RSet.

       if (_g1h->is_in_g1_reserved(p)) {
         _par_scan_state->push_on_queue(p);
       } else {
-        // The reference field is not in the G1 heap.
-        if (_g1h->perm_gen()->is_in(p)) {
-          _copy_perm_obj_cl->do_oop(p);
-        } else {
+        assert(!ClassLoaderDataGraph::contains((address)p),
+               err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
+                       PTR_FORMAT, p));
           _copy_non_heap_obj_cl->do_oop(p);
         }
       }
     }
-  }
 };

 // Serial drain queue closure. Called as the 'complete_gc'
 // closure for each discovered list in some of the
 // reference processing phases.
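The keep-alive closure is left with two destinations for a referent field: fields inside the G1 reserve are queued for deferred copying plus remembered-set update, and anything else (non-heap roots such as JNI handle slots) is copied immediately by the non-heap closure; the new assert records that such fields can no longer live in class-loader metadata. The dispatch in miniature (the address bounds are assumed for illustration):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Sketch of the two-way dispatch on the *address of the field*,
    // not the referent it points to (assumed bounds, not real addresses).
    static const uintptr_t kHeapLo = 0x100000, kHeapHi = 0x200000;

    static bool in_g1_reserved(const void* p) {
      uintptr_t a = (uintptr_t)p;
      return a >= kHeapLo && a < kHeapHi;
    }

    static void keep_alive_field(void** p) {
      if (in_g1_reserved(p)) {
        // Defer: queue the field; the drain phase copies the referent
        // and updates remembered sets.
        std::printf("queued %p\n", (void*)p);
      } else {
        // Root outside the heap (e.g. a JNI handle slot): copy the
        // referent now; no RSet update is needed for non-heap locations.
        std::printf("copied via non-heap closure %p\n", (void*)p);
      }
    }

    int main() {
      void* slot = NULL;
      keep_alive_field(&slot);   // stack address: outside the assumed heap
      return 0;
    }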
@@ -5256,26 +5252,26 @@
     pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
     pss.set_partial_scan_closure(&partial_scan_cl);

     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
-    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
+    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);

     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
-    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
+    G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);

     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+    OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;

     if (_g1h->g1_policy()->during_initial_mark_pause()) {
       // We also need to mark copied objects.
       copy_non_heap_cl = &copy_mark_non_heap_cl;
-      copy_perm_cl = &copy_mark_perm_cl;
+      copy_metadata_cl = &copy_mark_metadata_cl;
     }

     // Keep alive closure.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);

     // Complete GC closure
     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);

     // Call the reference processing task's work routine.
@@ -5370,30 +5366,30 @@

     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");


     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
-    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
+    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);

     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
-    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
+    G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);

     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+    OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;

     if (_g1h->g1_policy()->during_initial_mark_pause()) {
       // We also need to mark copied objects.
       copy_non_heap_cl = &copy_mark_non_heap_cl;
-      copy_perm_cl = &copy_mark_perm_cl;
+      copy_metadata_cl = &copy_mark_metadata_cl;
     }

     // Is alive closure
     G1AlwaysAliveClosure always_alive(_g1h);

     // Copying keep alive closure. Applied to referent objects that need
     // to be copied.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);

     ReferenceProcessor* rp = _g1h->ref_processor_cm();

     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
@@ -5430,11 +5426,11 @@
     assert(pss.refs()->is_empty(), "should be");
   }
 };

 // Weak Reference processing during an evacuation pause (part 1).
-void G1CollectedHeap::process_discovered_references() {
+void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
   double ref_proc_start = os::elapsedTime();

   ReferenceProcessor* rp = _ref_processor_stw;
   assert(rp->discovery_enabled(), "should have been enabled");

@@ -5457,19 +5453,18 @@
   // We also need to do this copying before we process the reference
   // objects discovered by the STW ref processor in case one of these
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.

-  uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                        workers()->active_workers() : 1);
-
   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
-         active_workers == workers()->active_workers(),
-         "Need to reset active_workers");
+         no_of_gc_workers == workers()->active_workers(),
+         "Need to reset active GC workers");

-  set_par_threads(active_workers);
-  G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
+  set_par_threads(no_of_gc_workers);
+  G1ParPreserveCMReferentsTask keep_cm_referents(this,
+                                                 no_of_gc_workers,
+                                                 _task_queues);

   if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->run_task(&keep_cm_referents);
   } else {
     keep_cm_referents.work(0);
@@ -5500,26 +5495,26 @@
   pss.set_partial_scan_closure(&partial_scan_cl);

   assert(pss.refs()->is_empty(), "pre-condition");

   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
-  G1ParScanPermClosure           only_copy_perm_cl(this, &pss, NULL);
+  G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);

   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
-  G1ParScanAndMarkPermClosure    copy_mark_perm_cl(this, &pss, NULL);
+  G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);

   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-  OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+  OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;

   if (_g1h->g1_policy()->during_initial_mark_pause()) {
     // We also need to mark copied objects.
     copy_non_heap_cl = &copy_mark_non_heap_cl;
-    copy_perm_cl = &copy_mark_perm_cl;
+    copy_metadata_cl = &copy_mark_metadata_cl;
   }

   // Keep alive closure.
-  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
+  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);

   // Serial Complete GC closure
   G1STWDrainQueueClosure drain_queue(this, &pss);

   // Setup the soft refs policy...
@@ -5531,14 +5526,14 @@
                                       &keep_alive,
                                       &drain_queue,
                                       NULL);
   } else {
     // Parallel reference processing
-    assert(rp->num_q() == active_workers, "sanity");
-    assert(active_workers <= rp->max_num_q(), "sanity");
+    assert(rp->num_q() == no_of_gc_workers, "sanity");
+    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");

-    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
   }

   // We have completed copying any necessary live referent objects
   // (that were not copied during the actual pause) so we can
@@ -5549,11 +5544,11 @@
   double ref_proc_time = os::elapsedTime() - ref_proc_start;
   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
 }

 // Weak Reference processing during an evacuation pause (part 2).
-void G1CollectedHeap::enqueue_discovered_references() {
+void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
   double ref_enq_start = os::elapsedTime();

   ReferenceProcessor* rp = _ref_processor_stw;
   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");

@@ -5563,17 +5558,16 @@
     // Serial reference processing...
     rp->enqueue_discovered_references();
   } else {
     // Parallel reference enqueuing

-    uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
-    assert(active_workers == workers()->active_workers(),
-           "Need to reset active_workers");
-    assert(rp->num_q() == active_workers, "sanity");
-    assert(active_workers <= rp->max_num_q(), "sanity");
+    assert(no_of_gc_workers == workers()->active_workers(),
+           "Need to reset active workers");
+    assert(rp->num_q() == no_of_gc_workers, "sanity");
+    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");

-    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
     rp->enqueue_discovered_references(&par_task_executor);
   }

   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
@@ -5661,11 +5655,11 @@
   // Process any discovered reference objects - we have
   // to do this _before_ we retire the GC alloc regions
   // as we may have to copy some 'reachable' referent
   // objects (and their reachable sub-graphs) that were
   // not copied during the pause.
-  process_discovered_references();
+  process_discovered_references(n_workers);

   // Weak root processing.
   // Note: when JSR 292 is enabled and code blobs can contain
   // non-perm oops then we will need to process the code blobs
   // here too.
@@ -5673,11 +5667,11 @@
     G1STWIsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }

-  release_gc_alloc_regions();
+  release_gc_alloc_regions(n_workers);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();

   concurrent_g1_refine()->clear_hot_cache();
   concurrent_g1_refine()->set_use_cache(true);

@@ -5697,11 +5691,11 @@
   // this after the card table is cleaned (and verified) as
   // the act of enqueuing entries on to the pending list
   // will log these updates (and dirty their associated
   // cards). We need these updates logged to update any
   // RSets.
-  enqueue_discovered_references();
+  enqueue_discovered_references(n_workers);

   if (G1DeferredRSUpdate) {
     RedirtyLoggedCardTableEntryFastClosure redirty;
     dirty_card_queue_set().set_closure(&redirty);
     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
@@ -6239,11 +6233,11 @@
 }

 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
   HeapRegion* hr = heap_region_containing(p);
   if (hr == NULL) {
-    return is_in_permanent(p);
+    return false;
   } else {
     return hr->is_in(p);
   }
 }

@@ -6417,11 +6411,11 @@
                                              HeapWord* bottom) {
   HeapWord* end = bottom + HeapRegion::GrainWords;
   MemRegion mr(bottom, end);
   assert(_g1_reserved.contains(mr), "invariant");
   // This might return NULL if the allocation fails
-  return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
+  return new HeapRegion(hrs_index, _bot_shared, mr);
 }

 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
