comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents c9814fadeb38
children 9646b7ff4d14 a7509aff1b06
comparison
6724:36d1d483d5d6 (left: parent, before)   6725:da91efe96a93 (right: this changeset, after)
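Overview of this file's changes, as reflected in the hunks below: the perm-gen reservation and the perm-scanning closures are removed from G1, class metadata is instead reached through Klass/ClassLoaderData closures during root scanning, full collections purge the ClassLoaderDataGraph and resize metaspace via MetaspaceGC, and several iteration entry points lose their do_perm parameter.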
461 bool G1CollectedHeap::is_scavengable(const void* p) { 461 bool G1CollectedHeap::is_scavengable(const void* p) {
462 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 462 G1CollectedHeap* g1h = G1CollectedHeap::heap();
463 G1CollectorPolicy* g1p = g1h->g1_policy(); 463 G1CollectorPolicy* g1p = g1h->g1_policy();
464 HeapRegion* hr = heap_region_containing(p); 464 HeapRegion* hr = heap_region_containing(p);
465 if (hr == NULL) { 465 if (hr == NULL) {
466 // perm gen (or null) 466 // null
467 assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
467 return false; 468 return false;
468 } else { 469 } else {
469 return !hr->isHumongous(); 470 return !hr->isHumongous();
470 } 471 }
471 } 472 }
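With PermGen gone, heap_region_containing(p) returning NULL can no longer mean "address in the perm gen"; the added assert documents that the only remaining non-region address expected here is a NULL pointer.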
1283 SvcGCMarker sgcm(SvcGCMarker::FULL); 1284 SvcGCMarker sgcm(SvcGCMarker::FULL);
1284 ResourceMark rm; 1285 ResourceMark rm;
1285 1286
1286 print_heap_before_gc(); 1287 print_heap_before_gc();
1287 1288
1289 size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
1290
1288 HRSPhaseSetter x(HRSPhaseFullGC); 1291 HRSPhaseSetter x(HRSPhaseFullGC);
1289 verify_region_sets_optional(); 1292 verify_region_sets_optional();
1290 1293
1291 const bool do_clear_all_soft_refs = clear_all_soft_refs || 1294 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1292 collector_policy()->should_clear_all_soft_refs(); 1295 collector_policy()->should_clear_all_soft_refs();
1399 1402
1400 verify_after_gc(); 1403 verify_after_gc();
1401 1404
1402 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); 1405 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1403 ref_processor_stw()->verify_no_references_recorded(); 1406 ref_processor_stw()->verify_no_references_recorded();
1407
1408 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1409 ClassLoaderDataGraph::purge();
1404 1410
1405 // Note: since we've just done a full GC, concurrent 1411 // Note: since we've just done a full GC, concurrent
1406 // marking is no longer active. Therefore we need not 1412 // marking is no longer active. Therefore we need not
1407 // re-enable reference discovery for the CM ref processor. 1413 // re-enable reference discovery for the CM ref processor.
1408 // That will be done at the start of the next marking cycle. 1414 // That will be done at the start of the next marking cycle.
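The full-collection path now snapshots metaspace usage up front (metadata_prev_used) and, after reference-processing verification, drops the metaspaces of unloaded class loaders via ClassLoaderDataGraph::purge(). A minimal sketch of how that bookkeeping could be surfaced in the GC log, reusing only names visible in this hunk (illustrative only, not code from the changeset):

    // Illustrative sketch only -- not part of this changeset.
    size_t metadata_prev_used = MetaspaceAux::used_in_bytes();  // before the full GC
    // ... full collection runs; unreachable class loaders are unloaded ...
    ClassLoaderDataGraph::purge();                              // free their metaspaces
    if (G1Log::fine()) {
      gclog_or_tty->print_cr("Metaspace used: " SIZE_FORMAT "K -> " SIZE_FORMAT "K",
                             metadata_prev_used / K, MetaspaceAux::used_in_bytes() / K);
    }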
1473 if (G1Log::fine()) { 1479 if (G1Log::fine()) {
1474 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); 1480 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
1475 } 1481 }
1476 1482
1477 if (true) { // FIXME 1483 if (true) { // FIXME
1478 // Ask the permanent generation to adjust size for full collections 1484 MetaspaceGC::compute_new_size();
1479 perm()->compute_new_size();
1480 } 1485 }
1481 1486
1482 // Start a new incremental collection set for the next pause 1487 // Start a new incremental collection set for the next pause
1483 assert(g1_policy()->collection_set() == NULL, "must be"); 1488 assert(g1_policy()->collection_set() == NULL, "must be");
1484 g1_policy()->start_incremental_cset_building(); 1489 g1_policy()->start_incremental_cset_building();
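Metaspace sizing after a full collection is now handled by MetaspaceGC::compute_new_size() instead of asking the permanent generation to adjust its size; note that the surrounding if (true) { // FIXME guard is carried over unchanged.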
1988 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); 1993 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1989 1994
1990 _cg1r = new ConcurrentG1Refine(); 1995 _cg1r = new ConcurrentG1Refine();
1991 1996
1992 // Reserve the maximum. 1997 // Reserve the maximum.
1993 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
1994 // Includes the perm-gen.
1995 1998
1996 // When compressed oops are enabled, the preferred heap base 1999 // When compressed oops are enabled, the preferred heap base
1997 // is calculated by subtracting the requested size from the 2000 // is calculated by subtracting the requested size from the
1998 // 32Gb boundary and using the result as the base address for 2001 // 32Gb boundary and using the result as the base address for
1999 // heap reservation. If the requested size is not aligned to 2002 // heap reservation. If the requested size is not aligned to
2003 // address that was requested (i.e. the preferred heap base). 2006 // address that was requested (i.e. the preferred heap base).
2004 // If this happens then we could end up using a non-optimal 2007 // If this happens then we could end up using a non-optimal
2005 // compressed oops mode. 2008 // compressed oops mode.
2006 2009
2007 // Since max_byte_size is aligned to the size of a heap region (checked 2010 // Since max_byte_size is aligned to the size of a heap region (checked
2008 // above), we also need to align the perm gen size as it might not be. 2011 // above).
2009 const size_t total_reserved = max_byte_size + 2012 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2010 align_size_up(pgs->max_size(), HeapRegion::GrainBytes); 2013
2011 Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm"); 2014 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2012 2015 HeapRegion::GrainBytes);
2013 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
2014
2015 ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
2016 UseLargePages, addr);
2017
2018 if (UseCompressedOops) {
2019 if (addr != NULL && !heap_rs.is_reserved()) {
2020 // Failed to reserve at specified address - the requested memory
2021 // region is taken already, for example, by 'java' launcher.
2022 // Try again to reserver heap higher.
2023 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
2024
2025 ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
2026 UseLargePages, addr);
2027
2028 if (addr != NULL && !heap_rs0.is_reserved()) {
2029 // Failed to reserve at specified address again - give up.
2030 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
2031 assert(addr == NULL, "");
2032
2033 ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
2034 UseLargePages, addr);
2035 heap_rs = heap_rs1;
2036 } else {
2037 heap_rs = heap_rs0;
2038 }
2039 }
2040 }
2041
2042 if (!heap_rs.is_reserved()) {
2043 vm_exit_during_initialization("Could not reserve enough space for object heap");
2044 return JNI_ENOMEM;
2045 }
2046 2016
2047 // It is important to do this in a way such that concurrent readers can't 2017 // It is important to do this in a way such that concurrent readers can't
2048 // temporarily think somethings in the heap. (I've actually seen this 2018 // temporarily think somethings in the heap. (I've actually seen this
2049 // happen in asserts: DLD.) 2019 // happen in asserts: DLD.)
2050 _reserved.set_word_size(0); 2020 _reserved.set_word_size(0);
2074 // Carve out the G1 part of the heap. 2044 // Carve out the G1 part of the heap.
2075 2045
2076 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); 2046 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
2077 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), 2047 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
2078 g1_rs.size()/HeapWordSize); 2048 g1_rs.size()/HeapWordSize);
2079 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
2080
2081 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
2082 2049
2083 _g1_storage.initialize(g1_rs, 0); 2050 _g1_storage.initialize(g1_rs, 0);
2084 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); 2051 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2085 _hrs.initialize((HeapWord*) _g1_reserved.start(), 2052 _hrs.initialize((HeapWord*) _g1_reserved.start(),
2086 (HeapWord*) _g1_reserved.end(), 2053 (HeapWord*) _g1_reserved.end(),
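Because there is no longer a perm gen to co-locate with the Java heap, total_reserved and the perm-gen alignment go away and the heap is reserved directly with Universe::reserve_heap(max_byte_size, HeapRegion::GrainBytes). The explicit Unscaled/ZeroBased/HeapBased retry sequence for compressed-oops placement is deleted from this file; presumably that policy now lives behind Universe::reserve_heap rather than in each collector. Likewise, heap_rs.last_part(max_byte_size) is no longer carved off and handed to pgs->init() for the perm gen.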
2490 // and it's waiting for a full GC to finish will be woken up. It is 2457 // and it's waiting for a full GC to finish will be woken up. It is
2491 // waiting in VM_G1IncCollectionPause::doit_epilogue(). 2458 // waiting in VM_G1IncCollectionPause::doit_epilogue().
2492 FullGCCount_lock->notify_all(); 2459 FullGCCount_lock->notify_all();
2493 } 2460 }
2494 2461
2495 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
2496 assert_at_safepoint(true /* should_be_vm_thread */);
2497 GCCauseSetter gcs(this, cause);
2498 switch (cause) {
2499 case GCCause::_heap_inspection:
2500 case GCCause::_heap_dump: {
2501 HandleMark hm;
2502 do_full_collection(false); // don't clear all soft refs
2503 break;
2504 }
2505 default: // XXX FIX ME
2506 ShouldNotReachHere(); // Unexpected use of this function
2507 }
2508 }
2509
2510 void G1CollectedHeap::collect(GCCause::Cause cause) { 2462 void G1CollectedHeap::collect(GCCause::Cause cause) {
2511 assert_heap_not_locked(); 2463 assert_heap_not_locked();
2512 2464
2513 unsigned int gc_count_before; 2465 unsigned int gc_count_before;
2514 unsigned int old_marking_count_before; 2466 unsigned int old_marking_count_before;
2578 // heap_region_containing_raw() should successfully 2530 // heap_region_containing_raw() should successfully
2579 // return the containing region. 2531 // return the containing region.
2580 HeapRegion* hr = heap_region_containing_raw(p); 2532 HeapRegion* hr = heap_region_containing_raw(p);
2581 return hr->is_in(p); 2533 return hr->is_in(p);
2582 } else { 2534 } else {
2583 return _perm_gen->as_gen()->is_in(p); 2535 return false;
2584 } 2536 }
2585 } 2537 }
2586 2538
2587 // Iteration functions. 2539 // Iteration functions.
2588 2540
2589 // Iterates an OopClosure over all ref-containing fields of objects 2541 // Iterates an OopClosure over all ref-containing fields of objects
2590 // within a HeapRegion. 2542 // within a HeapRegion.
2591 2543
2592 class IterateOopClosureRegionClosure: public HeapRegionClosure { 2544 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2593 MemRegion _mr; 2545 MemRegion _mr;
2594 OopClosure* _cl; 2546 ExtendedOopClosure* _cl;
2595 public: 2547 public:
2596 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) 2548 IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
2597 : _mr(mr), _cl(cl) {} 2549 : _mr(mr), _cl(cl) {}
2598 bool doHeapRegion(HeapRegion* r) { 2550 bool doHeapRegion(HeapRegion* r) {
2599 if (!r->continuesHumongous()) { 2551 if (!r->continuesHumongous()) {
2600 r->oop_iterate(_cl); 2552 r->oop_iterate(_cl);
2601 } 2553 }
2602 return false; 2554 return false;
2603 } 2555 }
2604 }; 2556 };
2605 2557
2606 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { 2558 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
2607 IterateOopClosureRegionClosure blk(_g1_committed, cl); 2559 IterateOopClosureRegionClosure blk(_g1_committed, cl);
2608 heap_region_iterate(&blk); 2560 heap_region_iterate(&blk);
2609 if (do_perm) { 2561 }
2610 perm_gen()->oop_iterate(cl); 2562
2611 } 2563 void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
2612 }
2613
2614 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
2615 IterateOopClosureRegionClosure blk(mr, cl); 2564 IterateOopClosureRegionClosure blk(mr, cl);
2616 heap_region_iterate(&blk); 2565 heap_region_iterate(&blk);
2617 if (do_perm) {
2618 perm_gen()->oop_iterate(cl);
2619 }
2620 } 2566 }
2621 2567
2622 // Iterates an ObjectClosure over all objects within a HeapRegion. 2568 // Iterates an ObjectClosure over all objects within a HeapRegion.
2623 2569
2624 class IterateObjectClosureRegionClosure: public HeapRegionClosure { 2570 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2631 } 2577 }
2632 return false; 2578 return false;
2633 } 2579 }
2634 }; 2580 };
2635 2581
2636 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { 2582 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2637 IterateObjectClosureRegionClosure blk(cl); 2583 IterateObjectClosureRegionClosure blk(cl);
2638 heap_region_iterate(&blk); 2584 heap_region_iterate(&blk);
2639 if (do_perm) {
2640 perm_gen()->object_iterate(cl);
2641 }
2642 } 2585 }
2643 2586
2644 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { 2587 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
2645 // FIXME: is this right? 2588 // FIXME: is this right?
2646 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); 2589 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
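In this hunk, is_in() simply returns false for addresses outside any heap region instead of asking the perm gen, oop_iterate() and object_iterate() lose their do_perm flag, and the oop-walking variants now take ExtendedOopClosure* rather than OopClosure*. A minimal usage sketch of the new entry point (illustrative only; it assumes ExtendedOopClosure provides default do_metadata()/do_klass() behavior so a subclass only needs to override do_oop):

    class CountOopsClosure : public ExtendedOopClosure {
      size_t _count;
    public:
      CountOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { _count++; }
      virtual void do_oop(narrowOop* p) { _count++; }
      size_t count() const { return _count; }
    };

    CountOopsClosure cl;
    G1CollectedHeap::heap()->oop_iterate(&cl);  // walk ref fields in all committed regions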
2981 } 2924 }
2982 2925
2983 2926
2984 Space* G1CollectedHeap::space_containing(const void* addr) const { 2927 Space* G1CollectedHeap::space_containing(const void* addr) const {
2985 Space* res = heap_region_containing(addr); 2928 Space* res = heap_region_containing(addr);
2986 if (res == NULL)
2987 res = perm_gen()->space_containing(addr);
2988 return res; 2929 return res;
2989 } 2930 }
2990 2931
2991 HeapWord* G1CollectedHeap::block_start(const void* addr) const { 2932 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2992 Space* sp = space_containing(addr); 2933 Space* sp = space_containing(addr);
3137 // since the last marking. 3078 // since the last marking.
3138 if (_vo == VerifyOption_G1UseMarkWord) { 3079 if (_vo == VerifyOption_G1UseMarkWord) {
3139 guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch"); 3080 guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
3140 } 3081 }
3141 3082
3142 o->oop_iterate(&isLive); 3083 o->oop_iterate_no_header(&isLive);
3143 if (!_hr->obj_allocated_since_prev_marking(o)) { 3084 if (!_hr->obj_allocated_since_prev_marking(o)) {
3144 size_t obj_size = o->size(); // Make sure we don't overflow 3085 size_t obj_size = o->size(); // Make sure we don't overflow
3145 _live_bytes += (obj_size * HeapWordSize); 3086 _live_bytes += (obj_size * HeapWordSize);
3146 } 3087 }
3147 } 3088 }
3224 } 3165 }
3225 return false; // stop the region iteration if we hit a failure 3166 return false; // stop the region iteration if we hit a failure
3226 } 3167 }
3227 }; 3168 };
3228 3169
3170 class YoungRefCounterClosure : public OopClosure {
3171 G1CollectedHeap* _g1h;
3172 int _count;
3173 public:
3174 YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3175 void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
3176 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3177
3178 int count() { return _count; }
3179 void reset_count() { _count = 0; };
3180 };
3181
3182 class VerifyKlassClosure: public KlassClosure {
3183 YoungRefCounterClosure _young_ref_counter_closure;
3184 OopClosure *_oop_closure;
3185 public:
3186 VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3187 void do_klass(Klass* k) {
3188 k->oops_do(_oop_closure);
3189
3190 _young_ref_counter_closure.reset_count();
3191 k->oops_do(&_young_ref_counter_closure);
3192 if (_young_ref_counter_closure.count() > 0) {
3193 guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
3194 }
3195 }
3196 };
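VerifyKlassClosure is new verification support for the metadata rewrite: for each Klass it applies the ordinary oop closure to the Klass's oop fields and, via YoungRefCounterClosure, checks the invariant that a Klass holding references into the young generation must have its modified-oops flag set (the has_modified_oops() guarantee). It is wired into verify() below through process_strong_roots(), which now also requires ClassLoaderDataGraph::clear_claimed_marks() beforehand; the old explicit "Permgen roots" verification pass is dropped.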
3197
3198 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3199 // pass it as the perm_blk to SharedHeap::process_strong_roots.
3200 // When process_strong_roots stop calling perm_blk->younger_refs_iterate
3201 // we can change this closure to extend the simpler OopClosure.
3229 class VerifyRootsClosure: public OopsInGenClosure { 3202 class VerifyRootsClosure: public OopsInGenClosure {
3230 private: 3203 private:
3231 G1CollectedHeap* _g1h; 3204 G1CollectedHeap* _g1h;
3232 VerifyOption _vo; 3205 VerifyOption _vo;
3233 bool _failures; 3206 bool _failures;
3301 } 3274 }
3302 3275
3303 void G1CollectedHeap::verify(bool silent, 3276 void G1CollectedHeap::verify(bool silent,
3304 VerifyOption vo) { 3277 VerifyOption vo) {
3305 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { 3278 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
3306 if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); } 3279 if (!silent) { gclog_or_tty->print("Roots "); }
3307 VerifyRootsClosure rootsCl(vo); 3280 VerifyRootsClosure rootsCl(vo);
3308 3281
3309 assert(Thread::current()->is_VM_thread(), 3282 assert(Thread::current()->is_VM_thread(),
3310 "Expected to be executed serially by the VM thread at this point"); 3283 "Expected to be executed serially by the VM thread at this point");
3311 3284
3312 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); 3285 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
3286 VerifyKlassClosure klassCl(this, &rootsCl);
3313 3287
3314 // We apply the relevant closures to all the oops in the 3288 // We apply the relevant closures to all the oops in the
3315 // system dictionary, the string table and the code cache. 3289 // system dictionary, the string table and the code cache.
3316 const int so = SO_AllClasses | SO_Strings | SO_CodeCache; 3290 const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3317 3291
3292 // Need cleared claim bits for the strong roots processing
3293 ClassLoaderDataGraph::clear_claimed_marks();
3294
3318 process_strong_roots(true, // activate StrongRootsScope 3295 process_strong_roots(true, // activate StrongRootsScope
3319 true, // we set "collecting perm gen" to true, 3296 false, // we set "is scavenging" to false,
3320 // so we don't reset the dirty cards in the perm gen. 3297 // so we don't reset the dirty cards.
3321 ScanningOption(so), // roots scanning options 3298 ScanningOption(so), // roots scanning options
3322 &rootsCl, 3299 &rootsCl,
3323 &blobsCl, 3300 &blobsCl,
3324 &rootsCl); 3301 &klassCl
3325 3302 );
3326 // If we're verifying after the marking phase of a Full GC then we can't 3303
3327 // treat the perm gen as roots into the G1 heap. Some of the objects in
3328 // the perm gen may be dead and hence not marked. If one of these dead
3329 // objects is considered to be a root then we may end up with a false
3330 // "Root location <x> points to dead ob <y>" failure.
3331 if (vo != VerifyOption_G1UseMarkWord) {
3332 // Since we used "collecting_perm_gen" == true above, we will not have
3333 // checked the refs from perm into the G1-collected heap. We check those
3334 // references explicitly below. Whether the relevant cards are dirty
3335 // is checked further below in the rem set verification.
3336 if (!silent) { gclog_or_tty->print("Permgen roots "); }
3337 perm_gen()->oop_iterate(&rootsCl);
3338 }
3339 bool failures = rootsCl.failures(); 3304 bool failures = rootsCl.failures();
3340 3305
3341 if (vo != VerifyOption_G1UseMarkWord) { 3306 if (vo != VerifyOption_G1UseMarkWord) {
3342 // If we're verifying during a full GC then the region sets 3307 // If we're verifying during a full GC then the region sets
3343 // will have been torn down at the start of the GC. Therefore 3308 // will have been torn down at the start of the GC. Therefore
3429 (size_t) young_regions * HeapRegion::GrainBytes / K); 3394 (size_t) young_regions * HeapRegion::GrainBytes / K);
3430 uint survivor_regions = g1_policy()->recorded_survivor_regions(); 3395 uint survivor_regions = g1_policy()->recorded_survivor_regions();
3431 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions, 3396 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3432 (size_t) survivor_regions * HeapRegion::GrainBytes / K); 3397 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3433 st->cr(); 3398 st->cr();
3434 perm()->as_gen()->print_on(st);
3435 } 3399 }
3436 3400
3437 void G1CollectedHeap::print_extended_on(outputStream* st) const { 3401 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3438 print_on(st); 3402 print_on(st);
3439 3403
3847 #endif // YOUNG_LIST_VERBOSE 3811 #endif // YOUNG_LIST_VERBOSE
3848 3812
3849 if (g1_policy()->during_initial_mark_pause()) { 3813 if (g1_policy()->during_initial_mark_pause()) {
3850 concurrent_mark()->checkpointRootsInitialPre(); 3814 concurrent_mark()->checkpointRootsInitialPre();
3851 } 3815 }
3852 perm_gen()->save_marks();
3853 3816
3854 #if YOUNG_LIST_VERBOSE 3817 #if YOUNG_LIST_VERBOSE
3855 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); 3818 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3856 _young_list->print(); 3819 _young_list->print();
3857 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); 3820 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4640 obj = forward_ptr; 4603 obj = forward_ptr;
4641 } 4604 }
4642 return obj; 4605 return obj;
4643 } 4606 }
4644 4607
4608 template <class T>
4609 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4610 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4611 _scanned_klass->record_modified_oops();
4612 }
4613 }
4614
4645 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> 4615 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4646 template <class T> 4616 template <class T>
4647 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> 4617 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4648 ::do_oop_work(T* p) { 4618 ::do_oop_work(T* p) {
4649 oop obj = oopDesc::load_decode_heap_oop(p); 4619 oop obj = oopDesc::load_decode_heap_oop(p);
4669 } 4639 }
4670 4640
4671 // When scanning the RS, we only care about objs in CS. 4641 // When scanning the RS, we only care about objs in CS.
4672 if (barrier == G1BarrierRS) { 4642 if (barrier == G1BarrierRS) {
4673 _par_scan_state->update_rs(_from, p, _worker_id); 4643 _par_scan_state->update_rs(_from, p, _worker_id);
4644 } else if (barrier == G1BarrierKlass) {
4645 do_klass_barrier(p, forwardee);
4674 } 4646 }
4675 } else { 4647 } else {
4676 // The object is not in collection set. If we're a root scanning 4648 // The object is not in collection set. If we're a root scanning
4677 // closure during an initial mark pause (i.e. do_mark_object will 4649 // closure during an initial mark pause (i.e. do_mark_object will
4678 // be true) then attempt to mark the object. 4650 // be true) then attempt to mark the object.
4797 } while (!offer_termination()); 4769 } while (!offer_termination());
4798 4770
4799 pss->retire_alloc_buffers(); 4771 pss->retire_alloc_buffers();
4800 } 4772 }
4801 4773
4774 class G1KlassScanClosure : public KlassClosure {
4775 G1ParCopyHelper* _closure;
4776 bool _process_only_dirty;
4777 int _count;
4778 public:
4779 G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4780 : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4781 void do_klass(Klass* klass) {
4782 // If the klass has not been dirtied we know that there's
4783 // no references into the young gen and we can skip it.
4784 if (!_process_only_dirty || klass->has_modified_oops()) {
4785 // Clean the klass since we're going to scavenge all the metadata.
4786 klass->clear_modified_oops();
4787
4788 // Tell the closure that this klass is the Klass to scavenge
4789 // and is the one to dirty if oops are left pointing into the young gen.
4790 _closure->set_scanned_klass(klass);
4791
4792 klass->oops_do(_closure);
4793
4794 _closure->set_scanned_klass(NULL);
4795 }
4796 _count++;
4797 }
4798 };
4799
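do_klass_barrier() above and G1KlassScanClosure together replace the perm-gen remembered set for class metadata: copying an object into a young region dirties the Klass being scanned, and a young-only collection later skips any Klass whose flag is clean. A condensed, illustrative restatement of that pairing (all names taken from this hunk; not code from the changeset):

    // Copy-time barrier: a copied object landing in the young gen dirties the scanned Klass.
    void klass_barrier_sketch(G1CollectedHeap* g1, Klass* scanned_klass, oop new_obj) {
      if (g1->heap_region_containing_raw(new_obj)->is_young()) {
        scanned_klass->record_modified_oops();
      }
    }

    // Scan-time filter: young GCs only walk Klasses dirtied since the last scan.
    void klass_scan_sketch(Klass* klass, OopClosure* cl, bool process_only_dirty) {
      if (!process_only_dirty || klass->has_modified_oops()) {
        klass->clear_modified_oops();   // about to rescan all of this Klass's oops
        klass->oops_do(cl);
      }
    }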
4802 class G1ParTask : public AbstractGangTask { 4800 class G1ParTask : public AbstractGangTask {
4803 protected: 4801 protected:
4804 G1CollectedHeap* _g1h; 4802 G1CollectedHeap* _g1h;
4805 RefToScanQueueSet *_queues; 4803 RefToScanQueueSet *_queues;
4806 ParallelTaskTerminator _terminator; 4804 ParallelTaskTerminator _terminator;
4864 pss.set_evac_closure(&scan_evac_cl); 4862 pss.set_evac_closure(&scan_evac_cl);
4865 pss.set_evac_failure_closure(&evac_failure_cl); 4863 pss.set_evac_failure_closure(&evac_failure_cl);
4866 pss.set_partial_scan_closure(&partial_scan_cl); 4864 pss.set_partial_scan_closure(&partial_scan_cl);
4867 4865
4868 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp); 4866 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
4869 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss, rp); 4867 G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
4870 4868
4871 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp); 4869 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4872 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss, rp); 4870 G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4871
4872 bool only_young = _g1h->g1_policy()->gcs_are_young();
4873 G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4874 G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4873 4875
4874 OopClosure* scan_root_cl = &only_scan_root_cl; 4876 OopClosure* scan_root_cl = &only_scan_root_cl;
4875 OopsInHeapRegionClosure* scan_perm_cl = &only_scan_perm_cl; 4877 G1KlassScanClosure* scan_klasses_cl = &only_scan_klasses_cl_s;
4876 4878
4877 if (_g1h->g1_policy()->during_initial_mark_pause()) { 4879 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4878 // We also need to mark copied objects. 4880 // We also need to mark copied objects.
4879 scan_root_cl = &scan_mark_root_cl; 4881 scan_root_cl = &scan_mark_root_cl;
4880 scan_perm_cl = &scan_mark_perm_cl; 4882 scan_klasses_cl = &scan_mark_klasses_cl_s;
4881 } 4883 }
4882 4884
4883 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); 4885 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4884 4886
4887 int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
4888
4885 pss.start_strong_roots(); 4889 pss.start_strong_roots();
4886 _g1h->g1_process_strong_roots(/* not collecting perm */ false, 4890 _g1h->g1_process_strong_roots(/* is scavenging */ true,
4887 SharedHeap::SO_AllClasses, 4891 SharedHeap::ScanningOption(so),
4888 scan_root_cl, 4892 scan_root_cl,
4889 &push_heap_rs_cl, 4893 &push_heap_rs_cl,
4890 scan_perm_cl, 4894 scan_klasses_cl,
4891 worker_id); 4895 worker_id);
4892 pss.end_strong_roots(); 4896 pss.end_strong_roots();
4893 4897
4894 { 4898 {
4895 double start = os::elapsedTime(); 4899 double start = os::elapsedTime();
4985 4989
4986 // This method is run in a GC worker. 4990 // This method is run in a GC worker.
4987 4991
4988 void 4992 void
4989 G1CollectedHeap:: 4993 G1CollectedHeap::
4990 g1_process_strong_roots(bool collecting_perm_gen, 4994 g1_process_strong_roots(bool is_scavenging,
4991 ScanningOption so, 4995 ScanningOption so,
4992 OopClosure* scan_non_heap_roots, 4996 OopClosure* scan_non_heap_roots,
4993 OopsInHeapRegionClosure* scan_rs, 4997 OopsInHeapRegionClosure* scan_rs,
4994 OopsInGenClosure* scan_perm, 4998 G1KlassScanClosure* scan_klasses,
4995 int worker_i) { 4999 int worker_i) {
4996 5000
4997 // First scan the strong roots, including the perm gen. 5001 // First scan the strong roots
4998 double ext_roots_start = os::elapsedTime(); 5002 double ext_roots_start = os::elapsedTime();
4999 double closure_app_time_sec = 0.0; 5003 double closure_app_time_sec = 0.0;
5000 5004
5001 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); 5005 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5002 BufferingOopsInGenClosure buf_scan_perm(scan_perm);
5003 buf_scan_perm.set_generation(perm_gen());
5004 5006
5005 // Walk the code cache w/o buffering, because StarTask cannot handle 5007 // Walk the code cache w/o buffering, because StarTask cannot handle
5006 // unaligned oop locations. 5008 // unaligned oop locations.
5007 G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots); 5009 G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
5008 5010
5009 process_strong_roots(false, // no scoping; this is parallel code 5011 process_strong_roots(false, // no scoping; this is parallel code
5010 collecting_perm_gen, so, 5012 is_scavenging, so,
5011 &buf_scan_non_heap_roots, 5013 &buf_scan_non_heap_roots,
5012 &eager_scan_code_roots, 5014 &eager_scan_code_roots,
5013 &buf_scan_perm); 5015 scan_klasses
5016 );
5014 5017
5015 // Now the CM ref_processor roots. 5018 // Now the CM ref_processor roots.
5016 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { 5019 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
5017 // We need to treat the discovered reference lists of the 5020 // We need to treat the discovered reference lists of the
5018 // concurrent mark ref processor as roots and keep entries 5021 // concurrent mark ref processor as roots and keep entries
5021 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots); 5024 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
5022 } 5025 }
5023 5026
5024 // Finish up any enqueued closure apps (attributed as object copy time). 5027 // Finish up any enqueued closure apps (attributed as object copy time).
5025 buf_scan_non_heap_roots.done(); 5028 buf_scan_non_heap_roots.done();
5026 buf_scan_perm.done(); 5029
5027 5030 double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
5028 double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() + 5031
5029 buf_scan_non_heap_roots.closure_app_seconds();
5030 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); 5032 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
5031 5033
5032 double ext_root_time_ms = 5034 double ext_root_time_ms =
5033 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0; 5035 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
5034 5036
5051 5053
5052 // Now scan the complement of the collection set. 5054 // Now scan the complement of the collection set.
5053 if (scan_rs != NULL) { 5055 if (scan_rs != NULL) {
5054 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); 5056 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
5055 } 5057 }
5056
5057 _process_strong_tasks->all_tasks_completed(); 5058 _process_strong_tasks->all_tasks_completed();
5058 } 5059 }
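g1_process_strong_roots() now takes an is_scavenging flag and a G1KlassScanClosure* in place of collecting_perm_gen and the OopsInGenClosure* perm closure. The BufferingOopsInGenClosure for perm scanning is gone, so object-copy time is derived solely from the buffered non-heap-root closure.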
5059 5060
5060 void 5061 void
5061 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, 5062 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
5111 // and different queues. 5112 // and different queues.
5112 5113
5113 class G1CopyingKeepAliveClosure: public OopClosure { 5114 class G1CopyingKeepAliveClosure: public OopClosure {
5114 G1CollectedHeap* _g1h; 5115 G1CollectedHeap* _g1h;
5115 OopClosure* _copy_non_heap_obj_cl; 5116 OopClosure* _copy_non_heap_obj_cl;
5116 OopsInHeapRegionClosure* _copy_perm_obj_cl; 5117 OopsInHeapRegionClosure* _copy_metadata_obj_cl;
5117 G1ParScanThreadState* _par_scan_state; 5118 G1ParScanThreadState* _par_scan_state;
5118 5119
5119 public: 5120 public:
5120 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h, 5121 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5121 OopClosure* non_heap_obj_cl, 5122 OopClosure* non_heap_obj_cl,
5122 OopsInHeapRegionClosure* perm_obj_cl, 5123 OopsInHeapRegionClosure* metadata_obj_cl,
5123 G1ParScanThreadState* pss): 5124 G1ParScanThreadState* pss):
5124 _g1h(g1h), 5125 _g1h(g1h),
5125 _copy_non_heap_obj_cl(non_heap_obj_cl), 5126 _copy_non_heap_obj_cl(non_heap_obj_cl),
5126 _copy_perm_obj_cl(perm_obj_cl), 5127 _copy_metadata_obj_cl(metadata_obj_cl),
5127 _par_scan_state(pss) 5128 _par_scan_state(pss)
5128 {} 5129 {}
5129 5130
5130 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 5131 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5131 virtual void do_oop( oop* p) { do_oop_work(p); } 5132 virtual void do_oop( oop* p) { do_oop_work(p); }
5146 // If the reference field is in the G1 heap then we can push 5147 // If the reference field is in the G1 heap then we can push
5147 // on the PSS queue. When the queue is drained (after each 5148 // on the PSS queue. When the queue is drained (after each
5148 // phase of reference processing) the object and it's followers 5149 // phase of reference processing) the object and it's followers
5149 // will be copied, the reference field set to point to the 5150 // will be copied, the reference field set to point to the
5150 // new location, and the RSet updated. Otherwise we need to 5151 // new location, and the RSet updated. Otherwise we need to
5151 // use the the non-heap or perm closures directly to copy 5152 // use the the non-heap or metadata closures directly to copy
5152 // the refernt object and update the pointer, while avoiding 5153 // the refernt object and update the pointer, while avoiding
5153 // updating the RSet. 5154 // updating the RSet.
5154 5155
5155 if (_g1h->is_in_g1_reserved(p)) { 5156 if (_g1h->is_in_g1_reserved(p)) {
5156 _par_scan_state->push_on_queue(p); 5157 _par_scan_state->push_on_queue(p);
5157 } else { 5158 } else {
5158 // The reference field is not in the G1 heap. 5159 assert(!ClassLoaderDataGraph::contains((address)p),
5159 if (_g1h->perm_gen()->is_in(p)) { 5160 err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
5160 _copy_perm_obj_cl->do_oop(p); 5161 PTR_FORMAT, p));
5161 } else {
5162 _copy_non_heap_obj_cl->do_oop(p); 5162 _copy_non_heap_obj_cl->do_oop(p);
5163 } 5163 }
5164 } 5164 }
5165 } 5165 }
5166 }
5167 }; 5166 };
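In the keep-alive path, a reference field outside the G1 heap can no longer be in the perm gen; the new assert uses ClassLoaderDataGraph::contains() to document that discovered fields are not expected to live in class metadata here, so only the non-heap closure is applied. The closure still carries the renamed _copy_metadata_obj_cl member, matching the G1ParScan*MetadataClosure instances used elsewhere.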
5168 5167
5169 // Serial drain queue closure. Called as the 'complete_gc' 5168 // Serial drain queue closure. Called as the 'complete_gc'
5170 // closure for each discovered list in some of the 5169 // closure for each discovered list in some of the
5171 // reference processing phases. 5170 // reference processing phases.
5256 pss.set_evac_closure(&scan_evac_cl); 5255 pss.set_evac_closure(&scan_evac_cl);
5257 pss.set_evac_failure_closure(&evac_failure_cl); 5256 pss.set_evac_failure_closure(&evac_failure_cl);
5258 pss.set_partial_scan_closure(&partial_scan_cl); 5257 pss.set_partial_scan_closure(&partial_scan_cl);
5259 5258
5260 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); 5259 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5261 G1ParScanPermClosure only_copy_perm_cl(_g1h, &pss, NULL); 5260 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
5262 5261
5263 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); 5262 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5264 G1ParScanAndMarkPermClosure copy_mark_perm_cl(_g1h, &pss, NULL); 5263 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5265 5264
5266 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; 5265 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5267 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl; 5266 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5268 5267
5269 if (_g1h->g1_policy()->during_initial_mark_pause()) { 5268 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5270 // We also need to mark copied objects. 5269 // We also need to mark copied objects.
5271 copy_non_heap_cl = &copy_mark_non_heap_cl; 5270 copy_non_heap_cl = &copy_mark_non_heap_cl;
5272 copy_perm_cl = &copy_mark_perm_cl; 5271 copy_metadata_cl = &copy_mark_metadata_cl;
5273 } 5272 }
5274 5273
5275 // Keep alive closure. 5274 // Keep alive closure.
5276 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss); 5275 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5277 5276
5278 // Complete GC closure 5277 // Complete GC closure
5279 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator); 5278 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5280 5279
5281 // Call the reference processing task's work routine. 5280 // Call the reference processing task's work routine.
5370 5369
5371 assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); 5370 assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5372 5371
5373 5372
5374 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); 5373 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5375 G1ParScanPermClosure only_copy_perm_cl(_g1h, &pss, NULL); 5374 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
5376 5375
5377 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); 5376 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5378 G1ParScanAndMarkPermClosure copy_mark_perm_cl(_g1h, &pss, NULL); 5377 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5379 5378
5380 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; 5379 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5381 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl; 5380 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5382 5381
5383 if (_g1h->g1_policy()->during_initial_mark_pause()) { 5382 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5384 // We also need to mark copied objects. 5383 // We also need to mark copied objects.
5385 copy_non_heap_cl = &copy_mark_non_heap_cl; 5384 copy_non_heap_cl = &copy_mark_non_heap_cl;
5386 copy_perm_cl = &copy_mark_perm_cl; 5385 copy_metadata_cl = &copy_mark_metadata_cl;
5387 } 5386 }
5388 5387
5389 // Is alive closure 5388 // Is alive closure
5390 G1AlwaysAliveClosure always_alive(_g1h); 5389 G1AlwaysAliveClosure always_alive(_g1h);
5391 5390
5392 // Copying keep alive closure. Applied to referent objects that need 5391 // Copying keep alive closure. Applied to referent objects that need
5393 // to be copied. 5392 // to be copied.
5394 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss); 5393 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5395 5394
5396 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 5395 ReferenceProcessor* rp = _g1h->ref_processor_cm();
5397 5396
5398 uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q(); 5397 uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5399 uint stride = MIN2(MAX2(_n_workers, 1U), limit); 5398 uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5500 pss.set_partial_scan_closure(&partial_scan_cl); 5499 pss.set_partial_scan_closure(&partial_scan_cl);
5501 5500
5502 assert(pss.refs()->is_empty(), "pre-condition"); 5501 assert(pss.refs()->is_empty(), "pre-condition");
5503 5502
5504 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL); 5503 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5505 G1ParScanPermClosure only_copy_perm_cl(this, &pss, NULL); 5504 G1ParScanMetadataClosure only_copy_metadata_cl(this, &pss, NULL);
5506 5505
5507 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL); 5506 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5508 G1ParScanAndMarkPermClosure copy_mark_perm_cl(this, &pss, NULL); 5507 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5509 5508
5510 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; 5509 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5511 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl; 5510 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5512 5511
5513 if (_g1h->g1_policy()->during_initial_mark_pause()) { 5512 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5514 // We also need to mark copied objects. 5513 // We also need to mark copied objects.
5515 copy_non_heap_cl = &copy_mark_non_heap_cl; 5514 copy_non_heap_cl = &copy_mark_non_heap_cl;
5516 copy_perm_cl = &copy_mark_perm_cl; 5515 copy_metadata_cl = &copy_mark_metadata_cl;
5517 } 5516 }
5518 5517
5519 // Keep alive closure. 5518 // Keep alive closure.
5520 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss); 5519 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
5521 5520
5522 // Serial Complete GC closure 5521 // Serial Complete GC closure
5523 G1STWDrainQueueClosure drain_queue(this, &pss); 5522 G1STWDrainQueueClosure drain_queue(this, &pss);
5524 5523
5525 // Setup the soft refs policy... 5524 // Setup the soft refs policy...
6239 } 6238 }
6240 6239
6241 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { 6240 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6242 HeapRegion* hr = heap_region_containing(p); 6241 HeapRegion* hr = heap_region_containing(p);
6243 if (hr == NULL) { 6242 if (hr == NULL) {
6244 return is_in_permanent(p); 6243 return false;
6245 } else { 6244 } else {
6246 return hr->is_in(p); 6245 return hr->is_in(p);
6247 } 6246 }
6248 } 6247 }
6249 6248
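The remaining hunks are mechanical renames and simplifications from the same change: G1ParScanPermClosure and G1ParScanAndMarkPermClosure become the corresponding *Metadata* closures throughout reference processing, copy_perm_cl becomes copy_metadata_cl, and is_in_closed_subset() returns false for addresses outside any heap region instead of consulting the perm gen.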