comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 14909:4ca6dc0799b6

Backout jdk9 merge
author Gilles Duboscq <duboscq@ssw.jku.at>
date Tue, 01 Apr 2014 13:57:07 +0200
parents d8041d695d19
children 52b4284cb496
comparing revisions 14908:8db6e76cb658 and 14909:4ca6dc0799b6
1 /* 1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
48 #include "gc_implementation/shared/gcTimer.hpp" 48 #include "gc_implementation/shared/gcTimer.hpp"
49 #include "gc_implementation/shared/gcTrace.hpp" 49 #include "gc_implementation/shared/gcTrace.hpp"
50 #include "gc_implementation/shared/gcTraceTime.hpp" 50 #include "gc_implementation/shared/gcTraceTime.hpp"
51 #include "gc_implementation/shared/isGCActiveMark.hpp" 51 #include "gc_implementation/shared/isGCActiveMark.hpp"
52 #include "memory/gcLocker.inline.hpp" 52 #include "memory/gcLocker.inline.hpp"
53 #include "memory/genOopClosures.inline.hpp"
53 #include "memory/generationSpec.hpp" 54 #include "memory/generationSpec.hpp"
54 #include "memory/iterator.hpp"
55 #include "memory/referenceProcessor.hpp" 55 #include "memory/referenceProcessor.hpp"
56 #include "oops/oop.inline.hpp" 56 #include "oops/oop.inline.hpp"
57 #include "oops/oop.pcgc.inline.hpp" 57 #include "oops/oop.pcgc.inline.hpp"
58 #include "runtime/vmThread.hpp" 58 #include "runtime/vmThread.hpp"
59 #include "utilities/ticks.hpp" 59 #include "utilities/ticks.hpp"
1573 1573
1574 // This code is mostly copied from TenuredGeneration. 1574 // This code is mostly copied from TenuredGeneration.
1575 void 1575 void
1576 G1CollectedHeap:: 1576 G1CollectedHeap::
1577 resize_if_necessary_after_full_collection(size_t word_size) { 1577 resize_if_necessary_after_full_collection(size_t word_size) {
1578 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
1579
1578 // Include the current allocation, if any, and bytes that will be 1580 // Include the current allocation, if any, and bytes that will be
1579 // pre-allocated to support collections, as "used". 1581 // pre-allocated to support collections, as "used".
1580 const size_t used_after_gc = used(); 1582 const size_t used_after_gc = used();
1581 const size_t capacity_after_gc = capacity(); 1583 const size_t capacity_after_gc = capacity();
1582 const size_t free_after_gc = capacity_after_gc - used_after_gc; 1584 const size_t free_after_gc = capacity_after_gc - used_after_gc;
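The hunk above only shows the inputs to the free-ratio based resize: used_after_gc, capacity_after_gc and free_after_gc, plus the MinHeapFreeRatio <= MaxHeapFreeRatio sanity check. Below is a minimal, self-contained sketch of how such a free-ratio bound can be turned into a capacity target; the helper desired_capacity and the example numbers are illustrative assumptions, not the HotSpot implementation.

#include <cstddef>
#include <cstdio>

static size_t desired_capacity(size_t used_after_gc, double free_ratio) {
  // capacity * (1 - free_ratio) == used  =>  capacity = used / (1 - free_ratio)
  return (size_t)((double)used_after_gc / (1.0 - free_ratio));
}

int main() {
  const size_t used_after_gc = (size_t)512 * 1024 * 1024;  // 512 MB live after a full GC
  const double min_free = 0.40;  // a MinHeapFreeRatio of 40, written as a fraction
  const double max_free = 0.70;  // a MaxHeapFreeRatio of 70, written as a fraction
  size_t minimum_desired = desired_capacity(used_after_gc, min_free);  // expand up to this
  size_t maximum_desired = desired_capacity(used_after_gc, max_free);  // shrink down to this
  std::printf("expand to at least %zu MB, shrink to at most %zu MB\n",
              minimum_desired >> 20, maximum_desired >> 20);
  return 0;
}

With 512 MB live and a 40% minimum free ratio, the target works out to roughly 853 MB of committed heap before any shrinking is considered.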
2264 &_is_alive_closure_stw, 2266 &_is_alive_closure_stw,
2265 // is alive closure 2267 // is alive closure
2266 // (for efficiency/performance) 2268 // (for efficiency/performance)
2267 false); 2269 false);
2268 // Setting next fields of discovered 2270 // Setting next fields of discovered
2269 // lists does not require a barrier. 2271 // lists requires a barrier.
2270 } 2272 }
2271 2273
2272 size_t G1CollectedHeap::capacity() const { 2274 size_t G1CollectedHeap::capacity() const {
2273 return _g1_committed.byte_size(); 2275 return _g1_committed.byte_size();
2274 } 2276 }
2370 2372
2371 size_t G1CollectedHeap::recalculate_used() const { 2373 size_t G1CollectedHeap::recalculate_used() const {
2372 SumUsedClosure blk; 2374 SumUsedClosure blk;
2373 heap_region_iterate(&blk); 2375 heap_region_iterate(&blk);
2374 return blk.result(); 2376 return blk.result();
2377 }
2378
2379 size_t G1CollectedHeap::unsafe_max_alloc() {
2380 if (free_regions() > 0) return HeapRegion::GrainBytes;
2381 // otherwise, is there space in the current allocation region?
2382
2383 // We need to store the current allocation region in a local variable
2384 // here. The problem is that this method doesn't take any locks and
2385 // there may be other threads which overwrite the current allocation
2386 // region field. attempt_allocation(), for example, sets it to NULL
2387 // and this can happen *after* the NULL check here but before the call
2388 // to free(), resulting in a SIGSEGV. Note that this doesn't appear
2389 // to be a problem in the optimized build, since the two loads of the
2390 // current allocation region field are optimized away.
2391 HeapRegion* hr = _mutator_alloc_region.get();
2392 if (hr == NULL) {
2393 return 0;
2394 }
2395 return hr->free();
2375 } 2396 }
2376 2397
2377 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { 2398 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2378 switch (cause) { 2399 switch (cause) {
2379 case GCCause::_gc_locker: return GCLockerInvokesConcurrent; 2400 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
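The comment in unsafe_max_alloc above explains why the current allocation region has to be read into a local variable before the NULL check: the method takes no locks, and another thread can clear the field between two loads. The self-contained sketch below shows that load-once pattern with std::atomic; RegionSketch and current_alloc_region are made-up names, not HotSpot types.

#include <atomic>
#include <cstddef>
#include <cstdio>

struct RegionSketch { size_t free_bytes; size_t free() const { return free_bytes; } };

static std::atomic<RegionSketch*> current_alloc_region{nullptr};

size_t unsafe_max_alloc_sketch() {
  RegionSketch* hr = current_alloc_region.load();  // read the shared field exactly once
  if (hr == nullptr) {
    return 0;                                      // another thread retired the region
  }
  return hr->free();                               // safe: uses the local copy, not a re-read
}

int main() {
  RegionSketch r{4096};
  current_alloc_region.store(&r);
  std::printf("%zu\n", unsafe_max_alloc_sketch());
  return 0;
}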
3002 bool G1CollectedHeap::supports_tlab_allocation() const { 3023 bool G1CollectedHeap::supports_tlab_allocation() const {
3003 return true; 3024 return true;
3004 } 3025 }
3005 3026
3006 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { 3027 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
3007 return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes; 3028 return HeapRegion::GrainBytes;
3008 }
3009
3010 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
3011 return young_list()->eden_used_bytes();
3012 }
3013
3014 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
3015 // must be smaller than the humongous object limit.
3016 size_t G1CollectedHeap::max_tlab_size() const {
3017 return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
3018 } 3029 }
3019 3030
3020 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { 3031 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
3021 // Return the remaining space in the cur alloc region, but not less than 3032 // Return the remaining space in the cur alloc region, but not less than
3022 // the min TLAB size. 3033 // the min TLAB size.
3024 // Also, this value can be at most the humongous object threshold, 3035 // Also, this value can be at most the humongous object threshold,
3025 // since we can't allow tlabs to grow big enough to accommodate 3036 // since we can't allow tlabs to grow big enough to accommodate
3026 // humongous objects. 3037 // humongous objects.
3027 3038
3028 HeapRegion* hr = _mutator_alloc_region.get(); 3039 HeapRegion* hr = _mutator_alloc_region.get();
3029 size_t max_tlab = max_tlab_size() * wordSize; 3040 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
3030 if (hr == NULL) { 3041 if (hr == NULL) {
3031 return max_tlab; 3042 return max_tlab_size;
3032 } else { 3043 } else {
3033 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab); 3044 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
3034 } 3045 }
3035 } 3046 }
3036 3047
3037 size_t G1CollectedHeap::max_capacity() const { 3048 size_t G1CollectedHeap::max_capacity() const {
3038 return _g1_reserved.byte_size(); 3049 return _g1_reserved.byte_size();
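unsafe_max_tlab_alloc above clamps the value it returns between MinTLABSize and the humongous-object limit, using the free space of the current allocation region. A small stand-alone sketch of that clamping, with illustrative constants rather than the real flag values:

#include <algorithm>
#include <cstddef>
#include <cstdio>

size_t tlab_limit_sketch(size_t region_free, size_t min_tlab, size_t humongous_limit) {
  // never offer less than the minimum TLAB size, and never enough
  // to let a TLAB hold a humongous object
  return std::min(std::max(region_free, min_tlab), humongous_limit);
}

int main() {
  std::printf("%zu\n", tlab_limit_sketch(64 * 1024, 2 * 1024, 512 * 1024));
  return 0;
}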
3093 default: ShouldNotReachHere(); 3104 default: ShouldNotReachHere();
3094 } 3105 }
3095 return NULL; // keep some compilers happy 3106 return NULL; // keep some compilers happy
3096 } 3107 }
3097 3108
3098 class VerifyRootsClosure: public OopClosure { 3109 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3110 // pass it as the perm_blk to SharedHeap::process_strong_roots.
3111 // When process_strong_roots stop calling perm_blk->younger_refs_iterate
3112 // we can change this closure to extend the simpler OopClosure.
3113 class VerifyRootsClosure: public OopsInGenClosure {
3099 private: 3114 private:
3100 G1CollectedHeap* _g1h; 3115 G1CollectedHeap* _g1h;
3101 VerifyOption _vo; 3116 VerifyOption _vo;
3102 bool _failures; 3117 bool _failures;
3103 public: 3118 public:
3129 3144
3130 void do_oop(oop* p) { do_oop_nv(p); } 3145 void do_oop(oop* p) { do_oop_nv(p); }
3131 void do_oop(narrowOop* p) { do_oop_nv(p); } 3146 void do_oop(narrowOop* p) { do_oop_nv(p); }
3132 }; 3147 };
3133 3148
3134 class G1VerifyCodeRootOopClosure: public OopClosure { 3149 class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
3135 G1CollectedHeap* _g1h; 3150 G1CollectedHeap* _g1h;
3136 OopClosure* _root_cl; 3151 OopClosure* _root_cl;
3137 nmethod* _nm; 3152 nmethod* _nm;
3138 VerifyOption _vo; 3153 VerifyOption _vo;
3139 bool _failures; 3154 bool _failures;
3402 assert(Thread::current()->is_VM_thread(), 3417 assert(Thread::current()->is_VM_thread(),
3403 "Expected to be executed serially by the VM thread at this point"); 3418 "Expected to be executed serially by the VM thread at this point");
3404 3419
3405 if (!silent) { gclog_or_tty->print("Roots "); } 3420 if (!silent) { gclog_or_tty->print("Roots "); }
3406 VerifyRootsClosure rootsCl(vo); 3421 VerifyRootsClosure rootsCl(vo);
3422 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3423 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3407 VerifyKlassClosure klassCl(this, &rootsCl); 3424 VerifyKlassClosure klassCl(this, &rootsCl);
3408 3425
3409 // We apply the relevant closures to all the oops in the 3426 // We apply the relevant closures to all the oops in the
3410 // system dictionary, class loader data graph and the string table. 3427 // system dictionary, the string table and the code cache.
3411 // Don't verify the code cache here, since it's verified below. 3428 const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3412 const int so = SO_AllClasses | SO_Strings;
3413 3429
3414 // Need cleared claim bits for the strong roots processing 3430 // Need cleared claim bits for the strong roots processing
3415 ClassLoaderDataGraph::clear_claimed_marks(); 3431 ClassLoaderDataGraph::clear_claimed_marks();
3416 3432
3417 process_strong_roots(true, // activate StrongRootsScope 3433 process_strong_roots(true, // activate StrongRootsScope
3434 false, // we set "is scavenging" to false,
3435 // so we don't reset the dirty cards.
3418 ScanningOption(so), // roots scanning options 3436 ScanningOption(so), // roots scanning options
3419 &rootsCl, 3437 &rootsCl,
3438 &blobsCl,
3420 &klassCl 3439 &klassCl
3421 ); 3440 );
3422
3423 // Verify the nmethods in the code cache.
3424 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3425 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3426 CodeCache::blobs_do(&blobsCl);
3427 3441
3428 bool failures = rootsCl.failures() || codeRootsCl.failures(); 3442 bool failures = rootsCl.failures() || codeRootsCl.failures();
3429 3443
3430 if (vo != VerifyOption_G1UseMarkWord) { 3444 if (vo != VerifyOption_G1UseMarkWord) {
3431 // If we're verifying during a full GC then the region sets 3445 // If we're verifying during a full GC then the region sets
3668 3682
3669 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { 3683 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3670 // always_do_update_barrier = false; 3684 // always_do_update_barrier = false;
3671 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); 3685 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3672 // Fill TLAB's and such 3686 // Fill TLAB's and such
3673 accumulate_statistics_all_tlabs();
3674 ensure_parsability(true); 3687 ensure_parsability(true);
3675 3688
3676 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) && 3689 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3677 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { 3690 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3678 g1_rem_set()->print_periodic_summary_info("Before GC RS summary"); 3691 g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3692 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" 3705 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3693 // is set. 3706 // is set.
3694 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), 3707 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3695 "derived pointer present")); 3708 "derived pointer present"));
3696 // always_do_update_barrier = true; 3709 // always_do_update_barrier = true;
3697
3698 resize_all_tlabs();
3699 3710
3700 // We have just completed a GC. Update the soft reference 3711 // We have just completed a GC. Update the soft reference
3701 // policy with the new heap occupancy 3712 // policy with the new heap occupancy
3702 Universe::update_heap_info_at_gc(); 3713 Universe::update_heap_info_at_gc();
3703 } 3714 }
4542 } 4553 }
4543 4554
4544 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) : 4555 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4545 ParGCAllocBuffer(gclab_word_size), _retired(false) { } 4556 ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4546 4557
4547 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp) 4558 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
4548 : _g1h(g1h), 4559 : _g1h(g1h),
4549 _refs(g1h->task_queue(queue_num)), 4560 _refs(g1h->task_queue(queue_num)),
4550 _dcq(&g1h->dirty_card_queue_set()), 4561 _dcq(&g1h->dirty_card_queue_set()),
4551 _ct_bs(g1h->g1_barrier_set()), 4562 _ct_bs(g1h->g1_barrier_set()),
4552 _g1_rem(g1h->g1_rem_set()), 4563 _g1_rem(g1h->g1_rem_set()),
4553 _hash_seed(17), _queue_num(queue_num), 4564 _hash_seed(17), _queue_num(queue_num),
4554 _term_attempts(0), 4565 _term_attempts(0),
4555 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), 4566 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4556 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), 4567 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4557 _age_table(false), _scanner(g1h, this, rp), 4568 _age_table(false),
4558 _strong_roots_time(0), _term_time(0), 4569 _strong_roots_time(0), _term_time(0),
4559 _alloc_buffer_waste(0), _undo_waste(0) { 4570 _alloc_buffer_waste(0), _undo_waste(0) {
4560 // we allocate G1YoungSurvRateNumRegions plus one entries, since 4571 // we allocate G1YoungSurvRateNumRegions plus one entries, since
4561 // we "sacrifice" entry 0 to keep track of surviving bytes for 4572 // we "sacrifice" entry 0 to keep track of surviving bytes for
4562 // non-young regions (where the age is -1) 4573 // non-young regions (where the age is -1)
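The constructor comment above notes that entry 0 of the surviving-bytes array is "sacrificed" for non-young regions, whose age index is -1. A tiny sketch of that +1 index shift, using hypothetical names rather than the actual G1 fields:

#include <vector>
#include <cstddef>
#include <cstdio>

struct SurvivingWordsSketch {
  std::vector<size_t> words;                       // slot 0 aggregates all non-young regions
  explicit SurvivingWordsSketch(int young_regions) : words(young_regions + 1, 0) {}
  void add(int young_index_in_cset, size_t word_sz) {
    // -1 (non-young) maps to slot 0, young indexes 0..N-1 map to slots 1..N
    words[young_index_in_cset + 1] += word_sz;
  }
};

int main() {
  SurvivingWordsSketch s(4);
  s.add(-1, 10);   // object copied out of a non-young region
  s.add(2, 20);    // object copied out of young region #2
  std::printf("%zu %zu\n", s.words[0], s.words[3]);
  return 0;
}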
4661 } while (!refs()->is_empty()); 4672 } while (!refs()->is_empty());
4662 } 4673 }
4663 4674
4664 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, 4675 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4665 G1ParScanThreadState* par_scan_state) : 4676 G1ParScanThreadState* par_scan_state) :
4666 _g1(g1), _par_scan_state(par_scan_state), 4677 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4667 _worker_id(par_scan_state->queue_num()) { } 4678 _par_scan_state(par_scan_state),
4668 4679 _worker_id(par_scan_state->queue_num()),
4669 void G1ParCopyHelper::mark_object(oop obj) { 4680 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4681 _mark_in_progress(_g1->mark_in_progress()) { }
4682
4683 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4684 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
4670 #ifdef ASSERT 4685 #ifdef ASSERT
4671 HeapRegion* hr = _g1->heap_region_containing(obj); 4686 HeapRegion* hr = _g1->heap_region_containing(obj);
4672 assert(hr != NULL, "sanity"); 4687 assert(hr != NULL, "sanity");
4673 assert(!hr->in_collection_set(), "should not mark objects in the CSet"); 4688 assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4674 #endif // ASSERT 4689 #endif // ASSERT
4675 4690
4676 // We know that the object is not moving so it's safe to read its size. 4691 // We know that the object is not moving so it's safe to read its size.
4677 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id); 4692 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4678 } 4693 }
4679 4694
4680 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) { 4695 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4696 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4697 ::mark_forwarded_object(oop from_obj, oop to_obj) {
4681 #ifdef ASSERT 4698 #ifdef ASSERT
4682 assert(from_obj->is_forwarded(), "from obj should be forwarded"); 4699 assert(from_obj->is_forwarded(), "from obj should be forwarded");
4683 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee"); 4700 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4684 assert(from_obj != to_obj, "should not be self-forwarded"); 4701 assert(from_obj != to_obj, "should not be self-forwarded");
4685 4702
4697 // well-formed. So we have to read its size from its from-space 4714 // well-formed. So we have to read its size from its from-space
4698 // image which we know should not be changing. 4715 // image which we know should not be changing.
4699 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id); 4716 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4700 } 4717 }
4701 4718
4702 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) { 4719 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4720 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4721 ::copy_to_survivor_space(oop old) {
4703 size_t word_sz = old->size(); 4722 size_t word_sz = old->size();
4704 HeapRegion* from_region = _g1h->heap_region_containing_raw(old); 4723 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4705 // +1 to make the -1 indexes valid... 4724 // +1 to make the -1 indexes valid...
4706 int young_index = from_region->young_index_in_cset()+1; 4725 int young_index = from_region->young_index_in_cset()+1;
4707 assert( (from_region->is_young() && young_index > 0) || 4726 assert( (from_region->is_young() && young_index > 0) ||
4708 (!from_region->is_young() && young_index == 0), "invariant" ); 4727 (!from_region->is_young() && young_index == 0), "invariant" );
4709 G1CollectorPolicy* g1p = _g1h->g1_policy(); 4728 G1CollectorPolicy* g1p = _g1->g1_policy();
4710 markOop m = old->mark(); 4729 markOop m = old->mark();
4711 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() 4730 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4712 : m->age(); 4731 : m->age();
4713 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, 4732 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4714 word_sz); 4733 word_sz);
4715 HeapWord* obj_ptr = allocate(alloc_purpose, word_sz); 4734 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4716 #ifndef PRODUCT 4735 #ifndef PRODUCT
4717 // Should this evacuation fail? 4736 // Should this evacuation fail?
4718 if (_g1h->evacuation_should_fail()) { 4737 if (_g1->evacuation_should_fail()) {
4719 if (obj_ptr != NULL) { 4738 if (obj_ptr != NULL) {
4720 undo_allocation(alloc_purpose, obj_ptr, word_sz); 4739 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4721 obj_ptr = NULL; 4740 obj_ptr = NULL;
4722 } 4741 }
4723 } 4742 }
4724 #endif // !PRODUCT 4743 #endif // !PRODUCT
4725 4744
4726 if (obj_ptr == NULL) { 4745 if (obj_ptr == NULL) {
4727 // This will either forward-to-self, or detect that someone else has 4746 // This will either forward-to-self, or detect that someone else has
4728 // installed a forwarding pointer. 4747 // installed a forwarding pointer.
4729 return _g1h->handle_evacuation_failure_par(this, old); 4748 return _g1->handle_evacuation_failure_par(_par_scan_state, old);
4730 } 4749 }
4731 4750
4732 oop obj = oop(obj_ptr); 4751 oop obj = oop(obj_ptr);
4733 4752
4734 // We're going to allocate linearly, so might as well prefetch ahead. 4753 // We're going to allocate linearly, so might as well prefetch ahead.
4757 obj->incr_age(); 4776 obj->incr_age();
4758 } else { 4777 } else {
4759 m = m->incr_age(); 4778 m = m->incr_age();
4760 obj->set_mark(m); 4779 obj->set_mark(m);
4761 } 4780 }
4762 age_table()->add(obj, word_sz); 4781 _par_scan_state->age_table()->add(obj, word_sz);
4763 } else { 4782 } else {
4764 obj->set_mark(m); 4783 obj->set_mark(m);
4765 } 4784 }
4766 4785
4767 size_t* surv_young_words = surviving_young_words(); 4786 size_t* surv_young_words = _par_scan_state->surviving_young_words();
4768 surv_young_words[young_index] += word_sz; 4787 surv_young_words[young_index] += word_sz;
4769 4788
4770 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { 4789 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4771 // We keep track of the next start index in the length field of 4790 // We keep track of the next start index in the length field of
4772 // the to-space object. The actual length can be found in the 4791 // the to-space object. The actual length can be found in the
4773 // length field of the from-space object. 4792 // length field of the from-space object.
4774 arrayOop(obj)->set_length(0); 4793 arrayOop(obj)->set_length(0);
4775 oop* old_p = set_partial_array_mask(old); 4794 oop* old_p = set_partial_array_mask(old);
4776 push_on_queue(old_p); 4795 _par_scan_state->push_on_queue(old_p);
4777 } else { 4796 } else {
4778 // No point in using the slower heap_region_containing() method, 4797 // No point in using the slower heap_region_containing() method,
4779 // given that we know obj is in the heap. 4798 // given that we know obj is in the heap.
4780 _scanner.set_region(_g1h->heap_region_containing_raw(obj)); 4799 _scanner.set_region(_g1->heap_region_containing_raw(obj));
4781 obj->oop_iterate_backwards(&_scanner); 4800 obj->oop_iterate_backwards(&_scanner);
4782 } 4801 }
4783 } else { 4802 } else {
4784 undo_allocation(alloc_purpose, obj_ptr, word_sz); 4803 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4785 obj = forward_ptr; 4804 obj = forward_ptr;
4786 } 4805 }
4787 return obj; 4806 return obj;
4788 } 4807 }
4789 4808
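copy_to_survivor_space above chunks large object arrays instead of scanning them in one go: the to-space copy's length field temporarily tracks the next chunk start, and the queued pointer is tagged via set_partial_array_mask so the drain loop can tell partial-array tasks from ordinary oop locations. The sketch below mirrors those helper names, but the tag value and the definitions are assumptions for illustration only.

#include <cstdint>
#include <cstdio>

const uintptr_t PARTIAL_ARRAY_MASK = 0x2;   // hypothetical tag bit

inline void* set_partial_array_mask(void* obj) {
  return (void*)((uintptr_t)obj | PARTIAL_ARRAY_MASK);
}
inline bool has_partial_array_mask(void* p) {
  return ((uintptr_t)p & PARTIAL_ARRAY_MASK) != 0;
}
inline void* clear_partial_array_mask(void* p) {
  return (void*)((uintptr_t)p & ~PARTIAL_ARRAY_MASK);
}

int main() {
  int big_array;                                   // stands in for a large objArray
  void* task = set_partial_array_mask(&big_array); // what gets pushed on the work queue
  std::printf("tagged: %d, same object back: %d\n",
              has_partial_array_mask(task),
              clear_partial_array_mask(task) == (void*)&big_array);
  return 0;
}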
4792 if (_g1->heap_region_containing_raw(new_obj)->is_young()) { 4811 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4793 _scanned_klass->record_modified_oops(); 4812 _scanned_klass->record_modified_oops();
4794 } 4813 }
4795 } 4814 }
4796 4815
4797 template <G1Barrier barrier, bool do_mark_object> 4816 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4798 template <class T> 4817 template <class T>
4799 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) { 4818 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4800 T heap_oop = oopDesc::load_heap_oop(p); 4819 ::do_oop_work(T* p) {
4801 4820 oop obj = oopDesc::load_decode_heap_oop(p);
4802 if (oopDesc::is_null(heap_oop)) { 4821 assert(barrier != G1BarrierRS || obj != NULL,
4803 return; 4822 "Precondition: G1BarrierRS implies obj is non-NULL");
4804 }
4805
4806 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4807 4823
4808 assert(_worker_id == _par_scan_state->queue_num(), "sanity"); 4824 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4809 4825
4826 // here the null check is implicit in the cset_fast_test() test
4810 if (_g1->in_cset_fast_test(obj)) { 4827 if (_g1->in_cset_fast_test(obj)) {
4811 oop forwardee; 4828 oop forwardee;
4812 if (obj->is_forwarded()) { 4829 if (obj->is_forwarded()) {
4813 forwardee = obj->forwardee(); 4830 forwardee = obj->forwardee();
4814 } else { 4831 } else {
4815 forwardee = _par_scan_state->copy_to_survivor_space(obj); 4832 forwardee = copy_to_survivor_space(obj);
4816 } 4833 }
4817 assert(forwardee != NULL, "forwardee should not be NULL"); 4834 assert(forwardee != NULL, "forwardee should not be NULL");
4818 oopDesc::encode_store_heap_oop(p, forwardee); 4835 oopDesc::encode_store_heap_oop(p, forwardee);
4819 if (do_mark_object && forwardee != obj) { 4836 if (do_mark_object && forwardee != obj) {
4820 // If the object is self-forwarded we don't need to explicitly 4837 // If the object is self-forwarded we don't need to explicitly
4821 // mark it, the evacuation failure protocol will do so. 4838 // mark it, the evacuation failure protocol will do so.
4822 mark_forwarded_object(obj, forwardee); 4839 mark_forwarded_object(obj, forwardee);
4823 } 4840 }
4824 4841
4825 if (barrier == G1BarrierKlass) { 4842 // When scanning the RS, we only care about objs in CS.
4843 if (barrier == G1BarrierRS) {
4844 _par_scan_state->update_rs(_from, p, _worker_id);
4845 } else if (barrier == G1BarrierKlass) {
4826 do_klass_barrier(p, forwardee); 4846 do_klass_barrier(p, forwardee);
4827 } 4847 }
4828 } else { 4848 } else {
4829 // The object is not in collection set. If we're a root scanning 4849 // The object is not in collection set. If we're a root scanning
4830 // closure during an initial mark pause (i.e. do_mark_object will 4850 // closure during an initial mark pause (i.e. do_mark_object will
4831 // be true) then attempt to mark the object. 4851 // be true) then attempt to mark the object.
4832 if (do_mark_object) { 4852 if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
4833 mark_object(obj); 4853 mark_object(obj);
4834 } 4854 }
4835 } 4855 }
4836 4856
4837 if (barrier == G1BarrierEvac) { 4857 if (barrier == G1BarrierEvac && obj != NULL) {
4838 _par_scan_state->update_rs(_from, p, _worker_id); 4858 _par_scan_state->update_rs(_from, p, _worker_id);
4839 } 4859 }
4840 } 4860
4841 4861 if (do_gen_barrier && obj != NULL) {
4842 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p); 4862 par_do_barrier(p);
4843 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p); 4863 }
4864 }
4865
4866 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
4867 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4844 4868
4845 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { 4869 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4846 assert(has_partial_array_mask(p), "invariant"); 4870 assert(has_partial_array_mask(p), "invariant");
4847 oop from_obj = clear_partial_array_mask(p); 4871 oop from_obj = clear_partial_array_mask(p);
4848 4872
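The do_oop_work template above only evacuates references that point into the collection set, lets a forwarding pointer installed by another worker win over making a fresh copy, and merely marks objects outside the CSet when do_mark_object is set. A condensed, self-contained sketch of that dispatch follows; every type and helper in it is a stand-in, not the HotSpot API.

#include <cstdio>

struct Obj {
  bool in_cset;        // does the object live in the collection set?
  Obj* forward;        // forwarding pointer, set once the object has been copied
};

static Obj* copy_to_survivor_space(Obj* from) {
  Obj* to = new Obj{false, nullptr};   // toy "copy" into a to-space object
  from->forward = to;                  // publish the forwarding pointer
  return to;
}

static void evacuate_reference(Obj** p, bool do_mark_object) {
  Obj* obj = *p;
  if (obj == nullptr) return;                      // null references are skipped
  if (obj->in_cset) {
    Obj* fwd = (obj->forward != nullptr) ? obj->forward          // someone already copied it
                                         : copy_to_survivor_space(obj);
    *p = fwd;                                      // redirect the scanned slot
    if (do_mark_object && fwd != obj) { /* grey the new copy for concurrent marking */ }
  } else if (do_mark_object) {
    /* roots outside the collection set are only marked, never moved */
  }
}

int main() {
  Obj* o = new Obj{true, nullptr};
  Obj* slot = o;
  evacuate_reference(&slot, false);
  std::printf("moved: %s\n", slot != o ? "yes" : "no");
  return 0;
}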
5029 ResourceMark rm; 5053 ResourceMark rm;
5030 HandleMark hm; 5054 HandleMark hm;
5031 5055
5032 ReferenceProcessor* rp = _g1h->ref_processor_stw(); 5056 ReferenceProcessor* rp = _g1h->ref_processor_stw();
5033 5057
5034 G1ParScanThreadState pss(_g1h, worker_id, rp); 5058 G1ParScanThreadState pss(_g1h, worker_id);
5035 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp); 5059 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
5036 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp); 5060 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
5037 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp); 5061 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp);
5038 5062
5039 pss.set_evac_closure(&scan_evac_cl); 5063 pss.set_evac_closure(&scan_evac_cl);
5122 double ext_roots_start = os::elapsedTime(); 5146 double ext_roots_start = os::elapsedTime();
5123 double closure_app_time_sec = 0.0; 5147 double closure_app_time_sec = 0.0;
5124 5148
5125 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); 5149 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5126 5150
5151 assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
5152 // Walk the code cache/strong code roots w/o buffering, because StarTask
5153 // cannot handle unaligned oop locations.
5154 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
5155
5127 process_strong_roots(false, // no scoping; this is parallel code 5156 process_strong_roots(false, // no scoping; this is parallel code
5128 so, 5157 is_scavenging, so,
5129 &buf_scan_non_heap_roots, 5158 &buf_scan_non_heap_roots,
5159 &eager_scan_code_roots,
5130 scan_klasses 5160 scan_klasses
5131 ); 5161 );
5132 5162
5133 // Now the CM ref_processor roots. 5163 // Now the CM ref_processor roots.
5134 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { 5164 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
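g1_process_strong_roots above wraps the non-heap root closure in a BufferingOopClosure so root locations are collected first and the underlying closure is applied in a separate pass (the surrounding code times ext_roots and closure application separately), while code roots are walked without buffering. The class below is a generic illustration of such a buffering closure, not the actual BufferingOopClosure.

#include <cstddef>
#include <cstdio>

struct OopClosureSketch {
  virtual void do_oop(void** p) = 0;
  virtual ~OopClosureSketch() {}
};

class BufferingClosureSketch : public OopClosureSketch {
  static const size_t BufferLength = 4;            // tiny, just for the example
  void*  _buffer[BufferLength];
  size_t _index;
  OopClosureSketch* _wrapped;
public:
  explicit BufferingClosureSketch(OopClosureSketch* wrapped)
    : _index(0), _wrapped(wrapped) {}
  void do_oop(void** p) {
    if (_index == BufferLength) done();            // flush a full batch first
    _buffer[_index++] = (void*)p;                  // only record the root location
  }
  void done() {                                    // apply the wrapped closure per batch
    for (size_t i = 0; i < _index; i++) {
      _wrapped->do_oop((void**)_buffer[i]);
    }
    _index = 0;
  }
};

struct CountingClosure : public OopClosureSketch {
  int count;
  CountingClosure() : count(0) {}
  void do_oop(void** /*p*/) { count++; }
};

int main() {
  CountingClosure counter;
  BufferingClosureSketch buf(&counter);
  void* slots[10] = {};
  for (int i = 0; i < 10; i++) buf.do_oop(&slots[i]);
  buf.done();                                      // drain whatever is still buffered
  std::printf("visited %d root locations\n", counter.count);
  return 0;
}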
5170 // the entire code cache, we need to mark the oops in the 5200 // the entire code cache, we need to mark the oops in the
5171 // strong code root lists for the regions that are not in 5201 // strong code root lists for the regions that are not in
5172 // the collection set. 5202 // the collection set.
5173 // Note all threads participate in this set of root tasks. 5203 // Note all threads participate in this set of root tasks.
5174 double mark_strong_code_roots_ms = 0.0; 5204 double mark_strong_code_roots_ms = 0.0;
5175 if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) { 5205 if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
5176 double mark_strong_roots_start = os::elapsedTime(); 5206 double mark_strong_roots_start = os::elapsedTime();
5177 mark_strong_code_roots(worker_i); 5207 mark_strong_code_roots(worker_i);
5178 mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0; 5208 mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
5179 } 5209 }
5180 g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms); 5210 g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
5181 5211
5182 // Now scan the complement of the collection set. 5212 // Now scan the complement of the collection set.
5183 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */); 5213 if (scan_rs != NULL) {
5184 g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i); 5214 g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
5185 5215 }
5186 _process_strong_tasks->all_tasks_completed(); 5216 _process_strong_tasks->all_tasks_completed();
5187 } 5217 }
5188 5218
5189 class G1StringSymbolTableUnlinkTask : public AbstractGangTask { 5219 void
5190 private: 5220 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
5191 BoolObjectClosure* _is_alive; 5221 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5192 int _initial_string_table_size; 5222 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
5193 int _initial_symbol_table_size;
5194
5195 bool _process_strings;
5196 int _strings_processed;
5197 int _strings_removed;
5198
5199 bool _process_symbols;
5200 int _symbols_processed;
5201 int _symbols_removed;
5202
5203 bool _do_in_parallel;
5204 public:
5205 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
5206 AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
5207 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
5208 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
5209 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
5210
5211 _initial_string_table_size = StringTable::the_table()->table_size();
5212 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
5213 if (process_strings) {
5214 StringTable::clear_parallel_claimed_index();
5215 }
5216 if (process_symbols) {
5217 SymbolTable::clear_parallel_claimed_index();
5218 }
5219 }
5220
5221 ~G1StringSymbolTableUnlinkTask() {
5222 guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
5223 err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
5224 StringTable::parallel_claimed_index(), _initial_string_table_size));
5225 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
5226 err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
5227 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
5228 }
5229
5230 void work(uint worker_id) {
5231 if (_do_in_parallel) {
5232 int strings_processed = 0;
5233 int strings_removed = 0;
5234 int symbols_processed = 0;
5235 int symbols_removed = 0;
5236 if (_process_strings) {
5237 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
5238 Atomic::add(strings_processed, &_strings_processed);
5239 Atomic::add(strings_removed, &_strings_removed);
5240 }
5241 if (_process_symbols) {
5242 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
5243 Atomic::add(symbols_processed, &_symbols_processed);
5244 Atomic::add(symbols_removed, &_symbols_removed);
5245 }
5246 } else {
5247 if (_process_strings) {
5248 StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
5249 }
5250 if (_process_symbols) {
5251 SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
5252 }
5253 }
5254 }
5255
5256 size_t strings_processed() const { return (size_t)_strings_processed; }
5257 size_t strings_removed() const { return (size_t)_strings_removed; }
5258
5259 size_t symbols_processed() const { return (size_t)_symbols_processed; }
5260 size_t symbols_removed() const { return (size_t)_symbols_removed; }
5261 };
5262
5263 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5264 bool process_strings, bool process_symbols) {
5265 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5266 _g1h->workers()->active_workers() : 1);
5267
5268 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5269 if (G1CollectedHeap::use_parallel_gc_threads()) {
5270 set_par_threads(n_workers);
5271 workers()->run_task(&g1_unlink_task);
5272 set_par_threads(0);
5273 } else {
5274 g1_unlink_task.work(0);
5275 }
5276 if (G1TraceStringSymbolTableScrubbing) {
5277 gclog_or_tty->print_cr("Cleaned string and symbol table, "
5278 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5279 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5280 g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
5281 g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
5282 }
5283 } 5223 }
5284 5224
5285 // Weak Reference Processing support 5225 // Weak Reference Processing support
5286 5226
5287 // An always "is_alive" closure that is used to preserve referents. 5227 // An always "is_alive" closure that is used to preserve referents.
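G1StringSymbolTableUnlinkTask above has each worker unlink its claimed share of the string and symbol tables into local counters and then publish them with Atomic::add. The sketch below shows the same accumulate-then-add pattern with std::atomic and std::thread; it is a generic illustration, not the HotSpot Atomic API, and the "every third entry is dead" rule is invented for the example.

#include <atomic>
#include <thread>
#include <vector>
#include <cstdio>

std::atomic<int> total_processed{0};
std::atomic<int> total_removed{0};

void worker(int entries_in_my_slice) {
  int processed = 0, removed = 0;
  for (int i = 0; i < entries_in_my_slice; i++) {
    processed++;
    if (i % 3 == 0) removed++;            // pretend every third entry is unreachable
  }
  total_processed.fetch_add(processed);   // one atomic add per worker, not per entry
  total_removed.fetch_add(removed);
}

int main() {
  std::vector<std::thread> pool;
  for (int w = 0; w < 4; w++) pool.emplace_back(worker, 100);
  for (auto& t : pool) t.join();
  std::printf("processed %d, removed %d\n", total_processed.load(), total_removed.load());
  return 0;
}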
5460 ResourceMark rm; 5400 ResourceMark rm;
5461 HandleMark hm; 5401 HandleMark hm;
5462 5402
5463 G1STWIsAliveClosure is_alive(_g1h); 5403 G1STWIsAliveClosure is_alive(_g1h);
5464 5404
5465 G1ParScanThreadState pss(_g1h, worker_id, NULL); 5405 G1ParScanThreadState pss(_g1h, worker_id);
5466 5406
5467 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL); 5407 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5468 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); 5408 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5469 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL); 5409 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
5470 5410
5572 5512
5573 void work(uint worker_id) { 5513 void work(uint worker_id) {
5574 ResourceMark rm; 5514 ResourceMark rm;
5575 HandleMark hm; 5515 HandleMark hm;
5576 5516
5577 G1ParScanThreadState pss(_g1h, worker_id, NULL); 5517 G1ParScanThreadState pss(_g1h, worker_id);
5578 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL); 5518 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5579 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); 5519 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5580 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL); 5520 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
5581 5521
5582 pss.set_evac_closure(&scan_evac_cl); 5522 pss.set_evac_closure(&scan_evac_cl);
5698 // of JNI refs is serial and performed serially by the current thread 5638 // of JNI refs is serial and performed serially by the current thread
5699 // rather than by a worker. The following PSS will be used for processing 5639 // rather than by a worker. The following PSS will be used for processing
5700 // JNI refs. 5640 // JNI refs.
5701 5641
5702 // Use only a single queue for this PSS. 5642 // Use only a single queue for this PSS.
5703 G1ParScanThreadState pss(this, 0, NULL); 5643 G1ParScanThreadState pss(this, 0);
5704 5644
5705 // We do not embed a reference processor in the copying/scanning 5645 // We do not embed a reference processor in the copying/scanning
5706 // closures while we're actually processing the discovered 5646 // closures while we're actually processing the discovered
5707 // reference objects. 5647 // reference objects.
5708 G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL); 5648 G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL);