comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 14518:d8041d695d19

Merged with jdk9/dev/hotspot changeset 3812c088b945
author twisti
date Tue, 11 Mar 2014 18:45:59 -0700
parents 02f27ecb4f3a 97300b6165f8
children 4ca6dc0799b6
comparing 14141:f97c5ec83832 with 14518:d8041d695d19
1 /* 1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
48 #include "gc_implementation/shared/gcTimer.hpp" 48 #include "gc_implementation/shared/gcTimer.hpp"
49 #include "gc_implementation/shared/gcTrace.hpp" 49 #include "gc_implementation/shared/gcTrace.hpp"
50 #include "gc_implementation/shared/gcTraceTime.hpp" 50 #include "gc_implementation/shared/gcTraceTime.hpp"
51 #include "gc_implementation/shared/isGCActiveMark.hpp" 51 #include "gc_implementation/shared/isGCActiveMark.hpp"
52 #include "memory/gcLocker.inline.hpp" 52 #include "memory/gcLocker.inline.hpp"
53 #include "memory/genOopClosures.inline.hpp"
54 #include "memory/generationSpec.hpp" 53 #include "memory/generationSpec.hpp"
54 #include "memory/iterator.hpp"
55 #include "memory/referenceProcessor.hpp" 55 #include "memory/referenceProcessor.hpp"
56 #include "oops/oop.inline.hpp" 56 #include "oops/oop.inline.hpp"
57 #include "oops/oop.pcgc.inline.hpp" 57 #include "oops/oop.pcgc.inline.hpp"
58 #include "runtime/vmThread.hpp" 58 #include "runtime/vmThread.hpp"
59 #include "utilities/ticks.hpp" 59 #include "utilities/ticks.hpp"
1573 1573
1574 // This code is mostly copied from TenuredGeneration. 1574 // This code is mostly copied from TenuredGeneration.
1575 void 1575 void
1576 G1CollectedHeap:: 1576 G1CollectedHeap::
1577 resize_if_necessary_after_full_collection(size_t word_size) { 1577 resize_if_necessary_after_full_collection(size_t word_size) {
1578 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
1579
1580 // Include the current allocation, if any, and bytes that will be 1578 // Include the current allocation, if any, and bytes that will be
1581 // pre-allocated to support collections, as "used". 1579 // pre-allocated to support collections, as "used".
1582 const size_t used_after_gc = used(); 1580 const size_t used_after_gc = used();
1583 const size_t capacity_after_gc = capacity(); 1581 const size_t capacity_after_gc = capacity();
1584 const size_t free_after_gc = capacity_after_gc - used_after_gc; 1582 const size_t free_after_gc = capacity_after_gc - used_after_gc;
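The retained lines compute used, capacity, and free space after the full collection; further down in this method they drive the shrink/expand decision bounded by MinHeapFreeRatio and MaxHeapFreeRatio. A minimal standalone sketch of that arithmetic, with hypothetical names and values, not HotSpot code:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Given bytes in use after a full GC, pick a capacity that keeps the
    // free fraction within [min_free, max_free].
    static size_t resize_target(size_t used, size_t capacity,
                                double min_free, double max_free) {
      const size_t minimum_capacity = (size_t)(used / (1.0 - min_free));
      const size_t maximum_capacity = (size_t)(used / (1.0 - max_free));
      // Expand if below the minimum, shrink if above the maximum,
      // otherwise leave the capacity alone.
      return std::min(std::max(capacity, minimum_capacity), maximum_capacity);
    }

    int main() {
      // 256 MB used, 1 GB committed, keep free space between 40% and 70%.
      size_t target = resize_target((size_t)256 << 20, (size_t)1024 << 20, 0.40, 0.70);
      std::printf("target capacity: %zu MB\n", target >> 20);
      return 0;
    }

With those inputs the 1 GB heap shrinks to about 853 MB, the largest capacity at which free space is still no more than 70%.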
2266 &_is_alive_closure_stw, 2264 &_is_alive_closure_stw,
2267 // is alive closure 2265 // is alive closure
2268 // (for efficiency/performance) 2266 // (for efficiency/performance)
2269 false); 2267 false);
2270 // Setting next fields of discovered 2268 // Setting next fields of discovered
2271 // lists requires a barrier. 2269 // lists does not require a barrier.
2272 } 2270 }
2273 2271
2274 size_t G1CollectedHeap::capacity() const { 2272 size_t G1CollectedHeap::capacity() const {
2275 return _g1_committed.byte_size(); 2273 return _g1_committed.byte_size();
2276 } 2274 }
2372 2370
2373 size_t G1CollectedHeap::recalculate_used() const { 2371 size_t G1CollectedHeap::recalculate_used() const {
2374 SumUsedClosure blk; 2372 SumUsedClosure blk;
2375 heap_region_iterate(&blk); 2373 heap_region_iterate(&blk);
2376 return blk.result(); 2374 return blk.result();
2377 }
2378
2379 size_t G1CollectedHeap::unsafe_max_alloc() {
2380 if (free_regions() > 0) return HeapRegion::GrainBytes;
2381 // otherwise, is there space in the current allocation region?
2382
2383 // We need to store the current allocation region in a local variable
2384 // here. The problem is that this method doesn't take any locks and
2385 // there may be other threads which overwrite the current allocation
2386 // region field. attempt_allocation(), for example, sets it to NULL
2387 // and this can happen *after* the NULL check here but before the call
2388 // to free(), resulting in a SIGSEGV. Note that this doesn't appear
2389 // to be a problem in the optimized build, since the two loads of the
2390 // current allocation region field are optimized away.
2391 HeapRegion* hr = _mutator_alloc_region.get();
2392 if (hr == NULL) {
2393 return 0;
2394 }
2395 return hr->free();
2396 } 2375 }
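The comment in the deleted unsafe_max_alloc() describes a classic lock-free hazard: re-reading a shared field between the NULL check and the use. The remedy it names, loading the field once into a local, looks like this in a self-contained sketch (hypothetical names; std::atomic stands in for HotSpot's raw field access):

    #include <atomic>
    #include <cstddef>

    struct Region {
      size_t free_bytes;
      size_t free() const { return free_bytes; }
    };

    // A field that other threads may concurrently reset to nullptr.
    static std::atomic<Region*> current_alloc_region{nullptr};

    size_t unsafe_max_alloc_sketch() {
      // Load the shared pointer exactly once. Re-reading the field between
      // the null check and the use could observe nullptr the second time.
      Region* hr = current_alloc_region.load(std::memory_order_acquire);
      if (hr == nullptr) {
        return 0;
      }
      return hr->free();
    }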
2397 2376
2398 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { 2377 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2399 switch (cause) { 2378 switch (cause) {
2400 case GCCause::_gc_locker: return GCLockerInvokesConcurrent; 2379 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
3023 bool G1CollectedHeap::supports_tlab_allocation() const { 3002 bool G1CollectedHeap::supports_tlab_allocation() const {
3024 return true; 3003 return true;
3025 } 3004 }
3026 3005
3027 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { 3006 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
3028 return HeapRegion::GrainBytes; 3007 return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
3008 }
3009
3010 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
3011 return young_list()->eden_used_bytes();
3012 }
3013
3014 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
3015 // must be smaller than the humongous object limit.
3016 size_t G1CollectedHeap::max_tlab_size() const {
3017 return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
3029 } 3018 }
3030 3019
3031 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { 3020 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
3032 // Return the remaining space in the cur alloc region, but not less than 3021 // Return the remaining space in the cur alloc region, but not less than
3033 // the min TLAB size. 3022 // the min TLAB size.
3035 // Also, this value can be at most the humongous object threshold, 3024 // Also, this value can be at most the humongous object threshold,
3036 // since we can't allow tlabs to grow big enough to accommodate 3025 // since we can't allow tlabs to grow big enough to accommodate
3037 // humongous objects. 3026 // humongous objects.
3038 3027
3039 HeapRegion* hr = _mutator_alloc_region.get(); 3028 HeapRegion* hr = _mutator_alloc_region.get();
3040 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; 3029 size_t max_tlab = max_tlab_size() * wordSize;
3041 if (hr == NULL) { 3030 if (hr == NULL) {
3042 return max_tlab_size; 3031 return max_tlab;
3043 } else { 3032 } else {
3044 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size); 3033 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
3045 } 3034 }
3046 } 3035 }
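The new TLAB bounds are plain arithmetic: max_tlab_size() stays one object-alignment step below the humongous threshold so a TLAB can never itself become a humongous allocation, and unsafe_max_tlab_alloc() clamps the current region's free space into [MinTLABSize, max TLAB]. A standalone sketch with made-up constants:

    #include <algorithm>
    #include <cstddef>

    // Hypothetical stand-ins for HotSpot's region and alignment values.
    static const size_t kRegionWords     = (size_t)1 << 18;   // words per region
    static const size_t kHumongousWords  = kRegionWords / 2;  // humongous threshold
    static const size_t kMinObjAlignment = 8;                 // in words
    static const size_t kMinTLABWords    = 2 * 1024;

    static size_t align_down(size_t value, size_t alignment) {
      return value - (value % alignment);
    }

    // Largest TLAB that is guaranteed not to be a humongous allocation:
    // one word below the threshold, aligned down.
    static size_t max_tlab_words() {
      return align_down(kHumongousWords - 1, kMinObjAlignment);
    }

    // Clamp the space left in the current region into [min TLAB, max TLAB].
    static size_t unsafe_max_tlab_alloc_words(size_t region_free_words) {
      return std::min(std::max(region_free_words, kMinTLABWords), max_tlab_words());
    }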
3047 3036
3048 size_t G1CollectedHeap::max_capacity() const { 3037 size_t G1CollectedHeap::max_capacity() const {
3049 return _g1_reserved.byte_size(); 3038 return _g1_reserved.byte_size();
3104 default: ShouldNotReachHere(); 3093 default: ShouldNotReachHere();
3105 } 3094 }
3106 return NULL; // keep some compilers happy 3095 return NULL; // keep some compilers happy
3107 } 3096 }
3108 3097
3109 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can 3098 class VerifyRootsClosure: public OopClosure {
3110 // pass it as the perm_blk to SharedHeap::process_strong_roots.
3111 // When process_strong_roots stop calling perm_blk->younger_refs_iterate
3112 // we can change this closure to extend the simpler OopClosure.
3113 class VerifyRootsClosure: public OopsInGenClosure {
3114 private: 3099 private:
3115 G1CollectedHeap* _g1h; 3100 G1CollectedHeap* _g1h;
3116 VerifyOption _vo; 3101 VerifyOption _vo;
3117 bool _failures; 3102 bool _failures;
3118 public: 3103 public:
3144 3129
3145 void do_oop(oop* p) { do_oop_nv(p); } 3130 void do_oop(oop* p) { do_oop_nv(p); }
3146 void do_oop(narrowOop* p) { do_oop_nv(p); } 3131 void do_oop(narrowOop* p) { do_oop_nv(p); }
3147 }; 3132 };
3148 3133
3149 class G1VerifyCodeRootOopClosure: public OopsInGenClosure { 3134 class G1VerifyCodeRootOopClosure: public OopClosure {
3150 G1CollectedHeap* _g1h; 3135 G1CollectedHeap* _g1h;
3151 OopClosure* _root_cl; 3136 OopClosure* _root_cl;
3152 nmethod* _nm; 3137 nmethod* _nm;
3153 VerifyOption _vo; 3138 VerifyOption _vo;
3154 bool _failures; 3139 bool _failures;
3417 assert(Thread::current()->is_VM_thread(), 3402 assert(Thread::current()->is_VM_thread(),
3418 "Expected to be executed serially by the VM thread at this point"); 3403 "Expected to be executed serially by the VM thread at this point");
3419 3404
3420 if (!silent) { gclog_or_tty->print("Roots "); } 3405 if (!silent) { gclog_or_tty->print("Roots "); }
3421 VerifyRootsClosure rootsCl(vo); 3406 VerifyRootsClosure rootsCl(vo);
3407 VerifyKlassClosure klassCl(this, &rootsCl);
3408
3409 // We apply the relevant closures to all the oops in the
3410 // system dictionary, class loader data graph and the string table.
3411 // Don't verify the code cache here, since it's verified below.
3412 const int so = SO_AllClasses | SO_Strings;
3413
3414 // Need cleared claim bits for the strong roots processing
3415 ClassLoaderDataGraph::clear_claimed_marks();
3416
3417 process_strong_roots(true, // activate StrongRootsScope
3418 ScanningOption(so), // roots scanning options
3419 &rootsCl,
3420 &klassCl
3421 );
3422
3423 // Verify the nmethods in the code cache.
3422 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo); 3424 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3423 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); 3425 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3424 VerifyKlassClosure klassCl(this, &rootsCl); 3426 CodeCache::blobs_do(&blobsCl);
3425
3426 // We apply the relevant closures to all the oops in the
3427 // system dictionary, the string table and the code cache.
3428 const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3429
3430 // Need cleared claim bits for the strong roots processing
3431 ClassLoaderDataGraph::clear_claimed_marks();
3432
3433 process_strong_roots(true, // activate StrongRootsScope
3434 false, // we set "is scavenging" to false,
3435 // so we don't reset the dirty cards.
3436 ScanningOption(so), // roots scanning options
3437 &rootsCl,
3438 &blobsCl,
3439 &klassCl
3440 );
3441 3427
3442 bool failures = rootsCl.failures() || codeRootsCl.failures(); 3428 bool failures = rootsCl.failures() || codeRootsCl.failures();
3443 3429
3444 if (vo != VerifyOption_G1UseMarkWord) { 3430 if (vo != VerifyOption_G1UseMarkWord) {
3445 // If we're verifying during a full GC then the region sets 3431 // If we're verifying during a full GC then the region sets
3682 3668
3683 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { 3669 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3684 // always_do_update_barrier = false; 3670 // always_do_update_barrier = false;
3685 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); 3671 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3686 // Fill TLAB's and such 3672 // Fill TLAB's and such
3673 accumulate_statistics_all_tlabs();
3687 ensure_parsability(true); 3674 ensure_parsability(true);
3688 3675
3689 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) && 3676 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3690 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { 3677 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3691 g1_rem_set()->print_periodic_summary_info("Before GC RS summary"); 3678 g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3705 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" 3692 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3706 // is set. 3693 // is set.
3707 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), 3694 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3708 "derived pointer present")); 3695 "derived pointer present"));
3709 // always_do_update_barrier = true; 3696 // always_do_update_barrier = true;
3697
3698 resize_all_tlabs();
3710 3699
3711 // We have just completed a GC. Update the soft reference 3700 // We have just completed a GC. Update the soft reference
3712 // policy with the new heap occupancy 3701 // policy with the new heap occupancy
3713 Universe::update_heap_info_at_gc(); 3702 Universe::update_heap_info_at_gc();
3714 } 3703 }
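The added accumulate_statistics_all_tlabs() and resize_all_tlabs() calls bracket the pause: per-thread TLAB statistics sampled in the prologue feed the resize in the epilogue. A schematic of the pairing, with hypothetical names:

    // Sketch of the prologue/epilogue pairing; not HotSpot's TLAB code.
    struct TlabManager {
      void accumulate_statistics() { /* fold per-thread refill/waste counters */ }
      void make_parsable()         { /* fill TLAB remainders so the heap can be walked */ }
      void resize_all()            { /* recompute per-thread TLAB sizes from the stats */ }
    };

    void collect(TlabManager& tlabs) {
      tlabs.accumulate_statistics();  // gc_prologue: sample before the heap moves
      tlabs.make_parsable();          // ensure_parsability
      // ... the pause itself: evacuate / mark / compact ...
      tlabs.resize_all();             // gc_epilogue: apply the new sizes
    }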
4553 } 4542 }
4554 4543
4555 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) : 4544 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4556 ParGCAllocBuffer(gclab_word_size), _retired(false) { } 4545 ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4557 4546
4558 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num) 4547 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
4559 : _g1h(g1h), 4548 : _g1h(g1h),
4560 _refs(g1h->task_queue(queue_num)), 4549 _refs(g1h->task_queue(queue_num)),
4561 _dcq(&g1h->dirty_card_queue_set()), 4550 _dcq(&g1h->dirty_card_queue_set()),
4562 _ct_bs(g1h->g1_barrier_set()), 4551 _ct_bs(g1h->g1_barrier_set()),
4563 _g1_rem(g1h->g1_rem_set()), 4552 _g1_rem(g1h->g1_rem_set()),
4564 _hash_seed(17), _queue_num(queue_num), 4553 _hash_seed(17), _queue_num(queue_num),
4565 _term_attempts(0), 4554 _term_attempts(0),
4566 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), 4555 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4567 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), 4556 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4568 _age_table(false), 4557 _age_table(false), _scanner(g1h, this, rp),
4569 _strong_roots_time(0), _term_time(0), 4558 _strong_roots_time(0), _term_time(0),
4570 _alloc_buffer_waste(0), _undo_waste(0) { 4559 _alloc_buffer_waste(0), _undo_waste(0) {
4571 // we allocate G1YoungSurvRateNumRegions plus one entries, since 4560 // we allocate G1YoungSurvRateNumRegions plus one entries, since
4572 // we "sacrifice" entry 0 to keep track of surviving bytes for 4561 // we "sacrifice" entry 0 to keep track of surviving bytes for
4573 // non-young regions (where the age is -1) 4562 // non-young regions (where the age is -1)
4672 } while (!refs()->is_empty()); 4661 } while (!refs()->is_empty());
4673 } 4662 }
4674 4663
4675 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, 4664 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4676 G1ParScanThreadState* par_scan_state) : 4665 G1ParScanThreadState* par_scan_state) :
4677 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), 4666 _g1(g1), _par_scan_state(par_scan_state),
4678 _par_scan_state(par_scan_state), 4667 _worker_id(par_scan_state->queue_num()) { }
4679 _worker_id(par_scan_state->queue_num()), 4668
4680 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()), 4669 void G1ParCopyHelper::mark_object(oop obj) {
4681 _mark_in_progress(_g1->mark_in_progress()) { }
4682
4683 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4684 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
4685 #ifdef ASSERT 4670 #ifdef ASSERT
4686 HeapRegion* hr = _g1->heap_region_containing(obj); 4671 HeapRegion* hr = _g1->heap_region_containing(obj);
4687 assert(hr != NULL, "sanity"); 4672 assert(hr != NULL, "sanity");
4688 assert(!hr->in_collection_set(), "should not mark objects in the CSet"); 4673 assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4689 #endif // ASSERT 4674 #endif // ASSERT
4690 4675
4691 // We know that the object is not moving so it's safe to read its size. 4676 // We know that the object is not moving so it's safe to read its size.
4692 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id); 4677 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4693 } 4678 }
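grayRoot() hands the object to the concurrent marker. In tricolor terms, graying means "marked live, fields not yet scanned"; a generic worklist sketch of that idea (plain illustration, not G1's SATB machinery):

    #include <vector>

    struct Obj {
      bool marked = false;
      std::vector<Obj*> fields;
    };

    // Mark the object so it survives, and queue it so its fields get scanned.
    void gray_root(Obj* o, std::vector<Obj*>& mark_stack) {
      if (!o->marked) {
        o->marked = true;         // will not be collected this cycle
        mark_stack.push_back(o);  // gray: still needs scanning
      }
    }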
4694 4679
4695 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> 4680 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4696 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4697 ::mark_forwarded_object(oop from_obj, oop to_obj) {
4698 #ifdef ASSERT 4681 #ifdef ASSERT
4699 assert(from_obj->is_forwarded(), "from obj should be forwarded"); 4682 assert(from_obj->is_forwarded(), "from obj should be forwarded");
4700 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee"); 4683 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4701 assert(from_obj != to_obj, "should not be self-forwarded"); 4684 assert(from_obj != to_obj, "should not be self-forwarded");
4702 4685
4714 // well-formed. So we have to read its size from its from-space 4697 // well-formed. So we have to read its size from its from-space
4715 // image which we know should not be changing. 4698 // image which we know should not be changing.
4716 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id); 4699 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4717 } 4700 }
4718 4701
4719 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> 4702 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
4720 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4721 ::copy_to_survivor_space(oop old) {
4722 size_t word_sz = old->size(); 4703 size_t word_sz = old->size();
4723 HeapRegion* from_region = _g1->heap_region_containing_raw(old); 4704 HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
4724 // +1 to make the -1 indexes valid... 4705 // +1 to make the -1 indexes valid...
4725 int young_index = from_region->young_index_in_cset()+1; 4706 int young_index = from_region->young_index_in_cset()+1;
4726 assert( (from_region->is_young() && young_index > 0) || 4707 assert( (from_region->is_young() && young_index > 0) ||
4727 (!from_region->is_young() && young_index == 0), "invariant" ); 4708 (!from_region->is_young() && young_index == 0), "invariant" );
4728 G1CollectorPolicy* g1p = _g1->g1_policy(); 4709 G1CollectorPolicy* g1p = _g1h->g1_policy();
4729 markOop m = old->mark(); 4710 markOop m = old->mark();
4730 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() 4711 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4731 : m->age(); 4712 : m->age();
4732 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, 4713 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4733 word_sz); 4714 word_sz);
4734 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); 4715 HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
4735 #ifndef PRODUCT 4716 #ifndef PRODUCT
4736 // Should this evacuation fail? 4717 // Should this evacuation fail?
4737 if (_g1->evacuation_should_fail()) { 4718 if (_g1h->evacuation_should_fail()) {
4738 if (obj_ptr != NULL) { 4719 if (obj_ptr != NULL) {
4739 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); 4720 undo_allocation(alloc_purpose, obj_ptr, word_sz);
4740 obj_ptr = NULL; 4721 obj_ptr = NULL;
4741 } 4722 }
4742 } 4723 }
4743 #endif // !PRODUCT 4724 #endif // !PRODUCT
4744 4725
4745 if (obj_ptr == NULL) { 4726 if (obj_ptr == NULL) {
4746 // This will either forward-to-self, or detect that someone else has 4727 // This will either forward-to-self, or detect that someone else has
4747 // installed a forwarding pointer. 4728 // installed a forwarding pointer.
4748 return _g1->handle_evacuation_failure_par(_par_scan_state, old); 4729 return _g1h->handle_evacuation_failure_par(this, old);
4749 } 4730 }
4750 4731
4751 oop obj = oop(obj_ptr); 4732 oop obj = oop(obj_ptr);
4752 4733
4753 // We're going to allocate linearly, so might as well prefetch ahead. 4734 // We're going to allocate linearly, so might as well prefetch ahead.
4776 obj->incr_age(); 4757 obj->incr_age();
4777 } else { 4758 } else {
4778 m = m->incr_age(); 4759 m = m->incr_age();
4779 obj->set_mark(m); 4760 obj->set_mark(m);
4780 } 4761 }
4781 _par_scan_state->age_table()->add(obj, word_sz); 4762 age_table()->add(obj, word_sz);
4782 } else { 4763 } else {
4783 obj->set_mark(m); 4764 obj->set_mark(m);
4784 } 4765 }
4785 4766
4786 size_t* surv_young_words = _par_scan_state->surviving_young_words(); 4767 size_t* surv_young_words = surviving_young_words();
4787 surv_young_words[young_index] += word_sz; 4768 surv_young_words[young_index] += word_sz;
4788 4769
4789 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { 4770 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4790 // We keep track of the next start index in the length field of 4771 // We keep track of the next start index in the length field of
4791 // the to-space object. The actual length can be found in the 4772 // the to-space object. The actual length can be found in the
4792 // length field of the from-space object. 4773 // length field of the from-space object.
4793 arrayOop(obj)->set_length(0); 4774 arrayOop(obj)->set_length(0);
4794 oop* old_p = set_partial_array_mask(old); 4775 oop* old_p = set_partial_array_mask(old);
4795 _par_scan_state->push_on_queue(old_p); 4776 push_on_queue(old_p);
4796 } else { 4777 } else {
4797 // No point in using the slower heap_region_containing() method, 4778 // No point in using the slower heap_region_containing() method,
4798 // given that we know obj is in the heap. 4779 // given that we know obj is in the heap.
4799 _scanner.set_region(_g1->heap_region_containing_raw(obj)); 4780 _scanner.set_region(_g1h->heap_region_containing_raw(obj));
4800 obj->oop_iterate_backwards(&_scanner); 4781 obj->oop_iterate_backwards(&_scanner);
4801 } 4782 }
4802 } else { 4783 } else {
4803 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); 4784 undo_allocation(alloc_purpose, obj_ptr, word_sz);
4804 obj = forward_ptr; 4785 obj = forward_ptr;
4805 } 4786 }
4806 return obj; 4787 return obj;
4807 } 4788 }
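The objArray branch above queues large arrays in chunks: set_partial_array_mask() folds a tag bit into an aligned pointer so the scanning loop can tell continue-this-array tasks from ordinary references, while the to-space copy's length field tracks the next chunk index. The tagging reduces to a few bit operations (hypothetical mask value):

    #include <cassert>
    #include <cstdint>

    // A low bit is free for tagging because objects are aligned.
    static const uintptr_t kPartialArrayMask = 0x2;

    inline void* set_partial_array_mask(void* obj) {
      assert((reinterpret_cast<uintptr_t>(obj) & kPartialArrayMask) == 0);
      return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) | kPartialArrayMask);
    }

    inline bool has_partial_array_mask(void* p) {
      return (reinterpret_cast<uintptr_t>(p) & kPartialArrayMask) != 0;
    }

    inline void* clear_partial_array_mask(void* p) {
      return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) & ~kPartialArrayMask);
    }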
4808 4789
4811 if (_g1->heap_region_containing_raw(new_obj)->is_young()) { 4792 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4812 _scanned_klass->record_modified_oops(); 4793 _scanned_klass->record_modified_oops();
4813 } 4794 }
4814 } 4795 }
4815 4796
4816 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> 4797 template <G1Barrier barrier, bool do_mark_object>
4817 template <class T> 4798 template <class T>
4818 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> 4799 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4819 ::do_oop_work(T* p) { 4800 T heap_oop = oopDesc::load_heap_oop(p);
4820 oop obj = oopDesc::load_decode_heap_oop(p); 4801
4821 assert(barrier != G1BarrierRS || obj != NULL, 4802 if (oopDesc::is_null(heap_oop)) {
4822 "Precondition: G1BarrierRS implies obj is non-NULL"); 4803 return;
4804 }
4805
4806 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4823 4807
4824 assert(_worker_id == _par_scan_state->queue_num(), "sanity"); 4808 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4825 4809
4826 // here the null check is implicit in the cset_fast_test() test
4827 if (_g1->in_cset_fast_test(obj)) { 4810 if (_g1->in_cset_fast_test(obj)) {
4828 oop forwardee; 4811 oop forwardee;
4829 if (obj->is_forwarded()) { 4812 if (obj->is_forwarded()) {
4830 forwardee = obj->forwardee(); 4813 forwardee = obj->forwardee();
4831 } else { 4814 } else {
4832 forwardee = copy_to_survivor_space(obj); 4815 forwardee = _par_scan_state->copy_to_survivor_space(obj);
4833 } 4816 }
4834 assert(forwardee != NULL, "forwardee should not be NULL"); 4817 assert(forwardee != NULL, "forwardee should not be NULL");
4835 oopDesc::encode_store_heap_oop(p, forwardee); 4818 oopDesc::encode_store_heap_oop(p, forwardee);
4836 if (do_mark_object && forwardee != obj) { 4819 if (do_mark_object && forwardee != obj) {
4837 // If the object is self-forwarded we don't need to explicitly 4820 // If the object is self-forwarded we don't need to explicitly
4838 // mark it, the evacuation failure protocol will do so. 4821 // mark it, the evacuation failure protocol will do so.
4839 mark_forwarded_object(obj, forwardee); 4822 mark_forwarded_object(obj, forwardee);
4840 } 4823 }
4841 4824
4842 // When scanning the RS, we only care about objs in CS. 4825 if (barrier == G1BarrierKlass) {
4843 if (barrier == G1BarrierRS) {
4844 _par_scan_state->update_rs(_from, p, _worker_id);
4845 } else if (barrier == G1BarrierKlass) {
4846 do_klass_barrier(p, forwardee); 4826 do_klass_barrier(p, forwardee);
4847 } 4827 }
4848 } else { 4828 } else {
4849 // The object is not in collection set. If we're a root scanning 4829 // The object is not in collection set. If we're a root scanning
4850 // closure during an initial mark pause (i.e. do_mark_object will 4830 // closure during an initial mark pause (i.e. do_mark_object will
4851 // be true) then attempt to mark the object. 4831 // be true) then attempt to mark the object.
4852 if (do_mark_object && _g1->is_in_g1_reserved(obj)) { 4832 if (do_mark_object) {
4853 mark_object(obj); 4833 mark_object(obj);
4854 } 4834 }
4855 } 4835 }
4856 4836
4857 if (barrier == G1BarrierEvac && obj != NULL) { 4837 if (barrier == G1BarrierEvac) {
4858 _par_scan_state->update_rs(_from, p, _worker_id); 4838 _par_scan_state->update_rs(_from, p, _worker_id);
4859 } 4839 }
4860 4840 }
4861 if (do_gen_barrier && obj != NULL) { 4841
4862 par_do_barrier(p); 4842 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
4863 } 4843 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4864 }
4865
4866 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
4867 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4868 4844
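The rewritten do_oop_work() replaces load_decode_heap_oop() and its implicit NULL handling with an explicit three-step pattern: load the (possibly compressed) slot once, return early on null, then decode on the not-null fast path. The same shape with invented types:

    #include <cstdint>

    using narrow_ref = uint32_t;  // stand-in for a compressed reference

    struct Heap {
      uintptr_t base;
      static const unsigned shift = 3;
      void* decode_not_null(narrow_ref v) const {
        // The not-null decode can skip the null special case entirely.
        return reinterpret_cast<void*>(base + ((uintptr_t)v << shift));
      }
    };

    template <class Visitor>
    void visit_slot(const Heap& heap, narrow_ref* p, Visitor& v) {
      narrow_ref raw = *p;            // single load of the slot
      if (raw == 0) {
        return;                       // null: nothing to copy, mark, or update
      }
      v(heap.decode_not_null(raw));   // decode only on the non-null path
    }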
4869 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { 4845 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4870 assert(has_partial_array_mask(p), "invariant"); 4846 assert(has_partial_array_mask(p), "invariant");
4871 oop from_obj = clear_partial_array_mask(p); 4847 oop from_obj = clear_partial_array_mask(p);
4872 4848
5053 ResourceMark rm; 5029 ResourceMark rm;
5054 HandleMark hm; 5030 HandleMark hm;
5055 5031
5056 ReferenceProcessor* rp = _g1h->ref_processor_stw(); 5032 ReferenceProcessor* rp = _g1h->ref_processor_stw();
5057 5033
5058 G1ParScanThreadState pss(_g1h, worker_id); 5034 G1ParScanThreadState pss(_g1h, worker_id, rp);
5059 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp); 5035 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
5060 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp); 5036 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
5061 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp); 5037 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp);
5062 5038
5063 pss.set_evac_closure(&scan_evac_cl); 5039 pss.set_evac_closure(&scan_evac_cl);
5146 double ext_roots_start = os::elapsedTime(); 5122 double ext_roots_start = os::elapsedTime();
5147 double closure_app_time_sec = 0.0; 5123 double closure_app_time_sec = 0.0;
5148 5124
5149 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); 5125 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5150 5126
5151 assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
5152 // Walk the code cache/strong code roots w/o buffering, because StarTask
5153 // cannot handle unaligned oop locations.
5154 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
5155
5156 process_strong_roots(false, // no scoping; this is parallel code 5127 process_strong_roots(false, // no scoping; this is parallel code
5157 is_scavenging, so, 5128 so,
5158 &buf_scan_non_heap_roots, 5129 &buf_scan_non_heap_roots,
5159 &eager_scan_code_roots,
5160 scan_klasses 5130 scan_klasses
5161 ); 5131 );
5162 5132
5163 // Now the CM ref_processor roots. 5133 // Now the CM ref_processor roots.
5164 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { 5134 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
5200 // the entire code cache, we need to mark the oops in the 5170 // the entire code cache, we need to mark the oops in the
5201 // strong code root lists for the regions that are not in 5171 // strong code root lists for the regions that are not in
5202 // the collection set. 5172 // the collection set.
5203 // Note all threads participate in this set of root tasks. 5173 // Note all threads participate in this set of root tasks.
5204 double mark_strong_code_roots_ms = 0.0; 5174 double mark_strong_code_roots_ms = 0.0;
5205 if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) { 5175 if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
5206 double mark_strong_roots_start = os::elapsedTime(); 5176 double mark_strong_roots_start = os::elapsedTime();
5207 mark_strong_code_roots(worker_i); 5177 mark_strong_code_roots(worker_i);
5208 mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0; 5178 mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
5209 } 5179 }
5210 g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms); 5180 g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
5211 5181
5212 // Now scan the complement of the collection set. 5182 // Now scan the complement of the collection set.
5213 if (scan_rs != NULL) { 5183 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
5214 g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i); 5184 g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
5215 } 5185
5216 _process_strong_tasks->all_tasks_completed(); 5186 _process_strong_tasks->all_tasks_completed();
5217 } 5187 }
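buf_scan_non_heap_roots wraps the real closure in a BufferingOopClosure, which collects discovered oop locations and applies the underlying closure a buffer at a time, amortizing its per-invocation overhead. The idea as a generic template (not the HotSpot class):

    #include <cstddef>

    // Batch pointers and apply fn to a full buffer at once.
    template <class F, std::size_t N = 1024>
    class BufferingVisitor {
      void*       _buf[N];
      std::size_t _n = 0;
      F&          _fn;
    public:
      explicit BufferingVisitor(F& fn) : _fn(fn) {}
      void visit(void* p) {
        _buf[_n++] = p;
        if (_n == N) drain();
      }
      void drain() {
        for (std::size_t i = 0; i < _n; ++i) _fn(_buf[i]);
        _n = 0;
      }
      ~BufferingVisitor() { drain(); }  // flush any tail on destruction
    };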
5218 5188
5219 void 5189 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
5220 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) { 5190 private:
5221 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); 5191 BoolObjectClosure* _is_alive;
5222 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs); 5192 int _initial_string_table_size;
5193 int _initial_symbol_table_size;
5194
5195 bool _process_strings;
5196 int _strings_processed;
5197 int _strings_removed;
5198
5199 bool _process_symbols;
5200 int _symbols_processed;
5201 int _symbols_removed;
5202
5203 bool _do_in_parallel;
5204 public:
5205 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
5206 AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
5207 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
5208 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
5209 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
5210
5211 _initial_string_table_size = StringTable::the_table()->table_size();
5212 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
5213 if (process_strings) {
5214 StringTable::clear_parallel_claimed_index();
5215 }
5216 if (process_symbols) {
5217 SymbolTable::clear_parallel_claimed_index();
5218 }
5219 }
5220
5221 ~G1StringSymbolTableUnlinkTask() {
5222 guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
5223 err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
5224 StringTable::parallel_claimed_index(), _initial_string_table_size));
5225 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
5226 err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
5227 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
5228 }
5229
5230 void work(uint worker_id) {
5231 if (_do_in_parallel) {
5232 int strings_processed = 0;
5233 int strings_removed = 0;
5234 int symbols_processed = 0;
5235 int symbols_removed = 0;
5236 if (_process_strings) {
5237 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
5238 Atomic::add(strings_processed, &_strings_processed);
5239 Atomic::add(strings_removed, &_strings_removed);
5240 }
5241 if (_process_symbols) {
5242 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
5243 Atomic::add(symbols_processed, &_symbols_processed);
5244 Atomic::add(symbols_removed, &_symbols_removed);
5245 }
5246 } else {
5247 if (_process_strings) {
5248 StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
5249 }
5250 if (_process_symbols) {
5251 SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
5252 }
5253 }
5254 }
5255
5256 size_t strings_processed() const { return (size_t)_strings_processed; }
5257 size_t strings_removed() const { return (size_t)_strings_removed; }
5258
5259 size_t symbols_processed() const { return (size_t)_symbols_processed; }
5260 size_t symbols_removed() const { return (size_t)_symbols_removed; }
5261 };
5262
5263 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5264 bool process_strings, bool process_symbols) {
5265 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5266 _g1h->workers()->active_workers() : 1);
5267
5268 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5269 if (G1CollectedHeap::use_parallel_gc_threads()) {
5270 set_par_threads(n_workers);
5271 workers()->run_task(&g1_unlink_task);
5272 set_par_threads(0);
5273 } else {
5274 g1_unlink_task.work(0);
5275 }
5276 if (G1TraceStringSymbolTableScrubbing) {
5277 gclog_or_tty->print_cr("Cleaned string and symbol table, "
5278 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5279 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5280 g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
5281 g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
5282 }
5223 } 5283 }
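G1StringSymbolTableUnlinkTask parallelizes the cleanup with a claim counter: each worker fetch-and-adds a shared index to claim a disjoint range of buckets, keeps private counts, and publishes them with one atomic add at the end; the destructor's guarantee that the claim value reached the initial table size checks that every bucket was visited. The claiming loop reduced to its essentials (placeholder table):

    #include <algorithm>
    #include <atomic>
    #include <vector>

    struct Table {
      std::vector<int> buckets;     // stand-in for hash buckets
      std::atomic<int> claimed{0};  // next unclaimed bucket index
    };

    static const int kClaimChunk = 32;

    void possibly_parallel_unlink(Table& t, std::atomic<int>& processed) {
      int local = 0;
      for (;;) {
        int start = t.claimed.fetch_add(kClaimChunk);  // claim a chunk
        if (start >= (int)t.buckets.size()) break;     // table exhausted
        int end = std::min(start + kClaimChunk, (int)t.buckets.size());
        for (int i = start; i < end; ++i) {
          // ... unlink dead entries from bucket i ...
          ++local;
        }
      }
      processed.fetch_add(local);  // one atomic publish per worker
    }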
5224 5284
5225 // Weak Reference Processing support 5285 // Weak Reference Processing support
5226 5286
5227 // An always "is_alive" closure that is used to preserve referents. 5287 // An always "is_alive" closure that is used to preserve referents.
5400 ResourceMark rm; 5460 ResourceMark rm;
5401 HandleMark hm; 5461 HandleMark hm;
5402 5462
5403 G1STWIsAliveClosure is_alive(_g1h); 5463 G1STWIsAliveClosure is_alive(_g1h);
5404 5464
5405 G1ParScanThreadState pss(_g1h, worker_id); 5465 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5406 5466
5407 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL); 5467 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5408 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); 5468 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5409 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL); 5469 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
5410 5470
5512 5572
5513 void work(uint worker_id) { 5573 void work(uint worker_id) {
5514 ResourceMark rm; 5574 ResourceMark rm;
5515 HandleMark hm; 5575 HandleMark hm;
5516 5576
5517 G1ParScanThreadState pss(_g1h, worker_id); 5577 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5518 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL); 5578 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5519 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); 5579 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5520 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL); 5580 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
5521 5581
5522 pss.set_evac_closure(&scan_evac_cl); 5582 pss.set_evac_closure(&scan_evac_cl);
5638 // of JNI refs is serial and performed serially by the current thread 5698 // of JNI refs is serial and performed serially by the current thread
5639 // rather than by a worker. The following PSS will be used for processing 5699 // rather than by a worker. The following PSS will be used for processing
5640 // JNI refs. 5700 // JNI refs.
5641 5701
5642 // Use only a single queue for this PSS. 5702 // Use only a single queue for this PSS.
5643 G1ParScanThreadState pss(this, 0); 5703 G1ParScanThreadState pss(this, 0, NULL);
5644 5704
5645 // We do not embed a reference processor in the copying/scanning 5705 // We do not embed a reference processor in the copying/scanning
5646 // closures while we're actually processing the discovered 5706 // closures while we're actually processing the discovered
5647 // reference objects. 5707 // reference objects.
5648 G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL); 5708 G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL);