comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 4787:2ace1c4ee8da
6888336: G1: avoid explicitly marking and pushing objects in survivor spaces
Summary: This change simplifies the interaction between GC and concurrent marking. By disabling survivor spaces during the initial-mark pause, we don't need to propagate marks of objects we copy during each GC (since we never need to copy an explicitly marked object).
Reviewed-by: johnc, brutisso
author       tonyp
date         Tue, 10 Jan 2012 18:58:13 -0500
parents      97c00e21fecb
children     9509c20bba28 aa3d708d67c4
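Before the line-by-line comparison, a condensed sketch of the control flow this patch leaves behind may help. It is a self-contained model, not code from the patch: `oop_t`, `gray_root`, and the stubbed `copy_to_survivor_space` are stand-ins for HotSpot's oop, ConcurrentMark::grayRoot(), and G1ParCopyHelper::copy_to_survivor_space(), and the remembered-set barrier work is omitted. The point it illustrates is how the deleted should_mark_root/should_mark_copy plumbing collapses: CSet objects are marked through their forwarded copy after evacuation, non-CSet objects are marked in place, and self-forwarded objects are left to the evacuation-failure protocol.

```cpp
#include <cstddef>
#include <cstdio>

// Minimal stand-in types -- illustrative only, not HotSpot's real API.
struct oop_t {
  oop_t* forwardee = nullptr;   // set once the object has been evacuated
  bool   in_cset   = false;     // true while the object awaits evacuation
  size_t word_size = 0;
};

// Models ConcurrentMark::grayRoot(obj, size): mark the object on the
// "next" bitmap and push it for concurrent marking if necessary.
static void gray_root(oop_t* obj, size_t word_size) {
  std::printf("gray %p (%zu words)\n", static_cast<void*>(obj), word_size);
}

// Stub for copy_to_survivor_space(): evacuate the object, or forward it
// to itself on evacuation failure. Here we only model the failure path.
static oop_t* copy_to_survivor_space(oop_t* obj) {
  obj->forwardee = obj;
  return obj;
}

// Sketch of the simplified do_oop_work() this changeset introduces.
static void do_oop_work(oop_t*& p, bool do_mark_object) {
  oop_t* obj = p;
  if (obj == nullptr) return;

  if (obj->in_cset) {
    oop_t* forwardee = (obj->forwardee != nullptr)
                           ? obj->forwardee             // already copied
                           : copy_to_survivor_space(obj);
    p = forwardee;                                      // install new location
    if (do_mark_object && forwardee != obj) {
      // mark_forwarded_object(): the size is read from the stable
      // from-space image, since another worker may still be copying
      // the to-space image.
      gray_root(forwardee, obj->word_size);
    }
    // A self-forwarded object (evacuation failure) is skipped here;
    // the evacuation-failure protocol marks it instead.
  } else if (do_mark_object) {
    // mark_object(): the object is not moving, so its size is safe to read.
    gray_root(obj, obj->word_size);
  }
}

int main() {
  oop_t a{nullptr, /*in_cset=*/false, /*word_size=*/8};
  oop_t* ref = &a;
  do_oop_work(ref, /*do_mark_object=*/true);  // marks 'a' in place
  return 0;
}
```

Compare this with the old-line 4432-4485 hunk below: the decision reduces to "did the object move, and am I a root-scanning closure during an initial-mark pause".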
4786:1d6185f732aa (parent) | 4787:2ace1c4ee8da (this changeset) |
34 #include "gc_implementation/g1/g1ErgoVerbose.hpp" | 34 #include "gc_implementation/g1/g1ErgoVerbose.hpp" |
35 #include "gc_implementation/g1/g1EvacFailure.hpp" | 35 #include "gc_implementation/g1/g1EvacFailure.hpp" |
36 #include "gc_implementation/g1/g1MarkSweep.hpp" | 36 #include "gc_implementation/g1/g1MarkSweep.hpp" |
37 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | 37 #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
38 #include "gc_implementation/g1/g1RemSet.inline.hpp" | 38 #include "gc_implementation/g1/g1RemSet.inline.hpp" |
| 39 #include "gc_implementation/g1/heapRegion.inline.hpp" |
39 #include "gc_implementation/g1/heapRegionRemSet.hpp" | 40 #include "gc_implementation/g1/heapRegionRemSet.hpp" |
40 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | 41 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" |
41 #include "gc_implementation/g1/vm_operations_g1.hpp" | 42 #include "gc_implementation/g1/vm_operations_g1.hpp" |
42 #include "gc_implementation/shared/isGCActiveMark.hpp" | 43 #include "gc_implementation/shared/isGCActiveMark.hpp" |
43 #include "memory/gcLocker.inline.hpp" | 44 #include "memory/gcLocker.inline.hpp" |
3016 if (failures) { | 3017 if (failures) { |
3017 _failures = true; | 3018 _failures = true; |
3018 } else { | 3019 } else { |
3019 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo); | 3020 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo); |
3020 r->object_iterate(&not_dead_yet_cl); | 3021 r->object_iterate(&not_dead_yet_cl); |
3021 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { | 3022 if (_vo != VerifyOption_G1UseNextMarking) { |
3022 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " | 3023 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
3023 "max_live_bytes "SIZE_FORMAT" " | 3024 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
3024 "< calculated "SIZE_FORMAT, | 3025 "max_live_bytes "SIZE_FORMAT" " |
3025 r->bottom(), r->end(), | 3026 "< calculated "SIZE_FORMAT, |
3026 r->max_live_bytes(), | 3027 r->bottom(), r->end(), |
| 3028 r->max_live_bytes(), |
3027 not_dead_yet_cl.live_bytes()); | 3029 not_dead_yet_cl.live_bytes()); |
3028 _failures = true; | 3030 _failures = true; |
| 3031 } |
| 3032 } else { |
| 3033 // When vo == UseNextMarking we cannot currently do a sanity |
| 3034 // check on the live bytes as the calculation has not been |
| 3035 // finalized yet. |
3029 } | 3036 } |
3030 } | 3037 } |
3031 } | 3038 } |
3032 return false; // stop the region iteration if we hit a failure | 3039 return false; // stop the region iteration if we hit a failure |
3033 } | 3040 } |
3657 if (g1_policy()->during_initial_mark_pause()) { | 3664 if (g1_policy()->during_initial_mark_pause()) { |
3658 concurrent_mark()->checkpointRootsInitialPre(); | 3665 concurrent_mark()->checkpointRootsInitialPre(); |
3659 } | 3666 } |
3660 perm_gen()->save_marks(); | 3667 perm_gen()->save_marks(); |
3661 | 3668 |
3662 // We must do this before any possible evacuation that should propagate | |
3663 // marks. | |
3664 if (mark_in_progress()) { | |
3665 double start_time_sec = os::elapsedTime(); | |
3666 | |
3667 _cm->drainAllSATBBuffers(); | |
3668 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; | |
3669 g1_policy()->record_satb_drain_time(finish_mark_ms); | |
3670 } | |
3671 // Record the number of elements currently on the mark stack, so we | |
3672 // only iterate over these. (Since evacuation may add to the mark | |
3673 // stack, doing more exposes race conditions.) If no mark is in | |
3674 // progress, this will be zero. | |
3675 _cm->set_oops_do_bound(); | |
3676 | |
3677 if (mark_in_progress()) { | |
3678 concurrent_mark()->newCSet(); | |
3679 } | |
3680 | |
3681 #if YOUNG_LIST_VERBOSE | 3669 #if YOUNG_LIST_VERBOSE |
3682 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); | 3670 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
3683 _young_list->print(); | 3671 _young_list->print(); |
3684 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); | 3672 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
3685 #endif // YOUNG_LIST_VERBOSE | 3673 #endif // YOUNG_LIST_VERBOSE |
3686 | 3674 |
3687 g1_policy()->choose_collection_set(target_pause_time_ms); | 3675 g1_policy()->choose_collection_set(target_pause_time_ms); |
| 3676 |
| 3677 _cm->note_start_of_gc(); |
| 3678 // We should not verify the per-thread SATB buffers given that |
| 3679 // we have not filtered them yet (we'll do so during the |
| 3680 // GC). We also call this after choose_collection_set() to |
| 3681 // ensure that the CSet has been finalized. |
| 3682 _cm->verify_no_cset_oops(true /* verify_stacks */, |
| 3683 true /* verify_enqueued_buffers */, |
| 3684 false /* verify_thread_buffers */, |
| 3685 true /* verify_fingers */); |
3688 | 3686 |
3689 if (_hr_printer.is_active()) { | 3687 if (_hr_printer.is_active()) { |
3690 HeapRegion* hr = g1_policy()->collection_set(); | 3688 HeapRegion* hr = g1_policy()->collection_set(); |
3691 while (hr != NULL) { | 3689 while (hr != NULL) { |
3692 G1HRPrinter::RegionType type; | 3690 G1HRPrinter::RegionType type; |
3700 _hr_printer.cset(hr); | 3698 _hr_printer.cset(hr); |
3701 hr = hr->next_in_collection_set(); | 3699 hr = hr->next_in_collection_set(); |
3702 } | 3700 } |
3703 } | 3701 } |
3704 | 3702 |
3705 // We have chosen the complete collection set. If marking is | |
3706 // active then, we clear the region fields of any of the | |
3707 // concurrent marking tasks whose region fields point into | |
3708 // the collection set as these values will become stale. This | |
3709 // will cause the owning marking threads to claim a new region | |
3710 // when marking restarts. | |
3711 if (mark_in_progress()) { | |
3712 concurrent_mark()->reset_active_task_region_fields_in_cset(); | |
3713 } | |
3714 | |
3715 #ifdef ASSERT | 3703 #ifdef ASSERT |
3716 VerifyCSetClosure cl; | 3704 VerifyCSetClosure cl; |
3717 collection_set_iterate(&cl); | 3705 collection_set_iterate(&cl); |
3718 #endif // ASSERT | 3706 #endif // ASSERT |
3719 | 3707 |
3722 // Initialize the GC alloc regions. | 3710 // Initialize the GC alloc regions. |
3723 init_gc_alloc_regions(); | 3711 init_gc_alloc_regions(); |
3724 | 3712 |
3725 // Actually do the work... | 3713 // Actually do the work... |
3726 evacuate_collection_set(); | 3714 evacuate_collection_set(); |
| 3715 |
| 3716 // We do this mainly to verify the per-thread SATB buffers |
| 3717 // (which have been filtered by now) since we didn't verify |
| 3718 // them earlier. No point in re-checking the stacks / enqueued |
| 3719 // buffers given that the CSet has not changed since last time |
| 3720 // we checked. |
| 3721 _cm->verify_no_cset_oops(false /* verify_stacks */, |
| 3722 false /* verify_enqueued_buffers */, |
| 3723 true /* verify_thread_buffers */, |
| 3724 true /* verify_fingers */); |
3727 | 3725 |
3728 free_collection_set(g1_policy()->collection_set()); | 3726 free_collection_set(g1_policy()->collection_set()); |
3729 g1_policy()->clear_collection_set(); | 3727 g1_policy()->clear_collection_set(); |
3730 | 3728 |
3731 cleanup_surviving_young_words(); | 3729 cleanup_surviving_young_words(); |
3801 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); | 3799 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); |
3802 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); | 3800 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
3803 } | 3801 } |
3804 } | 3802 } |
3805 } | 3803 } |
| 3804 |
| 3805 // We redo the verification but now wrt the new CSet which |
| 3806 // has just been initialized after the previous CSet was freed. |
| 3807 _cm->verify_no_cset_oops(true /* verify_stacks */, |
| 3808 true /* verify_enqueued_buffers */, |
| 3809 true /* verify_thread_buffers */, |
| 3810 true /* verify_fingers */); |
| 3811 _cm->note_end_of_gc(); |
3806 | 3812 |
3807 double end_time_sec = os::elapsedTime(); | 3813 double end_time_sec = os::elapsedTime(); |
3808 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; | 3814 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
3809 g1_policy()->record_pause_time_ms(pause_time_ms); | 3815 g1_policy()->record_pause_time_ms(pause_time_ms); |
3810 int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? | 3816 int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? |
3952 // The retained region was added to the old region set when it was | 3958 // The retained region was added to the old region set when it was |
3953 // retired. We have to remove it now, since we don't allow regions | 3959 // retired. We have to remove it now, since we don't allow regions |
3954 // we allocate to in the region sets. We'll re-add it later, when | 3960 // we allocate to in the region sets. We'll re-add it later, when |
3955 // it's retired again. | 3961 // it's retired again. |
3956 _old_set.remove(retained_region); | 3962 _old_set.remove(retained_region); |
| 3963 bool during_im = g1_policy()->during_initial_mark_pause(); |
| 3964 retained_region->note_start_of_copying(during_im); |
3957 _old_gc_alloc_region.set(retained_region); | 3965 _old_gc_alloc_region.set(retained_region); |
3958 _hr_printer.reuse(retained_region); | 3966 _hr_printer.reuse(retained_region); |
3959 } | 3967 } |
3960 } | 3968 } |
3961 | 3969 |
4045 } | 4053 } |
4046 } | 4054 } |
4047 | 4055 |
4048 oop | 4056 oop |
4049 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | 4057 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, |
4050 oop old, | 4058 oop old) { |
4051 bool should_mark_root) { | |
4052 assert(obj_in_cs(old), | 4059 assert(obj_in_cs(old), |
4053 err_msg("obj: "PTR_FORMAT" should still be in the CSet", | 4060 err_msg("obj: "PTR_FORMAT" should still be in the CSet", |
4054 (HeapWord*) old)); | 4061 (HeapWord*) old)); |
4055 markOop m = old->mark(); | 4062 markOop m = old->mark(); |
4056 oop forward_ptr = old->forward_to_atomic(old); | 4063 oop forward_ptr = old->forward_to_atomic(old); |
4057 if (forward_ptr == NULL) { | 4064 if (forward_ptr == NULL) { |
4058 // Forward-to-self succeeded. | 4065 // Forward-to-self succeeded. |
4059 | |
4060 // should_mark_root will be true when this routine is called | |
4061 // from a root scanning closure during an initial mark pause. | |
4062 // In this case the thread that succeeds in self-forwarding the | |
4063 // object is also responsible for marking the object. | |
4064 if (should_mark_root) { | |
4065 assert(!oopDesc::is_null(old), "shouldn't be"); | |
4066 _cm->grayRoot(old); | |
4067 } | |
4068 | 4066 |
4069 if (_evac_failure_closure != cl) { | 4067 if (_evac_failure_closure != cl) { |
4070 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | 4068 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); |
4071 assert(!_drain_in_progress, | 4069 assert(!_drain_in_progress, |
4072 "Should only be true while someone holds the lock."); | 4070 "Should only be true while someone holds the lock."); |
4159 ShouldNotReachHere(); | 4157 ShouldNotReachHere(); |
4160 // Trying to keep some compilers happy. | 4158 // Trying to keep some compilers happy. |
4161 return NULL; | 4159 return NULL; |
4162 } | 4160 } |
4163 | 4161 |
4164 #ifndef PRODUCT | |
4165 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
4166 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
4167 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
4168 return true; | |
4169 } | |
4170 #endif // PRODUCT | |
4171 | |
4172 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) : | 4162 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) : |
4173 ParGCAllocBuffer(gclab_word_size), | 4163 ParGCAllocBuffer(gclab_word_size), _retired(false) { } |
4174 _should_mark_objects(false), | |
4175 _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size), | |
4176 _retired(false) | |
4177 { | |
4178 //_should_mark_objects is set to true when G1ParCopyHelper needs to | |
4179 // mark the forwarded location of an evacuated object. | |
4180 // We set _should_mark_objects to true if marking is active, i.e. when we | |
4181 // need to propagate a mark, or during an initial mark pause, i.e. when we | |
4182 // need to mark objects immediately reachable by the roots. | |
4183 if (G1CollectedHeap::heap()->mark_in_progress() || | |
4184 G1CollectedHeap::heap()->g1_policy()->during_initial_mark_pause()) { | |
4185 _should_mark_objects = true; | |
4186 } | |
4187 } | |
4188 | 4164 |
4189 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) | 4165 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
4190 : _g1h(g1h), | 4166 : _g1h(g1h), |
4191 _refs(g1h->task_queue(queue_num)), | 4167 _refs(g1h->task_queue(queue_num)), |
4192 _dcq(&g1h->dirty_card_queue_set()), | 4168 _dcq(&g1h->dirty_card_queue_set()), |
4196 _term_attempts(0), | 4172 _term_attempts(0), |
4197 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), | 4173 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
4198 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), | 4174 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
4199 _age_table(false), | 4175 _age_table(false), |
4200 _strong_roots_time(0), _term_time(0), | 4176 _strong_roots_time(0), _term_time(0), |
4201 _alloc_buffer_waste(0), _undo_waste(0) | 4177 _alloc_buffer_waste(0), _undo_waste(0) { |
4202 { | |
4203 // we allocate G1YoungSurvRateNumRegions plus one entries, since | 4178 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
4204 // we "sacrifice" entry 0 to keep track of surviving bytes for | 4179 // we "sacrifice" entry 0 to keep track of surviving bytes for |
4205 // non-young regions (where the age is -1) | 4180 // non-young regions (where the age is -1) |
4206 // We also add a few elements at the beginning and at the end in | 4181 // We also add a few elements at the beginning and at the end in |
4207 // an attempt to eliminate cache contention | 4182 // an attempt to eliminate cache contention |
4302 deal_with_reference(ref); | 4277 deal_with_reference(ref); |
4303 } | 4278 } |
4304 } while (!refs()->is_empty()); | 4279 } while (!refs()->is_empty()); |
4305 } | 4280 } |
4306 | 4281 |
4307 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | 4282 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, |
| 4283 G1ParScanThreadState* par_scan_state) : |
4308 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | 4284 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), |
4309 _par_scan_state(par_scan_state), | 4285 _par_scan_state(par_scan_state), |
4310 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()), | 4286 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()), |
4311 _mark_in_progress(_g1->mark_in_progress()) { } | 4287 _mark_in_progress(_g1->mark_in_progress()) { } |
4312 | 4288 |
4313 template <class T> void G1ParCopyHelper::mark_object(T* p) { | 4289 void G1ParCopyHelper::mark_object(oop obj) { |
4314 // This is called from do_oop_work for objects that are not | 4290 #ifdef ASSERT |
4315 // in the collection set. Objects in the collection set | 4291 HeapRegion* hr = _g1->heap_region_containing(obj); |
4316 // are marked after they have been evacuated. | 4292 assert(hr != NULL, "sanity"); |
4317 | 4293 assert(!hr->in_collection_set(), "should not mark objects in the CSet"); |
4318 T heap_oop = oopDesc::load_heap_oop(p); | 4294 #endif // ASSERT |
4319 if (!oopDesc::is_null(heap_oop)) { | 4295 |
4320 oop obj = oopDesc::decode_heap_oop(heap_oop); | 4296 // We know that the object is not moving so it's safe to read its size. |
4321 HeapWord* addr = (HeapWord*)obj; | 4297 _cm->grayRoot(obj, (size_t) obj->size()); |
4322 if (_g1->is_in_g1_reserved(addr)) { | 4298 } |
4323 _cm->grayRoot(oop(addr)); | 4299 |
4324 } | 4300 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) { |
4325 } | 4301 #ifdef ASSERT |
4326 } | 4302 assert(from_obj->is_forwarded(), "from obj should be forwarded"); |
4327 | 4303 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee"); |
4328 oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_root, | 4304 assert(from_obj != to_obj, "should not be self-forwarded"); |
4329 bool should_mark_copy) { | 4305 |
| 4306 HeapRegion* from_hr = _g1->heap_region_containing(from_obj); |
| 4307 assert(from_hr != NULL, "sanity"); |
| 4308 assert(from_hr->in_collection_set(), "from obj should be in the CSet"); |
| 4309 |
| 4310 HeapRegion* to_hr = _g1->heap_region_containing(to_obj); |
| 4311 assert(to_hr != NULL, "sanity"); |
| 4312 assert(!to_hr->in_collection_set(), "should not mark objects in the CSet"); |
| 4313 #endif // ASSERT |
| 4314 |
| 4315 // The object might be in the process of being copied by another |
| 4316 // worker so we cannot trust that its to-space image is |
| 4317 // well-formed. So we have to read its size from its from-space |
| 4318 // image which we know should not be changing. |
| 4319 _cm->grayRoot(to_obj, (size_t) from_obj->size()); |
| 4320 } |
| 4321 |
| 4322 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { |
4330 size_t word_sz = old->size(); | 4323 size_t word_sz = old->size(); |
4331 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | 4324 HeapRegion* from_region = _g1->heap_region_containing_raw(old); |
4332 // +1 to make the -1 indexes valid... | 4325 // +1 to make the -1 indexes valid... |
4333 int young_index = from_region->young_index_in_cset()+1; | 4326 int young_index = from_region->young_index_in_cset()+1; |
4334 assert( (from_region->is_young() && young_index > 0) || | 4327 assert( (from_region->is_young() && young_index > 0) || |
4335 (!from_region->is_young() && young_index == 0), "invariant" ); | 4328 (!from_region->is_young() && young_index == 0), "invariant" ); |
4336 G1CollectorPolicy* g1p = _g1->g1_policy(); | 4329 G1CollectorPolicy* g1p = _g1->g1_policy(); |
4337 markOop m = old->mark(); | 4330 markOop m = old->mark(); |
4338 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() | 4331 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4339 : m->age(); | 4332 : m->age(); |
4340 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | 4333 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, |
4344 | 4337 |
4345 if (obj_ptr == NULL) { | 4338 if (obj_ptr == NULL) { |
4346 // This will either forward-to-self, or detect that someone else has | 4339 // This will either forward-to-self, or detect that someone else has |
4347 // installed a forwarding pointer. | 4340 // installed a forwarding pointer. |
4348 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | 4341 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); |
4349 return _g1->handle_evacuation_failure_par(cl, old, should_mark_root); | 4342 return _g1->handle_evacuation_failure_par(cl, old); |
4350 } | 4343 } |
4351 | 4344 |
4352 // We're going to allocate linearly, so might as well prefetch ahead. | 4345 // We're going to allocate linearly, so might as well prefetch ahead. |
4353 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | 4346 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); |
4354 | 4347 |
4380 _par_scan_state->age_table()->add(obj, word_sz); | 4373 _par_scan_state->age_table()->add(obj, word_sz); |
4381 } else { | 4374 } else { |
4382 obj->set_mark(m); | 4375 obj->set_mark(m); |
4383 } | 4376 } |
4384 | 4377 |
4385 // Mark the evacuated object or propagate "next" mark bit | |
4386 if (should_mark_copy) { | |
4387 if (!use_local_bitmaps || | |
4388 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4389 // if we couldn't mark it on the local bitmap (this happens when | |
4390 // the object was not allocated in the GCLab), we have to bite | |
4391 // the bullet and do the standard parallel mark | |
4392 _cm->markAndGrayObjectIfNecessary(obj); | |
4393 } | |
4394 | |
4395 if (_g1->isMarkedNext(old)) { | |
4396 // Unmark the object's old location so that marking | |
4397 // doesn't think the old object is alive. | |
4398 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4399 } | |
4400 } | |
4401 | |
4402 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | 4378 size_t* surv_young_words = _par_scan_state->surviving_young_words(); |
4403 surv_young_words[young_index] += word_sz; | 4379 surv_young_words[young_index] += word_sz; |
4404 | 4380 |
4405 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | 4381 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { |
4406 // We keep track of the next start index in the length field of | 4382 // We keep track of the next start index in the length field of |
4426 template <class T> | 4402 template <class T> |
4427 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> | 4403 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> |
4428 ::do_oop_work(T* p) { | 4404 ::do_oop_work(T* p) { |
4429 oop obj = oopDesc::load_decode_heap_oop(p); | 4405 oop obj = oopDesc::load_decode_heap_oop(p); |
4430 assert(barrier != G1BarrierRS || obj != NULL, | 4406 assert(barrier != G1BarrierRS || obj != NULL, |
4431 "Precondition: G1BarrierRS implies obj is nonNull"); | 4407 "Precondition: G1BarrierRS implies obj is non-NULL"); |
4432 | |
4433 // Marking: | |
4434 // If the object is in the collection set, then the thread | |
4435 // that copies the object should mark, or propagate the | |
4436 // mark to, the evacuated object. | |
4437 // If the object is not in the collection set then we | |
4438 // should call the mark_object() method depending on the | |
4439 // value of the template parameter do_mark_object (which will | |
4440 // be true for root scanning closures during an initial mark | |
4441 // pause). | |
4442 // The mark_object() method first checks whether the object | |
4443 // is marked and, if not, attempts to mark the object. | |
4444 | 4408 |
4445 // here the null check is implicit in the cset_fast_test() test | 4409 // here the null check is implicit in the cset_fast_test() test |
4446 if (_g1->in_cset_fast_test(obj)) { | 4410 if (_g1->in_cset_fast_test(obj)) { |
| 4411 oop forwardee; |
4447 if (obj->is_forwarded()) { | 4412 if (obj->is_forwarded()) { |
4448 oopDesc::encode_store_heap_oop(p, obj->forwardee()); | 4413 forwardee = obj->forwardee(); |
4449 // If we are a root scanning closure during an initial | |
4450 // mark pause (i.e. do_mark_object will be true) then | |
4451 // we also need to handle marking of roots in the | |
4452 // event of an evacuation failure. In the event of an | |
4453 // evacuation failure, the object is forwarded to itself | |
4454 // and not copied. For root-scanning closures, the | |
4455 // object would be marked after a successful self-forward | |
4456 // but an object could be pointed to by both a root and non | |
4457 // root location and be self-forwarded by a non-root-scanning | |
4458 // closure. Therefore we also have to attempt to mark the | |
4459 // self-forwarded root object here. | |
4460 if (do_mark_object && obj->forwardee() == obj) { | |
4461 mark_object(p); | |
4462 } | |
4463 } else { | 4414 } else { |
4464 // During an initial mark pause, objects that are pointed to | 4415 forwardee = copy_to_survivor_space(obj); |
4465 // by the roots need to be marked - even in the event of an | 4416 } |
4466 // evacuation failure. We pass the template parameter | 4417 assert(forwardee != NULL, "forwardee should not be NULL"); |
4467 // do_mark_object (which is true for root scanning closures | 4418 oopDesc::encode_store_heap_oop(p, forwardee); |
4468 // during an initial mark pause) to copy_to_survivor_space | 4419 if (do_mark_object && forwardee != obj) { |
4469 // which will pass it on to the evacuation failure handling | 4420 // If the object is self-forwarded we don't need to explicitly |
4470 // code. The thread that successfully self-forwards a root | 4421 // mark it, the evacuation failure protocol will do so. |
4471 // object to itself is responsible for marking the object. | 4422 mark_forwarded_object(obj, forwardee); |
4472 bool should_mark_root = do_mark_object; | 4423 } |
4473 | 4424 |
4474 // We need to mark the copied object if we're a root scanning | |
4475 // closure during an initial mark pause (i.e. do_mark_object | |
4476 // will be true), or the object is already marked and we need | |
4477 // to propagate the mark to the evacuated copy. | |
4478 bool should_mark_copy = do_mark_object || | |
4479 _during_initial_mark || | |
4480 (_mark_in_progress && !_g1->is_obj_ill(obj)); | |
4481 | |
4482 oop copy_oop = copy_to_survivor_space(obj, should_mark_root, | |
4483 should_mark_copy); | |
4484 oopDesc::encode_store_heap_oop(p, copy_oop); | |
4485 } | |
4486 // When scanning the RS, we only care about objs in CS. | 4425 // When scanning the RS, we only care about objs in CS. |
4487 if (barrier == G1BarrierRS) { | 4426 if (barrier == G1BarrierRS) { |
4488 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); | 4427 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
4489 } | 4428 } |
4490 } else { | 4429 } else { |
4491 // The object is not in the collection set. If we're a root scanning | 4430 // The object is not in the collection set. If we're a root scanning |
4492 // closure during an initial mark pause (i.e. do_mark_object will | 4431 // closure during an initial mark pause (i.e. do_mark_object will |
4493 // be true) then attempt to mark the object. | 4432 // be true) then attempt to mark the object. |
4494 if (do_mark_object) { | 4433 if (do_mark_object && _g1->is_in_g1_reserved(obj)) { |
4495 mark_object(p); | 4434 mark_object(obj); |
4496 } | 4435 } |
4497 } | 4436 } |
4498 | 4437 |
4499 if (barrier == G1BarrierEvac && obj != NULL) { | 4438 if (barrier == G1BarrierEvac && obj != NULL) { |
4500 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); | 4439 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
4785 double ext_root_time_ms = | 4724 double ext_root_time_ms = |
4786 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | 4725 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; |
4787 | 4726 |
4788 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | 4727 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); |
4789 | 4728 |
4790 // Scan strong roots in mark stack. | 4729 // During conc marking we have to filter the per-thread SATB buffers |
4791 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | 4730 // to make sure we remove any oops into the CSet (which will show up |
4792 concurrent_mark()->oops_do(scan_non_heap_roots); | 4731 // as implicitly live). |
4793 } | 4732 if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) { |
4794 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | 4733 if (mark_in_progress()) { |
4795 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | 4734 JavaThread::satb_mark_queue_set().filter_thread_buffers(); |
| 4735 } |
| 4736 } |
| 4737 double satb_filtering_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; |
| 4738 g1_policy()->record_satb_filtering_time(worker_i, satb_filtering_ms); |
4796 | 4739 |
4797 // Now scan the complement of the collection set. | 4740 // Now scan the complement of the collection set. |
4798 if (scan_rs != NULL) { | 4741 if (scan_rs != NULL) { |
4799 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | 4742 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); |
4800 } | 4743 } |
5409 concurrent_g1_refine()->clear_hot_cache(); | 5352 concurrent_g1_refine()->clear_hot_cache(); |
5410 concurrent_g1_refine()->set_use_cache(true); | 5353 concurrent_g1_refine()->set_use_cache(true); |
5411 | 5354 |
5412 finalize_for_evac_failure(); | 5355 finalize_for_evac_failure(); |
5413 | 5356 |
5414 // Must do this before clearing the per-region evac-failure flags | |
5415 // (which is currently done when we free the collection set). | |
5416 // We also only do this if marking is actually in progress and so | |
5417 // have to do this before we set the mark_in_progress flag at the | |
5418 // end of an initial mark pause. | |
5419 concurrent_mark()->complete_marking_in_collection_set(); | |
5420 | |
5421 if (evacuation_failed()) { | 5357 if (evacuation_failed()) { |
5422 remove_self_forwarding_pointers(); | 5358 remove_self_forwarding_pointers(); |
5423 if (PrintGCDetails) { | 5359 if (PrintGCDetails) { |
5424 gclog_or_tty->print(" (to-space overflow)"); | 5360 gclog_or_tty->print(" (to-space overflow)"); |
5425 } else if (PrintGC) { | 5361 } else if (PrintGC) { |
6072 new_alloc_region->set_survivor(); | 6008 new_alloc_region->set_survivor(); |
6073 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor); | 6009 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor); |
6074 } else { | 6010 } else { |
6075 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old); | 6011 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old); |
6076 } | 6012 } |
| 6013 bool during_im = g1_policy()->during_initial_mark_pause(); |
| 6014 new_alloc_region->note_start_of_copying(during_im); |
6077 return new_alloc_region; | 6015 return new_alloc_region; |
6078 } else { | 6016 } else { |
6079 g1_policy()->note_alloc_region_limit_reached(ap); | 6017 g1_policy()->note_alloc_region_limit_reached(ap); |
6080 } | 6018 } |
6081 } | 6019 } |
6083 } | 6021 } |
6084 | 6022 |
6085 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region, | 6023 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region, |
6086 size_t allocated_bytes, | 6024 size_t allocated_bytes, |
6087 GCAllocPurpose ap) { | 6025 GCAllocPurpose ap) { |
6088 alloc_region->note_end_of_copying(); | 6026 bool during_im = g1_policy()->during_initial_mark_pause(); |
| 6027 alloc_region->note_end_of_copying(during_im); |
6089 g1_policy()->record_bytes_copied_during_gc(allocated_bytes); | 6028 g1_policy()->record_bytes_copied_during_gc(allocated_bytes); |
6090 if (ap == GCAllocForSurvived) { | 6029 if (ap == GCAllocForSurvived) { |
6091 young_list()->add_survivor_region(alloc_region); | 6030 young_list()->add_survivor_region(alloc_region); |
6092 } else { | 6031 } else { |
6093 _old_set.add(alloc_region); | 6032 _old_set.add(alloc_region); |
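A closing note on the note_start_of_copying(during_im) / note_end_of_copying(during_im) hooks threaded through the GC alloc region lifecycle above (the retained-region hunk and the new/retire GC alloc region hunks): the summary's claim that copied objects never need explicit marks rests on G1's rule that objects above a region's top-at-mark-start (TAMS) are implicitly live. The toy below is a guess at the shape of that bookkeeping (the real code presumably sits behind the newly added heapRegion.inline.hpp include); every name in it is hypothetical. It shows why freezing the "next" TAMS at the region's current top before copying starts makes everything evacuated during an initial-mark pause implicitly live, with no mark bit to set or propagate.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy model of TAMS bookkeeping; names and layout are hypothetical,
// not HotSpot's.
struct ToyRegion {
  uintptr_t bottom;
  uintptr_t top;     // allocation high-water mark
  uintptr_t ntams;   // "next" top-at-mark-start for the upcoming cycle

  explicit ToyRegion(uintptr_t b) : bottom(b), top(b), ntams(b) {}

  void note_start_of_copying(bool during_initial_mark) {
    if (during_initial_mark) {
      // Freeze NTAMS at the current top: every object the GC copies
      // into this region from now on lands at an address >= ntams.
      ntams = top;
    }
  }

  uintptr_t allocate(size_t bytes) {  // the GC copies objects via this path
    uintptr_t obj = top;
    top += bytes;
    return obj;
  }

  // The concurrent cycle treats anything at or above NTAMS as live
  // without consulting the mark bitmap.
  bool implicitly_live(uintptr_t obj) const { return obj >= ntams; }
};

int main() {
  ToyRegion r(0x1000);
  r.note_start_of_copying(/*during_initial_mark=*/true);
  uintptr_t copy = r.allocate(64);   // object evacuated during the pause
  assert(r.implicitly_live(copy));   // live to marking, no explicit bit set
  return 0;
}
```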