comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 620:bcedf688d882

Merge
author tonyp
date Mon, 09 Mar 2009 11:32:57 -0400
parents d8c7fa77a6dc 7ea5ca260b28
children 87fa6e083d82 660978a2a31a
comparing 614:3db67f76d308 (left) with 620:bcedf688d882 (right)
  134   134      return true;
  135   135    }
  136   136    int calls() { return _calls; }
  137   137  };
  138   138
        139  class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
        140  public:
        141    bool do_card_ptr(jbyte* card_ptr, int worker_i) {
        142      *card_ptr = CardTableModRefBS::dirty_card_val();
        143      return true;
        144    }
        145  };
        146
  139   147  YoungList::YoungList(G1CollectedHeap* g1h)
  140   148    : _g1h(g1h), _head(NULL),
  141   149      _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
  142   150      _length(0), _scan_only_length(0),
  143   151      _last_sampled_rs_lengths(0),
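Note: the new closure above is the undo step for deferred remembered-set updates. Every card that was logged during the pause is simply written back to the dirty value, so the regular card-scanning path will process it again. A minimal standalone model of that contract (the types and helper below are illustrative, not HotSpot's):

    #include <cstdint>
    #include <vector>

    // Toy card table: one byte per card; nonzero means "dirty".
    typedef int8_t CardValue;
    const CardValue kDirtyCard = 1;

    // A logged buffer holds pointers to cards that were previously cleaned.
    // Re-dirtying each entry makes normal card scanning visit it again,
    // which is all RedirtyLoggedCardTableEntryFastClosure::do_card_ptr does.
    void redirty_logged_cards(std::vector<CardValue*>& buffer) {
      for (size_t i = 0; i < buffer.size(); ++i) {
        *buffer[i] = kDirtyCard;
      }
    }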
  959   967    // dirty-card logging system, some cards may be dirty by weak-ref
  960   968    // processing, and may be enqueued. But the whole card table is
  961   969    // dirtied, so this should abandon those logs, and set "do_traversal"
  962   970    // to true.
  963   971    concurrent_g1_refine()->set_pya_restart();
  964
        972    assert(!G1DeferredRSUpdate
        973           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  965   974    assert(regions_accounted_for(), "Region leakage!");
  966   975  }
  967   976
  968   977  if (g1_policy()->in_young_gc_mode()) {
  969   978    _young_list->reset_sampled_info();
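Note: the assertion added at lines 972-973 has the shape !A || (A && B), which is logically the same as "A implies B"; an equivalent, slightly simpler form would be:

    assert(!G1DeferredRSUpdate ||
           dirty_card_queue_set().completed_buffers_num() == 0,
           "Should not be any");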
 1464  1473    JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
 1465  1474                                                  DirtyCardQ_FL_lock,
 1466  1475                                                  G1DirtyCardQueueMax,
 1467  1476                                                  Shared_DirtyCardQ_lock);
 1468  1477  }
       1478  if (G1DeferredRSUpdate) {
       1479    dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
       1480                                      DirtyCardQ_FL_lock,
       1481                                      0,
       1482                                      Shared_DirtyCardQ_lock,
       1483                                      &JavaThread::dirty_card_queue_set());
       1484  }
 1469  1485  // In case we're keeping closure specialization stats, initialize those
 1470  1486  // counts and that mechanism.
 1471  1487  SpecializationStats::clear();
 1472  1488
 1473  1489  _gc_alloc_region_list = NULL;
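Note: the second queue set initialized above is private to the collector. Passing 0 as the max-completed-buffers argument means its buffers are never handed off for processing mid-pause, and passing &JavaThread::dirty_card_queue_set() as the last argument makes it share that set's buffer free list, which is what later makes merge_bufferlists() cheap. A toy model of two queue sets whose completed buffers can be handed over wholesale (illustrative types only, not HotSpot's API):

    #include <vector>

    typedef std::vector<void*> Buffer;

    struct ToyQueueSet {
      std::vector<Buffer*> completed;   // buffers filled during the pause

      // Hand every completed buffer to 'other' without copying the card
      // entries themselves; models DirtyCardQueueSet::merge_bufferlists().
      void merge_into(ToyQueueSet& other) {
        other.completed.insert(other.completed.end(),
                               completed.begin(), completed.end());
        completed.clear();
      }
    };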
 2314  2330  };
 2315  2331
 2316  2332  void
 2317  2333  G1CollectedHeap::checkConcurrentMark() {
 2318  2334    VerifyMarkedObjsClosure verifycl(this);
 2319          doConcurrentMark();
 2320  2335    //  MutexLockerEx x(getMarkBitMapLock(),
 2321  2336    //    Mutex::_no_safepoint_check_flag);
 2322  2337    object_iterate(&verifycl);
 2323  2338  }
 2324  2339
 2491  2506    g1_policy()->record_collection_pause_start(start_time_sec,
 2492  2507                                               start_used_bytes);
 2493  2508
 2494  2509    guarantee(_in_cset_fast_test == NULL, "invariant");
 2495  2510    guarantee(_in_cset_fast_test_base == NULL, "invariant");
 2496          _in_cset_fast_test_length = n_regions();
       2511    _in_cset_fast_test_length = max_regions();
 2497  2512    _in_cset_fast_test_base =
 2498  2513      NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
 2499  2514    memset(_in_cset_fast_test_base, false,
 2500  2515           _in_cset_fast_test_length * sizeof(bool));
 2501  2516    // We're biasing _in_cset_fast_test to avoid subtracting the
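Note: sizing the table with max_regions() instead of n_regions() keeps it valid even if the heap expands during the pause. The "biasing" the trailing comment refers to shifts the base pointer so that a region index computed directly from an address can be used without first subtracting the index of the heap's first region; an illustrative sketch (names here are hypothetical):

    #include <cstdint>

    // Returns a biased base so that table[addr >> region_shift] works
    // directly, instead of table[(addr >> region_shift) - first_index].
    bool* bias_table(bool* base, uintptr_t heap_bottom, int region_shift) {
      return base - (heap_bottom >> region_shift);
    }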
 2916  2931  #endif // G1_DEBUG
 2917  2932      }
 2918  2933    }
 2919  2934  };
 2920  2935
 2921        class RecreateRSetEntriesClosure: public OopClosure {
       2936  class UpdateRSetImmediate : public OopsInHeapRegionClosure {
 2922  2937  private:
 2923  2938    G1CollectedHeap* _g1;
 2924  2939    G1RemSet* _g1_rem_set;
 2925          HeapRegion* _from;
 2926  2940  public:
 2927          RecreateRSetEntriesClosure(G1CollectedHeap* g1, HeapRegion* from) :
       2941    UpdateRSetImmediate(G1CollectedHeap* g1) :
 2928            _g1(g1), _g1_rem_set(g1->g1_rem_set()), _from(from)
       2942      _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
 2929          {}
 2930  2943
 2931  2944    void do_oop(narrowOop* p) {
 2932  2945      guarantee(false, "NYI");
 2933  2946    }
 2934  2947    void do_oop(oop* p) {
 2935  2948      assert(_from->is_in_reserved(p), "paranoia");
 2936            if (*p != NULL) {
       2949      if (*p != NULL && !_from->is_survivor()) {
 2937              _g1_rem_set->write_ref(_from, p);
       2950        _g1_rem_set->par_write_ref(_from, p, 0);
 2938  2951      }
 2939  2952    }
 2940  2953  };
       2954
       2955  class UpdateRSetDeferred : public OopsInHeapRegionClosure {
       2956  private:
       2957    G1CollectedHeap* _g1;
       2958    DirtyCardQueue *_dcq;
       2959    CardTableModRefBS* _ct_bs;
       2960
       2961  public:
       2962    UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
       2963      _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
       2964
       2965    void do_oop(narrowOop* p) {
       2966      guarantee(false, "NYI");
       2967    }
       2968    void do_oop(oop* p) {
       2969      assert(_from->is_in_reserved(p), "paranoia");
       2970      if (!_from->is_in_reserved(*p) && !_from->is_survivor()) {
       2971        size_t card_index = _ct_bs->index_for(p);
       2972        if (_ct_bs->mark_card_deferred(card_index)) {
       2973          _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
       2974        }
       2975      }
       2976    }
       2977  };
       2978
       2979
 2941  2980
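Note: UpdateRSetDeferred never touches a remembered set directly. It marks the card covering the updated field as "deferred" and enqueues the card only if this call was the one that marked it, so each card enters the pause-local queue at most once. A standalone sketch of that claim-then-enqueue idea (simplified and single-threaded; HotSpot's mark_card_deferred() manages the bit in the card byte itself):

    #include <cstddef>
    #include <vector>

    struct ToyCardTable {
      std::vector<unsigned char> cards;         // one flag byte per card
      static const unsigned char kDeferred = 1;

      // True only for the first caller on a given card, so a card is
      // logged at most once per pause (models mark_card_deferred()).
      bool mark_card_deferred(size_t card_index) {
        if (cards[card_index] & kDeferred) return false;
        cards[card_index] |= kDeferred;
        return true;
      }
    };

    void record_deferred_update(ToyCardTable& ct,
                                std::vector<size_t>& dirty_card_queue,
                                size_t card_index) {
      if (ct.mark_card_deferred(card_index)) {
        dirty_card_queue.push_back(card_index);  // models DirtyCardQueue::enqueue()
      }
    }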
 2942  2981  class RemoveSelfPointerClosure: public ObjectClosure {
 2943  2982  private:
 2944  2983    G1CollectedHeap* _g1;
 2945  2984    ConcurrentMark* _cm;
 2946  2985    HeapRegion* _hr;
 2947  2986    size_t _prev_marked_bytes;
 2948  2987    size_t _next_marked_bytes;
       2988    OopsInHeapRegionClosure *_cl;
 2949  2989  public:
 2950          RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr) :
       2990    RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
 2951            _g1(g1), _cm(_g1->concurrent_mark()), _hr(hr),
       2991      _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
 2952            _prev_marked_bytes(0), _next_marked_bytes(0)
       2992      _next_marked_bytes(0), _cl(cl) {}
 2953          {}
 2954  2993
 2955  2994    size_t prev_marked_bytes() { return _prev_marked_bytes; }
 2956  2995    size_t next_marked_bytes() { return _next_marked_bytes; }
 2957  2996
 2958  2997    // The original idea here was to coalesce evacuated and dead objects.
 2986  3025        // card in the collection set and coming across an array that
 2987  3026        // was being chunked and looking malformed. The problem is
 2988  3027        // that, if evacuation fails, we might have remembered set
 2989  3028        // entries missing given that we skipped cards on the
 2990  3029        // collection set. So, we'll recreate such entries now.
 2991              RecreateRSetEntriesClosure cl(_g1, _hr);
       3030        obj->oop_iterate(_cl);
 2992              obj->oop_iterate(&cl);
 2993  3031        assert(_cm->isPrevMarked(obj), "Should be marked!");
 2994  3032      } else {
 2995  3033        // The object has been either evacuated or is dead. Fill it with a
 2996  3034        // dummy object.
 2997  3035        MemRegion mr((HeapWord*)obj, obj->size());
 3000  3038      }
 3001  3039    }
 3002  3040  };
 3003  3041
 3004  3042  void G1CollectedHeap::remove_self_forwarding_pointers() {
       3043    UpdateRSetImmediate immediate_update(_g1h);
       3044    DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
       3045    UpdateRSetDeferred deferred_update(_g1h, &dcq);
       3046    OopsInHeapRegionClosure *cl;
       3047    if (G1DeferredRSUpdate) {
       3048      cl = &deferred_update;
       3049    } else {
       3050      cl = &immediate_update;
       3051    }
 3005  3052    HeapRegion* cur = g1_policy()->collection_set();
 3006
 3007  3053    while (cur != NULL) {
 3008  3054      assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
 3009  3055
       3056      RemoveSelfPointerClosure rspc(_g1h, cl);
 3010  3057      if (cur->evacuation_failed()) {
 3011              RemoveSelfPointerClosure rspc(_g1h, cur);
 3012  3058        assert(cur->in_collection_set(), "bad CS");
       3059        cl->set_region(cur);
 3013  3060        cur->object_iterate(&rspc);
 3014  3061
 3015  3062        // A number of manipulations to make the TAMS be the current top,
 3016  3063        // and the marked bytes be the ones observed in the iteration.
 3017  3064        if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
 3516  3563
 3517  3564  class G1ParScanThreadState : public StackObj {
 3518  3565  protected:
 3519  3566    G1CollectedHeap* _g1h;
 3520  3567    RefToScanQueue* _refs;
       3568    DirtyCardQueue _dcq;
       3569    CardTableModRefBS* _ct_bs;
       3570    G1RemSet* _g1_rem;
 3521  3571
 3522  3572    typedef GrowableArray<oop*> OverflowQueue;
 3523  3573    OverflowQueue* _overflowed_refs;
 3524  3574
 3525  3575    G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
 3557  3607
 3558  3608    void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
 3559  3609
 3560  3610    void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
 3561  3611
       3612    DirtyCardQueue& dirty_card_queue() { return _dcq; }
       3613    CardTableModRefBS* ctbs() { return _ct_bs; }
       3614
       3615    void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
       3616      _g1_rem->par_write_ref(from, p, tid);
       3617    }
       3618
       3619    void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
       3620      // If the new value of the field points to the same region or
       3621      // is the to-space, we don't need to include it in the Rset updates.
       3622      if (!from->is_in_reserved(*p) && !from->is_survivor()) {
       3623        size_t card_index = ctbs()->index_for(p);
       3624        // If the card hasn't been added to the buffer, do it.
       3625        if (ctbs()->mark_card_deferred(card_index)) {
       3626          dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
       3627        }
       3628      }
       3629    }
       3630
 3562  3631  public:
 3563  3632    G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
 3564  3633      : _g1h(g1h),
 3565  3634        _refs(g1h->task_queue(queue_num)),
       3635        _dcq(&g1h->dirty_card_queue_set()),
       3636        _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
       3637        _g1_rem(g1h->g1_rem_set()),
 3566  3638        _hash_seed(17), _queue_num(queue_num),
 3567  3639        _term_attempts(0),
 3568  3640        _age_table(false),
 3569  3641  #if G1_DETAILED_STATS
 3570  3642        _pushes(0), _pops(0), _steals(0),
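Note: deferred_rs_update() above filters before logging, per its comment: a store whose new value stays inside the source region needs no remembered-set entry, and stores out of survivor (to-space) regions are skipped because survivors are scanned wholesale anyway. The filter alone, as a standalone predicate over toy types (illustrative only):

    #include <cstdint>

    struct ToyRegion {
      uintptr_t bottom, end;
      bool is_survivor;
      bool is_in_reserved(const void* p) const {
        uintptr_t a = (uintptr_t)p;
        return a >= bottom && a < end;
      }
    };

    // Mirrors the condition in deferred_rs_update(): only cross-region
    // stores from non-survivor regions are worth logging.
    bool needs_rs_update(const ToyRegion& from, const void* new_value) {
      return !from.is_in_reserved(new_value) && !from.is_survivor;
    }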
 3638  3710    }
 3639  3711
 3640  3712    int refs_to_scan() { return refs()->size(); }
 3641  3713    int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
 3642  3714
       3715    void update_rs(HeapRegion* from, oop* p, int tid) {
       3716      if (G1DeferredRSUpdate) {
       3717        deferred_rs_update(from, p, tid);
       3718      } else {
       3719        immediate_rs_update(from, p, tid);
       3720      }
       3721    }
       3722
 3643  3723    HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
 3644  3724
 3645  3725      HeapWord* obj = NULL;
 3646  3726      if (word_sz * 100 <
 3647  3727          (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) *
 3806  3886        }
 3807  3887      }
 3808  3888    }
 3809  3889  };
 3810  3890
 3811
 3812  3891  G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
 3813  3892    _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
 3814  3893    _par_scan_state(par_scan_state) { }
 3815  3894
 3816  3895  // This closure is applied to the fields of the objects that have just been copied.
 3832  3911          // problems before we go into push_on_queue to know where the
 3833  3912          // problem is coming from
 3834  3913          assert(obj == *p, "the value of *p should not have changed");
 3835  3914          _par_scan_state->push_on_queue(p);
 3836  3915        } else {
 3837                _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
       3916          _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
 3838  3917        }
 3839  3918      }
 3840  3919    }
 3841  3920
 3842  3921  void G1ParCopyHelper::mark_forwardee(oop* p) {
 3970  4049      } else {
 3971  4050        *p = copy_to_survivor_space(obj);
 3972  4051      }
 3973  4052      // When scanning the RS, we only care about objs in CS.
 3974  4053      if (barrier == G1BarrierRS) {
 3975              _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
       4054        _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
 3976  4055      }
 3977  4056    }
 3978  4057
 3979  4058    // When scanning moved objs, must look at all oops.
 3980  4059    if (barrier == G1BarrierEvac && obj != NULL) {
 3981            _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
       4060      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
 3982  4061    }
 3983  4062
 3984  4063    if (do_gen_barrier && obj != NULL) {
 3985  4064      par_do_barrier(p);
 3986  4065    }
 4125  4204    pss.set_partial_scan_closure(&partial_scan_cl);
 4126  4205
 4127  4206    G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
 4128  4207    G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
 4129  4208    G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
       4209
 4130  4210    G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
 4131  4211    G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
 4132  4212    G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss);
 4133  4213
 4134  4214    OopsInHeapRegionClosure *scan_root_cl;
 4380  4460    set_evacuation_failed(false);
 4381  4461
 4382  4462    g1_rem_set()->prepare_for_oops_into_collection_set_do();
 4383  4463    concurrent_g1_refine()->set_use_cache(false);
 4384  4464    int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
 4385
 4386  4465    set_par_threads(n_workers);
 4387  4466    G1ParTask g1_par_task(this, n_workers, _task_queues);
 4388  4467
 4389  4468    init_for_evac_failure(NULL);
 4390  4469
 4391  4470    change_strong_roots_parity();  // In preparation for parallel strong roots.
 4392  4471    rem_set()->prepare_for_younger_refs_iterate(true);
       4472
       4473    assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
 4393  4474    double start_par = os::elapsedTime();
 4394
 4395  4475    if (ParallelGCThreads > 0) {
 4396  4476      // The individual threads will set their evac-failure closures.
 4397  4477      workers()->run_task(&g1_par_task);
 4398  4478    } else {
 4399  4479      g1_par_task.work(0);
 4409  4489    {
 4410  4490      G1IsAliveClosure is_alive(this);
 4411  4491      G1KeepAliveClosure keep_alive(this);
 4412  4492      JNIHandles::weak_oops_do(&is_alive, &keep_alive);
 4413  4493    }
 4414
 4415  4494    g1_rem_set()->cleanup_after_oops_into_collection_set_do();
       4495
 4416  4496    concurrent_g1_refine()->set_use_cache(true);
 4417  4497
 4418  4498    finalize_for_evac_failure();
 4419  4499
 4420  4500    // Must do this before removing self-forwarding pointers, which clears
 4421  4501    // the per-region evac-failure flags.
 4422  4502    concurrent_mark()->complete_marking_in_collection_set();
 4423  4503
 4424  4504    if (evacuation_failed()) {
 4425  4505      remove_self_forwarding_pointers();
 4426
 4427  4506      if (PrintGCDetails) {
 4428  4507        gclog_or_tty->print(" (evacuation failed)");
 4429  4508      } else if (PrintGC) {
 4430  4509        gclog_or_tty->print("--");
 4431  4510      }
       4511    }
       4512
       4513    if (G1DeferredRSUpdate) {
       4514      RedirtyLoggedCardTableEntryFastClosure redirty;
       4515      dirty_card_queue_set().set_closure(&redirty);
       4516      dirty_card_queue_set().apply_closure_to_all_completed_buffers();
       4517      JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
       4518      assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
 4432  4519    }
 4433  4520
 4434  4521    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 4435  4522  }
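Note: the G1DeferredRSUpdate block near the end ties the mechanism together: after evacuation, the pause re-dirties every card it logged, then hands its private buffers to JavaThread::dirty_card_queue_set(), whose consumers perform the actual remembered-set updates after the pause. A condensed, illustrative model of that hand-off using toy types like the earlier sketches (not HotSpot's API):

    #include <vector>

    typedef signed char Card;
    const Card kDirty = 1;

    struct ToyQueueSet {
      std::vector< std::vector<Card*> > completed;  // buffers of card pointers
    };

    void post_evacuation_handoff(ToyQueueSet& gc_private, ToyQueueSet& mutator) {
      // 1. Re-dirty every logged card (the redirty closure applied via
      //    apply_closure_to_all_completed_buffers()).
      for (size_t i = 0; i < gc_private.completed.size(); ++i)
        for (size_t j = 0; j < gc_private.completed[i].size(); ++j)
          *gc_private.completed[i][j] = kDirty;

      // 2. Hand the buffers over (merge_bufferlists()); refinement will
      //    drain them after the pause, leaving the private set empty.
      mutator.completed.insert(mutator.completed.end(),
                               gc_private.completed.begin(),
                               gc_private.completed.end());
      gc_private.completed.clear();
    }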
4436 4523