comparison src/share/vm/gc_implementation/g1/g1EvacFailure.hpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 89152779163c 6948da6d7c13
children (none)
comparing 20184:84105dcdb05b with 20804:7848fc12602b
@@ -69,10 +69,13 @@
   size_t _marked_bytes;
   OopsInHeapRegionClosure *_update_rset_cl;
   bool _during_initial_mark;
   bool _during_conc_mark;
   uint _worker_id;
+  HeapWord* _end_of_last_gap;
+  HeapWord* _last_gap_threshold;
+  HeapWord* _last_obj_threshold;
 
 public:
   RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                  HeapRegion* hr,
                                  OopsInHeapRegionClosure* update_rset_cl,
@@ -81,11 +84,14 @@
                                  uint worker_id) :
     _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
     _update_rset_cl(update_rset_cl),
     _during_initial_mark(during_initial_mark),
     _during_conc_mark(during_conc_mark),
-    _worker_id(worker_id) { }
+    _worker_id(worker_id),
+    _end_of_last_gap(hr->bottom()),
+    _last_gap_threshold(hr->bottom()),
+    _last_obj_threshold(hr->bottom()) { }
 
   size_t marked_bytes() { return _marked_bytes; }
 
   // <original comment>
   // The original idea here was to coalesce evacuated and dead objects.
@@ -105,19 +111,26 @@
   // to coalesce dead objects if we want to.
   void do_object(oop obj) {
     HeapWord* obj_addr = (HeapWord*) obj;
     assert(_hr->is_in(obj_addr), "sanity");
     size_t obj_size = obj->size();
-    _hr->update_bot_for_object(obj_addr, obj_size);
+    HeapWord* obj_end = obj_addr + obj_size;
+
+    if (_end_of_last_gap != obj_addr) {
+      // there was a gap before obj_addr
+      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
+    }
 
     if (obj->is_forwarded() && obj->forwardee() == obj) {
       // The object failed to move.
 
       // We consider all objects that we find self-forwarded to be
       // live. What we'll do is that we'll update the prev marking
       // info so that they are all under PTAMS and explicitly marked.
-      _cm->markPrev(obj);
+      if (!_cm->isPrevMarked(obj)) {
+        _cm->markPrev(obj);
+      }
       if (_during_initial_mark) {
         // For the next marking info we'll only mark the
         // self-forwarded objects explicitly if we are during
         // initial-mark (since, normally, we only mark objects pointed
         // to by roots if we succeed in copying them). By marking all
@@ -143,14 +156,19 @@
       // across an array that was being chunked and looking malformed.
       // The problem is that, if evacuation fails, we might have
       // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.
       obj->oop_iterate(_update_rset_cl);
-      assert(_cm->isPrevMarked(obj), "Should be marked!");
     } else {
+
       // The object has been either evacuated or is dead. Fill it with a
       // dummy object.
-      MemRegion mr((HeapWord*) obj, obj_size);
+      MemRegion mr(obj_addr, obj_size);
       CollectedHeap::fill_with_object(mr);
+
+      // must nuke all dead objects which we skipped when iterating over the region
+      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
     }
+    _end_of_last_gap = obj_end;
+    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
   }
 };
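A note on the do_object() rewrite above: an object that failed to evacuate is recognizable because G1 left it forwarded to itself, and only such objects survive in place; everything else in a failed region is overwritten with a filler object and has its prev-bitmap range cleared. The companion change replaces the per-object update_bot_for_object() call with cross_threshold() updates that also cover the gaps between objects, so the block offset table stays consistent without clearing whole ranges up front. The standalone sketch below mirrors the branch structure only; ModelObj, note_evacuation_failure and must_keep_in_place are illustrative stand-ins, not HotSpot code.

#include <cassert>

// Hypothetical stand-in for an object header with a forwarding pointer.
struct ModelObj {
  ModelObj* forwardee = nullptr;            // set during evacuation
  bool is_forwarded() const { return forwardee != nullptr; }
};

// On evacuation failure G1 installs a self-referential forwarding pointer,
// which is exactly what do_object() tests for afterwards.
void note_evacuation_failure(ModelObj* obj) {
  obj->forwardee = obj;                     // "failed to move, stays here"
}

// Mirror of the if/else in do_object(): self-forwarded objects stay live in
// place; anything else in a failed region (evacuated or dead) has its
// storage turned into a dummy filler and its mark bits cleared.
bool must_keep_in_place(const ModelObj* obj) {
  return obj->is_forwarded() && obj->forwardee == obj;
}

int main() {
  ModelObj failed, moved, copy;
  note_evacuation_failure(&failed);
  moved.forwardee = &copy;                  // successfully evacuated
  assert(must_keep_in_place(&failed));
  assert(!must_keep_in_place(&moved));      // would be filled with a dummy
  return 0;
}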
@@ -157,42 +175,38 @@
 
 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
-  OopsInHeapRegionClosure *_update_rset_cl;
   uint _worker_id;
 
+  DirtyCardQueue _dcq;
+  UpdateRSetDeferred _update_rset_cl;
+
 public:
   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
-                                OopsInHeapRegionClosure* update_rset_cl,
                                 uint worker_id) :
-    _g1h(g1h), _update_rset_cl(update_rset_cl),
-    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
+    _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
+    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) {
+  }
 
   bool doHeapRegion(HeapRegion *hr) {
     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
     bool during_conc_mark = _g1h->mark_in_progress();
 
     assert(!hr->isHumongous(), "sanity");
     assert(hr->in_collection_set(), "bad CS");
 
     if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
       if (hr->evacuation_failed()) {
-        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
+        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                             during_initial_mark,
                                             during_conc_mark,
                                             _worker_id);
 
-        MemRegion mr(hr->bottom(), hr->end());
-        // We'll recreate the prev marking info so we'll first clear
-        // the prev bitmap range for this region. We never mark any
-        // CSet objects explicitly so the next bitmap range should be
-        // cleared anyway.
-        _cm->clearRangePrevBitmap(mr);
-
         hr->note_self_forwarding_removal_start(during_initial_mark,
                                                during_conc_mark);
+        _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
 
         // In the common case (i.e. when there is no evacuation
         // failure) we make sure that the following is done when
         // the region is freed so that it is "ready-to-go" when it's
         // re-allocated. However, when evacuation failure happens, a
@@ -200,12 +214,14 @@
         // to a CSet in the future. So we have to be careful here and
         // make sure the region's RSet is ready for parallel iteration
         // whenever this might be required in the future.
         hr->rem_set()->reset_for_par_iteration();
         hr->reset_bot();
-        _update_rset_cl->set_region(hr);
+        _update_rset_cl.set_region(hr);
         hr->object_iterate(&rspc);
+
+        hr->rem_set()->clean_strong_code_roots(hr);
 
         hr->note_self_forwarding_removal_end(during_initial_mark,
                                              during_conc_mark,
                                              rspc.marked_bytes());
       }
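The region claim at the top of doHeapRegion() is what makes this closure safe to run from many workers at once: every worker iterates the full collection set, but claimHeapRegion() hands each region to exactly one of them. Below is a minimal model of such a claim protocol, assuming a compare-and-swap on a per-region claim word (which is how HotSpot implements it); ModelRegion and the tag value are hypothetical.

#include <atomic>
#include <cassert>

// Model of a heap region's claim word; 0 plays the role of the
// "unclaimed" value, and any non-zero tag stands in for
// HeapRegion::ParEvacFailureClaimValue.
struct ModelRegion {
  std::atomic<unsigned> claim_value{0};

  // Returns true for exactly one caller per region: the CAS succeeds
  // once, and every later attempt observes a non-zero claim word.
  bool claim(unsigned tag) {
    unsigned expected = 0;
    return claim_value.compare_exchange_strong(expected, tag);
  }
};

int main() {
  const unsigned kParEvacFailureClaim = 7;      // arbitrary non-zero tag
  ModelRegion region;
  assert(region.claim(kParEvacFailureClaim));   // first worker wins
  assert(!region.claim(kParEvacFailureClaim));  // everyone else skips it
  return 0;
}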
@@ -222,20 +238,11 @@
   G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
     AbstractGangTask("G1 Remove Self-forwarding Pointers"),
     _g1h(g1h) { }
 
   void work(uint worker_id) {
-    UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
-    DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
-    UpdateRSetDeferred deferred_update(_g1h, &dcq);
-
-    OopsInHeapRegionClosure *update_rset_cl = &deferred_update;
-    if (!G1DeferredRSUpdate) {
-      update_rset_cl = &immediate_update;
-    }
-
-    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
+    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);
 
     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
   }
 };
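Finally, the work() hunk drops the G1DeferredRSUpdate choice between UpdateRSetImmediate and UpdateRSetDeferred: each RemoveSelfForwardPtrHRClosure now owns a DirtyCardQueue and always defers remembered-set updates through it. A simplified model of the deferred path is sketched below, assuming G1's 512-byte cards; ModelDirtyCardQueue and update_rset_deferred are illustrative stand-ins, not the HotSpot types.

#include <cstdint>
#include <vector>

// G1 divides the heap into 512-byte cards; a card index is addr >> 9.
constexpr unsigned kCardShift = 9;

// Model of a DirtyCardQueue: the deferred closure records which cards
// contain updated references instead of touching remembered sets directly.
struct ModelDirtyCardQueue {
  std::vector<std::uintptr_t> dirty_cards;
  void enqueue(const void* field_addr) {
    dirty_cards.push_back(
        reinterpret_cast<std::uintptr_t>(field_addr) >> kCardShift);
  }
};

// Stand-in for the deferred closure's per-field step: note the card now,
// and let later dirty-card processing rebuild the remembered-set entries.
void update_rset_deferred(ModelDirtyCardQueue& dcq, void* const* field) {
  if (*field != nullptr) {
    dcq.enqueue(field);
  }
}

int main() {
  void* slot = reinterpret_cast<void*>(0x1234);  // pretend reference field
  ModelDirtyCardQueue dcq;
  update_rset_deferred(dcq, &slot);
  return dcq.dirty_cards.size() == 1 ? 0 : 1;
}

Deferring turns remembered-set reconstruction into ordinary dirty-card processing, so it batches with the refinement work G1 already performs, which is presumably why the immediate path could be removed.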