comparison src/share/vm/gc_implementation/g1/g1EvacFailure.hpp @ 4787:2ace1c4ee8da

6888336: G1: avoid explicitly marking and pushing objects in survivor spaces
Summary: This change simplifies the interaction between GC and concurrent
marking. By disabling survivor spaces during the initial-mark pause we don't
need to propagate marks of objects we copy during each GC (since we never
need to copy an explicitly marked object).
Reviewed-by: johnc, brutisso
author tonyp
date Tue, 10 Jan 2012 18:58:13 -0500
parents 023652e49ac0
children d30fa85f9994
comparing 4786:1d6185f732aa with 4787:2ace1c4ee8da
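The Summary above captures the invariant this change relies on: an explicitly
marked object (one below TAMS) is never copied during a pause, so the
evacuation copy path no longer needs to propagate marks. As a hedged
illustration only, here is a condensed sketch of the propagation step that
becomes unnecessary; copy_object and allocate_and_copy are made-up names, and
the real copy path lives elsewhere (not in this file):

  // Hypothetical, condensed sketch -- not actual HotSpot code.
  oop copy_object(oop old_obj) {
    oop new_obj = allocate_and_copy(old_obj);  // made-up helper
    // Before this change, the copy path had to propagate an explicit
    // mark to the object's new location, mirroring the logic this
    // patch removes from the closure below:
    //
    //   if (_g1->mark_in_progress() && !_g1->is_obj_ill(old_obj)) {
    //     _cm->markAndGrayObjectIfNecessary(new_obj);
    //   }
    //
    // With survivor spaces disabled during the initial-mark pause, an
    // explicitly marked object is never copied, so this step goes away.
    return new_obj;
  }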
 class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
 private:
   G1CollectedHeap* _g1;
   ConcurrentMark* _cm;
   HeapRegion* _hr;
-  size_t _prev_marked_bytes;
-  size_t _next_marked_bytes;
+  size_t _marked_bytes;
   OopsInHeapRegionClosure *_update_rset_cl;
+  bool _during_initial_mark;
+  bool _during_conc_mark;
 public:
   RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                  HeapRegion* hr,
-                                 OopsInHeapRegionClosure* update_rset_cl) :
-    _g1(g1), _cm(cm), _hr(hr),
+                                 OopsInHeapRegionClosure* update_rset_cl,
+                                 bool during_initial_mark,
+                                 bool during_conc_mark) :
+    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
     _update_rset_cl(update_rset_cl),
-    _prev_marked_bytes(0), _next_marked_bytes(0) {}
+    _during_initial_mark(during_initial_mark),
+    _during_conc_mark(during_conc_mark) { }
 
-  size_t prev_marked_bytes() { return _prev_marked_bytes; }
-  size_t next_marked_bytes() { return _next_marked_bytes; }
+  size_t marked_bytes() { return _marked_bytes; }
 
   // <original comment>
   // The original idea here was to coalesce evacuated and dead objects.
   // However that caused complications with the block offset table (BOT).
   // In particular if there were two TLABs, one of them partially refined.
[...]
   // to coalesce dead objects if we want to.
   void do_object(oop obj) {
     HeapWord* obj_addr = (HeapWord*) obj;
     assert(_hr->is_in(obj_addr), "sanity");
     size_t obj_size = obj->size();
-
     _hr->update_bot_for_object(obj_addr, obj_size);
 
     if (obj->is_forwarded() && obj->forwardee() == obj) {
       // The object failed to move.
-      assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
+
+      // We consider all objects that we find self-forwarded to be
+      // live. What we'll do is that we'll update the prev marking
+      // info so that they are all under PTAMS and explicitly marked.
       _cm->markPrev(obj);
-      assert(_cm->isPrevMarked(obj), "Should be marked!");
-      _prev_marked_bytes += (obj_size * HeapWordSize);
-      if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
-        _cm->markAndGrayObjectIfNecessary(obj);
+      if (_during_initial_mark) {
+        // For the next marking info we'll only mark the
+        // self-forwarded objects explicitly if we are during
+        // initial-mark (since, normally, we only mark objects pointed
+        // to by roots if we succeed in copying them). By marking all
+        // self-forwarded objects we ensure that we mark any that are
+        // still pointed to by roots. During concurrent marking, and
+        // after initial-mark, we don't need to mark any objects
+        // explicitly and all objects in the CSet are considered
+        // (implicitly) live. So, we won't mark them explicitly and
+        // we'll leave them over NTAMS.
+        _cm->markNext(obj);
       }
+      _marked_bytes += (obj_size * HeapWordSize);
       obj->set_mark(markOopDesc::prototype());
 
       // While we were processing RSet buffers during the collection,
       // we actually didn't scan any cards on the collection set,
       // since we didn't want to update remembered sets with entries
[...]
       // involved scanning a card in the collection set and coming
       // across an array that was being chunked and looking malformed.
       // The problem is that, if evacuation fails, we might have
       // remembered set entries missing given that we skipped cards on
       // the collection set. So, we'll recreate such entries now.
-
       obj->oop_iterate(_update_rset_cl);
       assert(_cm->isPrevMarked(obj), "Should be marked!");
     } else {
       // The object has been either evacuated or is dead. Fill it with a
       // dummy object.
-      MemRegion mr((HeapWord*)obj, obj_size);
+      MemRegion mr((HeapWord*) obj, obj_size);
       CollectedHeap::fill_with_object(mr);
-      _cm->clearRangeBothMaps(mr);
     }
   }
 };
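The long comment in the hunk above boils down to a two-case rule for each
self-forwarded object. A standalone sketch of that rule, with made-up names
(PauseKind, mark_self_forwarded) used purely for illustration:

  // Illustrative only; PauseKind and mark_self_forwarded are not HotSpot names.
  enum PauseKind { InitialMarkPause, DuringConcMarkPause, NoMarkingPause };

  void mark_self_forwarded(oop obj, PauseKind pause, ConcurrentMark* cm) {
    // Case 1 (always): the object is live for the previous marking,
    // so it is explicitly marked and ends up under PTAMS.
    cm->markPrev(obj);
    // Case 2 (initial-mark pause only): the object may still be
    // pointed to by roots, so it also gets an explicit next-bitmap mark.
    if (pause == InitialMarkPause) {
      cm->markNext(obj);
    }
    // In every other case the object is left over NTAMS: during and
    // after concurrent marking, all CSet objects are implicitly live.
  }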
 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
[...]
                                 OopsInHeapRegionClosure* update_rset_cl) :
     _g1h(g1h), _update_rset_cl(update_rset_cl),
     _cm(_g1h->concurrent_mark()) { }
 
   bool doHeapRegion(HeapRegion *hr) {
+    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
+    bool during_conc_mark = _g1h->mark_in_progress();
+
     assert(!hr->isHumongous(), "sanity");
     assert(hr->in_collection_set(), "bad CS");
 
     if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
       if (hr->evacuation_failed()) {
-        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl);
+        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
+                                            during_initial_mark,
+                                            during_conc_mark);
+
+        MemRegion mr(hr->bottom(), hr->end());
+        // We'll recreate the prev marking info so we'll first clear
+        // the prev bitmap range for this region. We never mark any
+        // CSet objects explicitly so the next bitmap range should be
+        // cleared anyway.
+        _cm->clearRangePrevBitmap(mr);
+
+        hr->note_self_forwarding_removal_start(during_initial_mark,
+                                               during_conc_mark);
 
         // In the common case (i.e. when there is no evacuation
         // failure) we make sure that the following is done when
         // the region is freed so that it is "ready-to-go" when it's
         // re-allocated. However, when evacuation failure happens, a
[...]
         hr->rem_set()->reset_for_par_iteration();
         hr->reset_bot();
         _update_rset_cl->set_region(hr);
         hr->object_iterate(&rspc);
 
-        // A number of manipulations to make the TAMS for this region
-        // be the current top, and the marked bytes be the ones observed
-        // in the iteration.
-        if (_cm->at_least_one_mark_complete()) {
-          // The comments below are the postconditions achieved by the
-          // calls. Note especially the last such condition, which says that
-          // the count of marked bytes has been properly restored.
-          hr->note_start_of_marking(false);
-          // _next_top_at_mark_start == top, _next_marked_bytes == 0
-          hr->add_to_marked_bytes(rspc.prev_marked_bytes());
-          // _next_marked_bytes == prev_marked_bytes.
-          hr->note_end_of_marking();
-          // _prev_top_at_mark_start == top(),
-          // _prev_marked_bytes == prev_marked_bytes
-        }
-        // If there is no mark in progress, we modified the _next variables
-        // above needlessly, but harmlessly.
-        if (_g1h->mark_in_progress()) {
-          hr->note_start_of_marking(false);
-          // _next_top_at_mark_start == top, _next_marked_bytes == 0
-          // _next_marked_bytes == next_marked_bytes.
-        }
+        hr->note_self_forwarding_removal_end(during_initial_mark,
+                                             during_conc_mark,
+                                             rspc.marked_bytes());
       }
     }
     return false;
   }
 };
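The two note_self_forwarding_removal_* hooks encapsulate the TAMS bookkeeping
that the deleted block used to do inline. Their bodies are not part of this
diff; the following is only a plausible reading inferred from the old code's
postcondition comments (the field names come from those comments; the actual
implementation lives in heapRegion.cpp and may differ):

  // Inferred sketch, not verbatim HotSpot code.
  void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                      bool during_conc_mark) {
    // The prev marking info is being recreated: every self-forwarded
    // object will end up under PTAMS and explicitly marked, so move
    // PTAMS to top and reset the prev marked bytes.
    _prev_top_at_mark_start = top();
    _prev_marked_bytes = 0;
    if (during_initial_mark) {
      // Self-forwarded objects also get next-bitmap marks during an
      // initial-mark pause, so they must end up under NTAMS too.
      _next_top_at_mark_start = top();
      _next_marked_bytes = 0;
    }
  }

  void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                    bool during_conc_mark,
                                                    size_t marked_bytes) {
    // Restore the count of live bytes observed during the iteration.
    _prev_marked_bytes = marked_bytes;
  }

Compared with the old inline version, the hooks make the pause-type dependence
explicit and drop the at_least_one_mark_complete() special-casing, since the
prev bitmap range for the region is now always rebuilt from scratch.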