comparison src/share/vm/gc_implementation/g1/g1EvacFailure.hpp @ 4783:023652e49ac0

7121496: G1: do the per-region evacuation failure handling work in parallel
Summary: Parallelize the removal of self-forwarding pointers etc. by wrapping the work in a HeapRegion closure, which is then wrapped inside an AbstractGangTask.
Reviewed-by: tonyp, iveresov
author johnc
date Fri, 23 Dec 2011 11:14:18 -0800
parents
children 2ace1c4ee8da
comparing 4782:5fd354a959c5 with 4783:023652e49ac0
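This changeset wraps the per-region work in a HeapRegionClosure (RemoveSelfForwardPtrHRClosure) and drives it from an AbstractGangTask (G1ParRemoveSelfForwardPtrsTask) so that the GC worker threads can process collection-set regions in parallel. As a minimal sketch of how a caller might dispatch the task (the call site is not part of this file; g1h, use_parallel_gc_threads() and workers() are the usual G1CollectedHeap API of this era):

    // Hedged illustration of a dispatch site, not the changeset's own code.
    G1ParRemoveSelfForwardPtrsTask rsfp_task(g1h);
    if (G1CollectedHeap::use_parallel_gc_threads()) {
      g1h->workers()->run_task(&rsfp_task);  // workers claim CSet regions in parallel
    } else {
      rsfp_task.work(0);                     // serial fallback: single worker, id 0
    }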
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP

#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "utilities/workgroup.hpp"

// Closures and tasks associated with any self-forwarding pointers
// installed as a result of an evacuation failure.
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue* _dcq;
  CardTableModRefBS* _ct_bs;

public:
  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _dcq(dcq), _ct_bs((CardTableModRefBS*)_g1->barrier_set()) {}

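  // Defers the remembered set update for p: instead of updating the
  // RSet directly, the card spanning p is marked "deferred" and
  // enqueued on the DirtyCardQueue for processing after evacuation.
  // No entry is needed if the reference stays within the region being
  // scanned (_from) or if that region is a survivor region.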
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
        !_from->is_survivor()) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};

class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _prev_marked_bytes;
  size_t _next_marked_bytes;
  OopsInHeapRegionClosure* _update_rset_cl;
public:
  RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                 HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl) :
    _g1(g1), _cm(cm), _hr(hr),
    _prev_marked_bytes(0), _next_marked_bytes(0),
    _update_rset_cl(update_rset_cl) {}

  size_t prev_marked_bytes() { return _prev_marked_bytes; }
  size_t next_marked_bytes() { return _next_marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into the middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();

    _hr->update_bot_for_object(obj_addr, obj_size);

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.
      assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
      _cm->markPrev(obj);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
      _prev_marked_bytes += (obj_size * HeapWordSize);
      if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
        _cm->markAndGrayObjectIfNecessary(obj);
      }
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards on the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.

      obj->oop_iterate(_update_rset_cl);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
    } else {
      // The object has been either evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr((HeapWord*)obj, obj_size);
      CollectedHeap::fill_with_object(mr);
      _cm->clearRangeBothMaps(mr);
    }
  }
};

class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  OopsInHeapRegionClosure* _update_rset_cl;

public:
  RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
                                OopsInHeapRegionClosure* update_rset_cl) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _update_rset_cl(update_rset_cl) { }

  bool doHeapRegion(HeapRegion* hr) {
    assert(!hr->isHumongous(), "sanity");
    assert(hr->in_collection_set(), "bad CS");

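    // Several workers iterate over the collection set concurrently, so
    // each region is claimed with an atomic claim value; only the worker
    // that wins the claim processes the region, which makes the parallel
    // iteration safe and ensures each region is handled exactly once.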
    if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
      if (hr->evacuation_failed()) {
        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl);

        // In the common case (i.e. when there is no evacuation
        // failure) we make sure that the following is done when
        // the region is freed so that it is "ready-to-go" when it's
        // re-allocated. However, when evacuation failure happens, a
        // region will remain in the heap and might ultimately be added
        // to a CSet in the future. So we have to be careful here and
        // make sure the region's RSet is ready for parallel iteration
        // whenever this might be required in the future.
        hr->rem_set()->reset_for_par_iteration();
        hr->reset_bot();
        _update_rset_cl->set_region(hr);
        hr->object_iterate(&rspc);

        // A number of manipulations to make the TAMS for this region
        // be the current top, and the marked bytes be the ones observed
        // in the iteration.
        if (_cm->at_least_one_mark_complete()) {
          // The comments below are the postconditions achieved by the
          // calls. Note especially the last such condition, which says that
          // the count of marked bytes has been properly restored.
          hr->note_start_of_marking(false);
          // _next_top_at_mark_start == top, _next_marked_bytes == 0
          hr->add_to_marked_bytes(rspc.prev_marked_bytes());
          // _next_marked_bytes == prev_marked_bytes.
          hr->note_end_of_marking();
          // _prev_top_at_mark_start == top(),
          // _prev_marked_bytes == prev_marked_bytes
        }
        // If there is no mark in progress, we modified the _next variables
        // above needlessly, but harmlessly.
        if (_g1h->mark_in_progress()) {
          hr->note_start_of_marking(false);
          // _next_top_at_mark_start == top, _next_marked_bytes == 0
          // _next_marked_bytes == next_marked_bytes.
        }
      }
    }
    return false;
  }
};

class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;

public:
  G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Remove Self-forwarding Pointers"),
    _g1h(g1h) { }

  void work(uint worker_id) {
    UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
    DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
    UpdateRSetDeferred deferred_update(_g1h, &dcq);

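    // With G1DeferredRSUpdate (the default) remembered set updates are
    // queued on the worker-local dirty card queue and applied after
    // evacuation; otherwise they are applied immediately.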
    OopsInHeapRegionClosure* update_rset_cl = &deferred_update;
    if (!G1DeferredRSUpdate) {
      update_rset_cl = &immediate_update;
    }

    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl);

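    // Each worker starts iterating the collection set at a different
    // region, spreading the workers over the CSet rather than having
    // them contend for the same regions; the claim in
    // RemoveSelfForwardPtrHRClosure keeps each region processed once.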
    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
    _g1h->collection_set_iterate_from(hr, &rsfp_cl);
  }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP