comparison src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children df6caf649ff7
/*
 * Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parCardTableModRefBS.cpp.incl"

void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
                                                        DirtyCardToOopClosure* dcto_cl,
                                                        MemRegionClosure* cl,
                                                        bool clear,
                                                        int n_threads) {
  if (n_threads > 0) {
    assert(n_threads == (int)ParallelGCThreads, "# worker threads != # requested!");

    // Make sure the LNC array is valid for the space.
    jbyte** lowest_non_clean;
    uintptr_t lowest_non_clean_base_chunk_index;
    size_t lowest_non_clean_chunk_size;
    get_LNC_array_for_space(sp, lowest_non_clean,
                            lowest_non_clean_base_chunk_index,
                            lowest_non_clean_chunk_size);

    int n_strides = n_threads * StridesPerThread;
    SequentialSubTasksDone* pst = sp->par_seq_tasks();
    pst->set_par_threads(n_threads);
    pst->set_n_tasks(n_strides);

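    // Each stride is a separately claimable unit of work: workers claim
    // strides dynamically from the SequentialSubTasksDone state (the claimed
    // index comes back through the by-reference "stride" argument below), so
    // the work distributes itself rather than being fixed per thread id.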
    int stride = 0;
    while (!pst->is_task_claimed(/* reference */ stride)) {
      process_stride(sp, mr, stride, n_strides, dcto_cl, cl, clear,
                     lowest_non_clean,
                     lowest_non_clean_base_chunk_index,
                     lowest_non_clean_chunk_size);
    }
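    // Presumably only the last worker to finish its strides sees
    // all_tasks_completed() return true, so the reset below happens exactly
    // once per space per iteration.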
    if (pst->all_tasks_completed()) {
      // Clear lowest_non_clean array for next time.
      intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
      uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
      for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
        intptr_t ind = ch - lowest_non_clean_base_chunk_index;
        assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
               "Bounds error");
        lowest_non_clean[ind] = NULL;
      }
    }
  }
}

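// A "stride" is an interleaved subset of the chunks covering the used
// region: chunk k (CardsPerStrideChunk cards wide) belongs to stride
// (k % n_strides). process_stride() walks every chunk of one such subset,
// so the n_strides tasks together cover the whole region.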
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               DirtyCardToOopClosure* dcto_cl,
               MemRegionClosure* cl,
               bool clear,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t lowest_non_clean_chunk_size) {
  // We don't have to go downwards here; it wouldn't help anyway,
  // because of parallelism.

  // Find the card address of the first chunk in this stride that starts at
  // or above the bottom of the used region.
  jbyte* start_card = byte_for(used.start());
  jbyte* end_card = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                CardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                CardsPerStrideChunk);
  }
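  // Illustrative arithmetic (hypothetical values): with n_strides == 8,
  // CardsPerStrideChunk == 256 and start_chunk_stride_num == 3, stride 5
  // starts at start_card + (5 - 3) * 256 cards, while stride 1 must skip
  // ahead to the next chunk-group boundary: start_card + (8 - 3 + 1) * 256.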

  while (chunk_card_start < end_card) {
    // We don't have to go downwards here; it wouldn't help anyway,
    // because of parallelism. (We take care with "min_done"; see below.)
    // Invariant: chunk_mr should be fully contained within the "used" region.
    jbyte* chunk_card_end = chunk_card_start + CardsPerStrideChunk;
    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
                                   chunk_card_end >= end_card ?
                                     used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    non_clean_card_iterate_work(chunk_mr, cl, clear);

    // Find the next chunk of the stride.
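    // Chunks of the same stride are n_strides chunks apart, so consecutive
    // strides interleave across the card table rather than each forming one
    // contiguous block of cards.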
    chunk_card_start += CardsPerStrideChunk * n_strides;
  }
}

void
CardTableModRefBS::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t lowest_non_clean_chunk_size)
{
  // We must worry about the chunk boundaries.

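  // Two things happen here: (1) "max_to_do" is computed as an upper bound on
  // the addresses this call will let the closure process for this chunk,
  // bounding work on an object that straddles our right boundary; and
  // (2) if an object from an earlier chunk straddles our left boundary, we
  // publish this chunk's first dirty card in the lowest_non_clean (LNC)
  // array, where the pass over the previous chunk will find it when it
  // computes its own max_to_do.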
  // First, set our max_to_do:
  HeapWord* max_to_do = NULL;
  uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
  cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index;

  if (chunk_mr.end() < used.end()) {
    // This is not the last chunk in the used region. What is the last
    // object?
    HeapWord* last_block = sp->block_start(chunk_mr.end());
    assert(last_block <= chunk_mr.end(), "In case this property changes.");
    if (last_block == chunk_mr.end()
        || !sp->block_is_obj(last_block)) {
      max_to_do = chunk_mr.end();

    } else {
      // It is an object and starts before the end of the current chunk.
      // last_obj_card is the card corresponding to the start of the last object
      // in the chunk. Note that the last object may not start in
      // the chunk.
      jbyte* last_obj_card = byte_for(last_block);
      if (!card_may_have_been_dirty(*last_obj_card)) {
        // The card containing the head is not dirty. Any marks in
        // subsequent cards still in this chunk must have been made
        // precisely; we can cap processing at the end.
        max_to_do = chunk_mr.end();
      } else {
        // The last object must be considered dirty, and extends onto the
        // following chunk. Look for a dirty card in that chunk that will
        // bound our processing.
        jbyte* limit_card = NULL;
        size_t last_block_size = sp->block_size(last_block);
        jbyte* last_card_of_last_obj =
          byte_for(last_block + last_block_size - 1);
        jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end());
        // This search potentially goes a long distance looking
        // for the next card that will be scanned. For example,
        // an object that is an array of primitives will not
        // have any cards covering regions interior to the array
        // that will need to be scanned. The scan can be terminated
        // at the last card of the next chunk. That would leave
        // limit_card as NULL and would result in "max_to_do"
        // being set with the LNC value or with the end
        // of the last block.
        jbyte* last_card_of_next_chunk = first_card_of_next_chunk +
          CardsPerStrideChunk;
        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start())
               == CardsPerStrideChunk, "last card of next chunk may be wrong");
        jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj,
                                                  last_card_of_next_chunk);
        for (jbyte* cur = first_card_of_next_chunk;
             cur <= last_card_to_check; cur++) {
          if (card_will_be_scanned(*cur)) {
            limit_card = cur; break;
          }
        }
        assert(0 <= cur_chunk_index+1 &&
               cur_chunk_index+1 < lowest_non_clean_chunk_size,
               "Bounds error.");
        // LNC for the next chunk
        jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1];
        if (limit_card == NULL) {
          limit_card = lnc_card;
        }
        if (limit_card != NULL) {
          if (lnc_card != NULL) {
            limit_card = (jbyte*)MIN2((intptr_t)limit_card,
                                      (intptr_t)lnc_card);
          }
          max_to_do = addr_for(limit_card);
        } else {
          max_to_do = last_block + last_block_size;
        }
      }
    }
    assert(max_to_do != NULL, "OOPS!");
  } else {
    max_to_do = used.end();
  }
  // Now we can set the closure we're using so it doesn't go beyond
  // max_to_do.
  dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
  dcto_cl->set_last_bottom(max_to_do);
#endif

  // Now we set *our* lowest_non_clean entry.
  // Find the object that spans our boundary, if one exists.
  // Nothing to do on the first chunk.
  if (chunk_mr.start() > used.start()) {
    // first_block is the block possibly spanning the chunk start
    HeapWord* first_block = sp->block_start(chunk_mr.start());
    // Does the block span the start of the chunk and is it
    // an object?
    if (first_block < chunk_mr.start() &&
        sp->block_is_obj(first_block)) {
      jbyte* first_dirty_card = NULL;
      jbyte* last_card_of_first_obj =
        byte_for(first_block + sp->block_size(first_block) - 1);
      jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
      jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
      jbyte* last_card_to_check =
        (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
                      (intptr_t) last_card_of_first_obj);
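      // Look through this chunk's cards that lie under the spanning object;
      // the first one that will be scanned becomes this chunk's LNC entry,
      // which the pass over the previous chunk consults (see the lnc_card
      // lookup above) when bounding its own max_to_do.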
      for (jbyte* cur = first_card_of_cur_chunk;
           cur <= last_card_to_check; cur++) {
        if (card_will_be_scanned(*cur)) {
          first_dirty_card = cur; break;
        }
      }
      if (first_dirty_card != NULL) {
        assert(0 <= cur_chunk_index &&
               cur_chunk_index < lowest_non_clean_chunk_size,
               "Bounds error.");
        lowest_non_clean[cur_chunk_index] = first_dirty_card;
      }
    }
  }
}

void
CardTableModRefBS::
get_LNC_array_for_space(Space* sp,
                        jbyte**& lowest_non_clean,
                        uintptr_t& lowest_non_clean_base_chunk_index,
                        size_t& lowest_non_clean_chunk_size) {

  int i = find_covering_region_containing(sp->bottom());
  MemRegion covered = _covered[i];
  size_t n_chunks = chunks_to_cover(covered);

  // Only the first thread to obtain the lock will resize the
  // LNC array for the covered region. Any later expansion can't affect
  // the used_at_save_marks region.
  // (I observed a bug in which the first thread to execute this would
  // resize, and then later "expand_and_allocate"s would increase the number
  // of chunks in the covered region. A second thread would then come along,
  // execute this, see that the size didn't match, and free and allocate
  // again, leaving the first thread using a freed "_lowest_non_clean"
  // array.)

  // Do a dirty read here. If we pass the conditional then take the rare
  // event lock and do the read again in case some other thread had already
  // succeeded and done the resize.
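  // In other words, this is the familiar double-checked pattern: the
  // unsynchronized read filters out the common case, and the test is
  // repeated under ParGCRareEvent_lock before any resizing is done.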
  int cur_collection = Universe::heap()->total_collections();
  if (_last_LNC_resizing_collection[i] != cur_collection) {
    MutexLocker x(ParGCRareEvent_lock);
    if (_last_LNC_resizing_collection[i] != cur_collection) {
      if (_lowest_non_clean[i] == NULL ||
          n_chunks != _lowest_non_clean_chunk_size[i]) {

        // Should we delete the old?
        if (_lowest_non_clean[i] != NULL) {
          assert(n_chunks != _lowest_non_clean_chunk_size[i],
                 "logical consequence");
          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
          _lowest_non_clean[i] = NULL;
        }
        // Now allocate a new one if necessary.
        if (_lowest_non_clean[i] == NULL) {
          _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks);
          _lowest_non_clean_chunk_size[i] = n_chunks;
          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
          for (int j = 0; j < (int)n_chunks; j++)
            _lowest_non_clean[i][j] = NULL;
        }
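        // The recorded base chunk index is what lets callers translate an
        // absolute chunk index (from addr_to_chunk_index) into an offset
        // into this per-region array; see the subtraction in
        // process_chunk_boundaries and par_non_clean_card_iterate_work.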
      }
      _last_LNC_resizing_collection[i] = cur_collection;
    }
  }
  // In any case, now do the initialization.
  lowest_non_clean = _lowest_non_clean[i];
  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
  lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i];
}