/*
 * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
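//
// As a rough illustration (a sketch, not the exact code the interpreter
// or compilers emit), the card-marking barrier for a store of the form
// "*field = new_val" reduces to a single byte store:
//
//   byte_map_base[uintptr_t(field) >> card_shift] = dirty_card;
//
// so a collector can find modified regions by scanning the byte map
// instead of the whole heap.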

# include "incls/_precompiled.incl"
# include "incls/_cardTableModRefBS.cpp.incl"

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

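// Illustrative arithmetic (assuming the common 512-byte card and an
// 8-byte HeapWord, so card_size_in_words == 64): covering 1M words
// takes 1M / 64 = 16384 cards, plus the guard card, for 16385
// byte-map entries.
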
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::page_size_for_region(_guard_index + 1, _guard_index + 1, 1)),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

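  // Worked example of the mapping (illustrative addresses only): with
  // card_shift == 9 (512-byte cards), a heap word at 0x1234567 lies on
  // card 0x1234567 >> 9 == 0x91a2, so byte_for() of that address is
  // byte_map_base + 0x91a2.
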
  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

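// Illustrative example (hypothetical addresses): if mr covers
// [0x1000, 0x3000) and a neighboring region's _committed entry covers
// [0x2000, 0x4000), MemRegion::minus() trims the overlap and only
// [0x1000, 0x2000) remains uniquely this region's to uncommit.
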
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  int ind = find_covering_region_by_base(new_region.start());
  MemRegion old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*)align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // The guard page is always committed and should not be committed over.
    HeapWord* new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          // Do better than this for Merlin
          vm_exit_out_of_memory(uncommit_region.byte_size(),
                                "card table contraction");
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < (int) _guard_index,
           "The guard card will be overwritten");
    jbyte* end = byte_after(new_region.last());
    // Do nothing if we resized downward.
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

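// Illustrative resize arithmetic (hypothetical offsets, _page_size ==
// 4096): if growth moves the end of a region's card-table range from
// byte offset 10000 to 20000, new_end is rounded up to 20480; with an
// old committed end of 12288, only [12288, 20480) is newly committed,
// and the freshly exposed entries are then memset to clean_card.
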
// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(oop* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}


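// Background for the precise/imprecise distinction above: a precise
// barrier dirties the card of the updated field's own address, while an
// imprecise barrier may dirty only the card of the containing object's
// head, so scanning a non-clean card may have to continue into objects
// that start on earlier cards.
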
void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,
                                               bool clear) {
  if (!mr.is_empty()) {
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
#else // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl, clear);
    }
  }
}

// NOTE: For this to work correctly, it is important that
// we look for non-clean cards below (so as to catch those
// marked precleaned), rather than look explicitly for dirty
// cards (and miss those marked precleaned). In that sense,
// the name precleaned is currently somewhat of a misnomer.
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
                                                    MemRegionClosure* cl,
                                                    bool clear) {
  // Figure out whether we have to worry about parallelism.
  bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary. So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          if (clear) {
            for (size_t i = 0; i < non_clean_cards; i++) {
              // Clean the dirty cards (but leave the other non-clean
              // alone). If parallel, do the cleaning atomically.
              jbyte cur_entry_val = cur_entry[i];
              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
                if (is_par) {
                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
                  assert(res != clean_card,
                         "Dirty card mysteriously cleaned");
                } else {
                  cur_entry[i] = clean_card;
                }
              }
            }
          }
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

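// Illustrative walk over one covered region (D = dirty, P = precleaned,
// C = clean; addresses increase to the right):
//
//   byte map:   C  D  D  P  C  D
//               ^limit         ^cur_entry starts at byte_for(mri.last())
//
// Scanning right to left first yields the single-card run {D}, then the
// three-card run {D, D, P}; each run is intersected with mri and handed
// to cl->do_MemRegion().
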
void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
                                                 OopClosure* cl,
                                                 bool clear,
                                                 bool before_save_marks) {
  // Note that dcto_cl is resource-allocated, so there is no
  // corresponding "delete".
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
  MemRegion used_mr;
  if (before_save_marks) {
    used_mr = sp->used_region_at_save_marks();
  } else {
    used_mr = sp->used_region();
  }
  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

// NOTES:
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
//     iterates over dirty card ranges in increasing address order.
// (2) Unlike, e.g., dirty_card_range_after_preclean() below,
//     this method does not make the dirty cards precleaned.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_preclean(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          for (size_t i = 0; i < dirty_cards; i++) {
            cur_entry[i] = precleaned_card;
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

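// The method above returns the first maximal run of dirty cards in mr
// (marking that run precleaned), or the empty region
// MemRegion(mr.end(), mr.end()) if mr holds no dirty card. A caller
// sketch (hypothetical; "ct" is this barrier set):
//
//   MemRegion range = ct->dirty_card_range_after_preclean(mr);
//   if (!range.is_empty()) {
//     // rescan the objects covered by range
//   }
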
// Set all the dirty cards in the given region to "precleaned" state.
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry++) {
        if (*cur_entry == dirty_card) {
          *cur_entry = precleaned_card;
        }
      }
    }
  }
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

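// For example, with 512-byte cards and a typical 4K page this yields a
// 2M constraint; the exact value depends on what os::vm_page_size()
// reports for the platform.
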
void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
public:
  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
  void do_MemRegion(MemRegion mr) {
    jbyte* entry = _ct->byte_for(mr.start());
    guarantee(*entry != CardTableModRefBS::clean_card,
              "Dirty card in region that should be clean");
  }
};

void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk, false);
}
#endif

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}