Mercurial > hg > graal-jvmci-8
annotate src/share/vm/memory/cardTableModRefBS.cpp @ 1091:6aa7255741f3
6906727: UseCompressedOops: some card-marking fixes related to object arrays
Summary: Introduced a new write_ref_array(HeapWords* start, size_t count) method that does the requisite MemRegion range calculation so (some of the) clients of the erstwhile write_ref_array(MemRegion mr) do not need to worry. This removed all external uses of array_size(), which was also simplified and made private. Asserts were added to catch other possible issues. Further, less essential, fixes stemming from this investigation are deferred to CR 6904516 (to follow shortly in hs17).
Reviewed-by: kvn, coleenp, jmasa
author | ysr |
---|---|
date | Thu, 03 Dec 2009 15:01:57 -0800 |
parents | 8624da129f0b |
children | c18cbe5936b8 |
rev | line source |
---|---|
0 | 1 /* |
579 | 2 * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and | |
26 // enumerate ref fields that have been modified (since the last | |
27 // enumeration.) | |
28 | |
29 # include "incls/_precompiled.incl" | |
30 # include "incls/_cardTableModRefBS.cpp.incl" | |
31 | |
32 size_t CardTableModRefBS::cards_required(size_t covered_words) | |
33 { | |
34 // Add one for a guard card, used to detect errors. | |
35 const size_t words = align_size_up(covered_words, card_size_in_words); | |
36 return words / card_size_in_words + 1; | |
37 } | |
38 | |
39 size_t CardTableModRefBS::compute_byte_map_size() | |
40 { | |
41 assert(_guard_index == cards_required(_whole_heap.word_size()) - 1, | |
42 "unitialized, check declaration order"); | |
43 assert(_page_size != 0, "unitialized, check declaration order"); | |
44 const size_t granularity = os::vm_allocation_granularity(); | |
45 return align_size_up(_guard_index + 1, MAX2(_page_size, granularity)); | |
46 } | |
47 | |
48 CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap, | |
49 int max_covered_regions): | |
50 ModRefBarrierSet(max_covered_regions), | |
51 _whole_heap(whole_heap), | |
52 _guard_index(cards_required(whole_heap.word_size()) - 1), | |
53 _last_valid_index(_guard_index - 1), | |
21 | 54 _page_size(os::vm_page_size()), |
0 | 55 _byte_map_size(compute_byte_map_size()) |
56 { | |
57 _kind = BarrierSet::CardTableModRef; | |
58 | |
59 HeapWord* low_bound = _whole_heap.start(); | |
60 HeapWord* high_bound = _whole_heap.end(); | |
61 assert((uintptr_t(low_bound) & (card_size - 1)) == 0, "heap must start at card boundary"); | |
62 assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary"); | |
63 | |
64 assert(card_size <= 512, "card_size must be less than 512"); // why? | |
65 | |
66 _covered = new MemRegion[max_covered_regions]; | |
67 _committed = new MemRegion[max_covered_regions]; | |
68 if (_covered == NULL || _committed == NULL) | |
69 vm_exit_during_initialization("couldn't alloc card table covered region set."); | |
70 int i; | |
71 for (i = 0; i < max_covered_regions; i++) { | |
72 _covered[i].set_word_size(0); | |
73 _committed[i].set_word_size(0); | |
74 } | |
75 _cur_covered_regions = 0; | |
76 | |
77 const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 : | |
78 MAX2(_page_size, (size_t) os::vm_allocation_granularity()); | |
79 ReservedSpace heap_rs(_byte_map_size, rs_align, false); | |
80 os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1, | |
81 _page_size, heap_rs.base(), heap_rs.size()); | |
82 if (!heap_rs.is_reserved()) { | |
83 vm_exit_during_initialization("Could not reserve enough space for the " | |
84 "card marking array"); | |
85 } | |
86 | |
87 // The assember store_check code will do an unsigned shift of the oop, | |
88 // then add it to byte_map_base, i.e. | |
89 // | |
90 // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift) | |
91 _byte_map = (jbyte*) heap_rs.base(); | |
92 byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); | |
93 assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); | |
94 assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); | |
95 | |
96 jbyte* guard_card = &_byte_map[_guard_index]; | |
97 uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size); | |
98 _guard_region = MemRegion((HeapWord*)guard_page, _page_size); | |
99 if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) { | |
100 // Do better than this for Merlin | |
101 vm_exit_out_of_memory(_page_size, "card table last card"); | |
102 } | |
103 *guard_card = last_card; | |
104 | |
105 _lowest_non_clean = | |
106 NEW_C_HEAP_ARRAY(CardArr, max_covered_regions); | |
107 _lowest_non_clean_chunk_size = | |
108 NEW_C_HEAP_ARRAY(size_t, max_covered_regions); | |
109 _lowest_non_clean_base_chunk_index = | |
110 NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions); | |
111 _last_LNC_resizing_collection = | |
112 NEW_C_HEAP_ARRAY(int, max_covered_regions); | |
113 if (_lowest_non_clean == NULL | |
114 || _lowest_non_clean_chunk_size == NULL | |
115 || _lowest_non_clean_base_chunk_index == NULL | |
116 || _last_LNC_resizing_collection == NULL) | |
117 vm_exit_during_initialization("couldn't allocate an LNC array."); | |
118 for (i = 0; i < max_covered_regions; i++) { | |
119 _lowest_non_clean[i] = NULL; | |
120 _lowest_non_clean_chunk_size[i] = 0; | |
121 _last_LNC_resizing_collection[i] = -1; | |
122 } | |
123 | |
124 if (TraceCardTableModRefBS) { | |
125 gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: "); | |
126 gclog_or_tty->print_cr(" " | |
127 " &_byte_map[0]: " INTPTR_FORMAT | |
128 " &_byte_map[_last_valid_index]: " INTPTR_FORMAT, | |
129 &_byte_map[0], | |
130 &_byte_map[_last_valid_index]); | |
131 gclog_or_tty->print_cr(" " | |
132 " byte_map_base: " INTPTR_FORMAT, | |
133 byte_map_base); | |
134 } | |
135 } | |
136 | |
137 int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) { | |
138 int i; | |
139 for (i = 0; i < _cur_covered_regions; i++) { | |
140 if (_covered[i].start() == base) return i; | |
141 if (_covered[i].start() > base) break; | |
142 } | |
143 // If we didn't find it, create a new one. | |
144 assert(_cur_covered_regions < _max_covered_regions, | |
145 "too many covered regions"); | |
146 // Move the ones above up, to maintain sorted order. | |
147 for (int j = _cur_covered_regions; j > i; j--) { | |
148 _covered[j] = _covered[j-1]; | |
149 _committed[j] = _committed[j-1]; | |
150 } | |
151 int res = i; | |
152 _cur_covered_regions++; | |
153 _covered[res].set_start(base); | |
154 _covered[res].set_word_size(0); | |
155 jbyte* ct_start = byte_for(base); | |
156 uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size); | |
157 _committed[res].set_start((HeapWord*)ct_start_aligned); | |
158 _committed[res].set_word_size(0); | |
159 return res; | |
160 } | |
161 | |
162 int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) { | |
163 for (int i = 0; i < _cur_covered_regions; i++) { | |
164 if (_covered[i].contains(addr)) { | |
165 return i; | |
166 } | |
167 } | |
168 assert(0, "address outside of heap?"); | |
169 return -1; | |
170 } | |
171 | |
172 HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const { | |
173 HeapWord* max_end = NULL; | |
174 for (int j = 0; j < ind; j++) { | |
175 HeapWord* this_end = _committed[j].end(); | |
176 if (this_end > max_end) max_end = this_end; | |
177 } | |
178 return max_end; | |
179 } | |
180 | |
181 MemRegion CardTableModRefBS::committed_unique_to_self(int self, | |
182 MemRegion mr) const { | |
183 MemRegion result = mr; | |
184 for (int r = 0; r < _cur_covered_regions; r += 1) { | |
185 if (r != self) { | |
186 result = result.minus(_committed[r]); | |
187 } | |
188 } | |
189 // Never include the guard page. | |
190 result = result.minus(_guard_region); | |
191 return result; | |
192 } | |
193 | |
194 void CardTableModRefBS::resize_covered_region(MemRegion new_region) { | |
195 // We don't change the start of a region, only the end. | |
196 assert(_whole_heap.contains(new_region), | |
197 "attempt to cover area not in reserved area"); | |
198 debug_only(verify_guard();) | |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
199 // collided is true if the expansion would push into another committed region |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
200 debug_only(bool collided = false;) |
6
73e96e5c30df
6624765: Guarantee failure "Unexpected dirty card found"
jmasa
parents:
0
diff
changeset
|
201 int const ind = find_covering_region_by_base(new_region.start()); |
73e96e5c30df
6624765: Guarantee failure "Unexpected dirty card found"
jmasa
parents:
0
diff
changeset
|
202 MemRegion const old_region = _covered[ind]; |
0 | 203 assert(old_region.start() == new_region.start(), "just checking"); |
204 if (new_region.word_size() != old_region.word_size()) { | |
205 // Commit new or uncommit old pages, if necessary. | |
206 MemRegion cur_committed = _committed[ind]; | |
207 // Extend the end of this _commited region | |
208 // to cover the end of any lower _committed regions. | |
209 // This forms overlapping regions, but never interior regions. | |
6
73e96e5c30df
6624765: Guarantee failure "Unexpected dirty card found"
jmasa
parents:
0
diff
changeset
|
210 HeapWord* const max_prev_end = largest_prev_committed_end(ind); |
0 | 211 if (max_prev_end > cur_committed.end()) { |
212 cur_committed.set_end(max_prev_end); | |
213 } | |
214 // Align the end up to a page size (starts are already aligned). | |
6
73e96e5c30df
6624765: Guarantee failure "Unexpected dirty card found"
jmasa
parents:
0
diff
changeset
|
215 jbyte* const new_end = byte_after(new_region.last()); |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
216 HeapWord* new_end_aligned = |
6
73e96e5c30df
6624765: Guarantee failure "Unexpected dirty card found"
jmasa
parents:
0
diff
changeset
|
217 (HeapWord*) align_size_up((uintptr_t)new_end, _page_size); |
0 | 218 assert(new_end_aligned >= (HeapWord*) new_end, |
219 "align up, but less"); | |
581
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
220 // Check the other regions (excludes "ind") to ensure that |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
221 // the new_end_aligned does not intrude onto the committed |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
222 // space of another region. |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
223 int ri = 0; |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
224 for (ri = 0; ri < _cur_covered_regions; ri++) { |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
225 if (ri != ind) { |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
226 if (_committed[ri].contains(new_end_aligned)) { |
581
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
227 // The prior check included in the assert |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
228 // (new_end_aligned >= _committed[ri].start()) |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
229 // is redundant with the "contains" test. |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
230 // Any region containing the new end |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
231 // should start at or beyond the region found (ind) |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
232 // for the new end (committed regions are not expected to |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
233 // be proper subsets of other committed regions). |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
234 assert(_committed[ri].start() >= _committed[ind].start(), |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
235 "New end of committed region is inconsistent"); |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
236 new_end_aligned = _committed[ri].start(); |
581
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
237 // new_end_aligned can be equal to the start of its |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
238 // committed region (i.e., of "ind") if a second |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
239 // region following "ind" also start at the same location |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
240 // as "ind". |
9e5a6ed08fc9
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
jmasa
parents:
489
diff
changeset
|
241 assert(new_end_aligned >= _committed[ind].start(), |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
242 "New end of committed region is before start"); |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
243 debug_only(collided = true;) |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
244 // Should only collide with 1 region |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
245 break; |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
246 } |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
247 } |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
248 } |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
249 #ifdef ASSERT |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
250 for (++ri; ri < _cur_covered_regions; ri++) { |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
251 assert(!_committed[ri].contains(new_end_aligned), |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
252 "New end of committed region is in a second committed region"); |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
253 } |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
254 #endif |
0 | 255 // The guard page is always committed and should not be committed over. |
887
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
256 // "guarded" is used for assertion checking below and recalls the fact |
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
257 // that the would-be end of the new committed region would have |
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
258 // penetrated the guard page. |
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
259 HeapWord* new_end_for_commit = new_end_aligned; |
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
260 |
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
261 DEBUG_ONLY(bool guarded = false;) |
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
262 if (new_end_for_commit > _guard_region.start()) { |
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
263 new_end_for_commit = _guard_region.start(); |
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
264 DEBUG_ONLY(guarded = true;) |
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
265 } |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
266 |
0 | 267 if (new_end_for_commit > cur_committed.end()) { |
268 // Must commit new pages. | |
6
73e96e5c30df
6624765: Guarantee failure "Unexpected dirty card found"
jmasa
parents:
0
diff
changeset
|
269 MemRegion const new_committed = |
0 | 270 MemRegion(cur_committed.end(), new_end_for_commit); |
271 | |
272 assert(!new_committed.is_empty(), "Region should not be empty here"); | |
273 if (!os::commit_memory((char*)new_committed.start(), | |
274 new_committed.byte_size(), _page_size)) { | |
275 // Do better than this for Merlin | |
276 vm_exit_out_of_memory(new_committed.byte_size(), | |
277 "card table expansion"); | |
278 } | |
279 // Use new_end_aligned (as opposed to new_end_for_commit) because | |
280 // the cur_committed region may include the guard region. | |
281 } else if (new_end_aligned < cur_committed.end()) { | |
282 // Must uncommit pages. | |
6
73e96e5c30df
6624765: Guarantee failure "Unexpected dirty card found"
jmasa
parents:
0
diff
changeset
|
283 MemRegion const uncommit_region = |
0 | 284 committed_unique_to_self(ind, MemRegion(new_end_aligned, |
285 cur_committed.end())); | |
286 if (!uncommit_region.is_empty()) { | |
287 if (!os::uncommit_memory((char*)uncommit_region.start(), | |
288 uncommit_region.byte_size())) { | |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
289 assert(false, "Card table contraction failed"); |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
290 // The call failed so don't change the end of the |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
291 // committed region. This is better than taking the |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
292 // VM down. |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
293 new_end_aligned = _committed[ind].end(); |
0 | 294 } |
295 } | |
296 } | |
297 // In any case, we can reset the end of the current committed entry. | |
298 _committed[ind].set_end(new_end_aligned); | |
299 | |
300 // The default of 0 is not necessarily clean cards. | |
301 jbyte* entry; | |
302 if (old_region.last() < _whole_heap.start()) { | |
303 entry = byte_for(_whole_heap.start()); | |
304 } else { | |
305 entry = byte_after(old_region.last()); | |
306 } | |
489
2494ab195856
6653214: MemoryPoolMXBean.setUsageThreshold() does not support large heap sizes.
swamyv
parents:
356
diff
changeset
|
307 assert(index_for(new_region.last()) < _guard_index, |
0 | 308 "The guard card will be overwritten"); |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
309 // This line commented out cleans the newly expanded region and |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
310 // not the aligned up expanded region. |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
311 // jbyte* const end = byte_after(new_region.last()); |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
312 jbyte* const end = (jbyte*) new_end_for_commit; |
887
ff004bcd2596
6843292: "Expect to be beyond new region unless impacting another region" assertion too strong
jmasa
parents:
628
diff
changeset
|
313 assert((end >= byte_after(new_region.last())) || collided || guarded, |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
314 "Expect to be beyond new region unless impacting another region"); |
0 | 315 // do nothing if we resized downward. |
208
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
316 #ifdef ASSERT |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
317 for (int ri = 0; ri < _cur_covered_regions; ri++) { |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
318 if (ri != ind) { |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
319 // The end of the new committed region should not |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
320 // be in any existing region unless it matches |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
321 // the start of the next region. |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
322 assert(!_committed[ri].contains(end) || |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
323 (_committed[ri].start() == (HeapWord*) end), |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
324 "Overlapping committed regions"); |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
325 } |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
326 } |
35ca13d63fe8
6688799: Second fix for Guarantee failure "Unexpected dirty card found"
jmasa
parents:
113
diff
changeset
|
327 #endif |
0 | 328 if (entry < end) { |
329 memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte))); | |
330 } | |
331 } | |
332 // In any case, the covered size changes. | |
333 _covered[ind].set_word_size(new_region.word_size()); | |
334 if (TraceCardTableModRefBS) { | |
335 gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: "); | |
336 gclog_or_tty->print_cr(" " | |
337 " _covered[%d].start(): " INTPTR_FORMAT | |
338 " _covered[%d].last(): " INTPTR_FORMAT, | |
339 ind, _covered[ind].start(), | |
340 ind, _covered[ind].last()); | |
341 gclog_or_tty->print_cr(" " | |
342 " _committed[%d].start(): " INTPTR_FORMAT | |
343 " _committed[%d].last(): " INTPTR_FORMAT, | |
344 ind, _committed[ind].start(), | |
345 ind, _committed[ind].last()); | |
346 gclog_or_tty->print_cr(" " | |
347 " byte_for(start): " INTPTR_FORMAT | |
348 " byte_for(last): " INTPTR_FORMAT, | |
349 byte_for(_covered[ind].start()), | |
350 byte_for(_covered[ind].last())); | |
351 gclog_or_tty->print_cr(" " | |
352 " addr_for(start): " INTPTR_FORMAT | |
353 " addr_for(last): " INTPTR_FORMAT, | |
354 addr_for((jbyte*) _committed[ind].start()), | |
355 addr_for((jbyte*) _committed[ind].last())); | |
356 } | |
357 debug_only(verify_guard();) | |
358 } | |
359 | |
360 // Note that these versions are precise! The scanning code has to handle the | |
361 // fact that the write barrier may be either precise or imprecise. | |
362 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
46
diff
changeset
|
363 void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) { |
0 | 364 inline_write_ref_field(field, newVal); |
365 } | |
366 | |
/*
 Claimed and deferred bits are used together in G1 during the evacuation
 pause. These bits can have the following state transitions:
 1. The claimed bit can be put over any other card state. Except that
    the "dirty -> dirty and claimed" transition is checked for in
    G1 code and is not used.
 2. Deferred bit can be set only if the previous state of the card
    was either clean or claimed. mark_card_deferred() is wait-free.
    We do not care if the operation is successful because if
    it is not it will only result in a duplicate entry in the update
    buffer because of the "cache-miss". So it's not worth spinning.
 */
0 | 380 |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
381 bool CardTableModRefBS::claim_card(size_t card_index) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
382 jbyte val = _byte_map[card_index]; |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
383 assert(val != dirty_card_val(), "Shouldn't claim a dirty card"); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
384 while (val == clean_card_val() || |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
385 (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
386 jbyte new_val = val; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
387 if (val == clean_card_val()) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
388 new_val = (jbyte)claimed_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
389 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
390 new_val = val | (jbyte)claimed_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
391 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
392 jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
393 if (res == val) { |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
394 return true; |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
395 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
396 val = res; |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
397 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
398 return false; |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
399 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
400 |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
401 bool CardTableModRefBS::mark_card_deferred(size_t card_index) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
402 jbyte val = _byte_map[card_index]; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
403 // It's already processed |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
404 if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
405 return false; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
406 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
407 // Cached bit can be installed either on a clean card or on a claimed card. |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
408 jbyte new_val = val; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
409 if (val == clean_card_val()) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
410 new_val = (jbyte)deferred_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
411 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
412 if (val & claimed_card_val()) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
413 new_val = val | (jbyte)deferred_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
414 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
415 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
416 if (new_val != val) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
417 Atomic::cmpxchg(new_val, &_byte_map[card_index], val); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
418 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
419 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
420 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
421 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
581
diff
changeset
|
422 |
0 | 423 void CardTableModRefBS::non_clean_card_iterate(Space* sp, |
424 MemRegion mr, | |
425 DirtyCardToOopClosure* dcto_cl, | |
426 MemRegionClosure* cl, | |
427 bool clear) { | |
428 if (!mr.is_empty()) { | |
429 int n_threads = SharedHeap::heap()->n_par_threads(); | |
430 if (n_threads > 0) { | |
431 #ifndef SERIALGC | |
432 par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads); | |
433 #else // SERIALGC | |
434 fatal("Parallel gc not supported here."); | |
435 #endif // SERIALGC | |
436 } else { | |
437 non_clean_card_iterate_work(mr, cl, clear); | |
438 } | |
439 } | |
440 } | |
441 | |
442 // NOTE: For this to work correctly, it is important that | |
443 // we look for non-clean cards below (so as to catch those | |
444 // marked precleaned), rather than look explicitly for dirty | |
445 // cards (and miss those marked precleaned). In that sense, | |
446 // the name precleaned is currently somewhat of a misnomer. | |
447 void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr, | |
448 MemRegionClosure* cl, | |
449 bool clear) { | |
450 // Figure out whether we have to worry about parallelism. | |
451 bool is_par = (SharedHeap::heap()->n_par_threads() > 1); | |
452 for (int i = 0; i < _cur_covered_regions; i++) { | |
453 MemRegion mri = mr.intersection(_covered[i]); | |
454 if (mri.word_size() > 0) { | |
455 jbyte* cur_entry = byte_for(mri.last()); | |
456 jbyte* limit = byte_for(mri.start()); | |
457 while (cur_entry >= limit) { | |
458 jbyte* next_entry = cur_entry - 1; | |
459 if (*cur_entry != clean_card) { | |
460 size_t non_clean_cards = 1; | |
461 // Should the next card be included in this range of dirty cards. | |
462 while (next_entry >= limit && *next_entry != clean_card) { | |
463 non_clean_cards++; | |
464 cur_entry = next_entry; | |
465 next_entry--; | |
466 } | |
467 // The memory region may not be on a card boundary. So that | |
468 // objects beyond the end of the region are not processed, make | |
469 // cur_cards precise with regard to the end of the memory region. | |
470 MemRegion cur_cards(addr_for(cur_entry), | |
471 non_clean_cards * card_size_in_words); | |
472 MemRegion dirty_region = cur_cards.intersection(mri); | |
473 if (clear) { | |
474 for (size_t i = 0; i < non_clean_cards; i++) { | |
475 // Clean the dirty cards (but leave the other non-clean | |
476 // alone.) If parallel, do the cleaning atomically. | |
477 jbyte cur_entry_val = cur_entry[i]; | |
478 if (card_is_dirty_wrt_gen_iter(cur_entry_val)) { | |
479 if (is_par) { | |
480 jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val); | |
481 assert(res != clean_card, | |
482 "Dirty card mysteriously cleaned"); | |
483 } else { | |
484 cur_entry[i] = clean_card; | |
485 } | |
486 } | |
487 } | |
488 } | |
489 cl->do_MemRegion(dirty_region); | |
490 } | |
491 cur_entry = next_entry; | |
492 } | |
493 } | |
494 } | |
495 } | |
496 | |
497 void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp, | |
498 OopClosure* cl, | |
499 bool clear, | |
500 bool before_save_marks) { | |
501 // Note that dcto_cl is resource-allocated, so there is no | |
502 // corresponding "delete". | |
503 DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision()); | |
504 MemRegion used_mr; | |
505 if (before_save_marks) { | |
506 used_mr = sp->used_region_at_save_marks(); | |
507 } else { | |
508 used_mr = sp->used_region(); | |
509 } | |
510 non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear); | |
511 } | |
512 | |
513 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) { | |
1091
6aa7255741f3
6906727: UseCompressedOops: some card-marking fixes related to object arrays
ysr
parents:
940
diff
changeset
|
514 assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); |
6aa7255741f3
6906727: UseCompressedOops: some card-marking fixes related to object arrays
ysr
parents:
940
diff
changeset
|
515 assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); |
0 | 516 jbyte* cur = byte_for(mr.start()); |
517 jbyte* last = byte_after(mr.last()); | |
518 while (cur < last) { | |
519 *cur = dirty_card; | |
520 cur++; | |
521 } | |
522 } | |
523 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
524 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) { |
1091
6aa7255741f3
6906727: UseCompressedOops: some card-marking fixes related to object arrays
ysr
parents:
940
diff
changeset
|
525 assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); |
6aa7255741f3
6906727: UseCompressedOops: some card-marking fixes related to object arrays
ysr
parents:
940
diff
changeset
|
526 assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); |
0 | 527 for (int i = 0; i < _cur_covered_regions; i++) { |
528 MemRegion mri = mr.intersection(_covered[i]); | |
529 if (!mri.is_empty()) dirty_MemRegion(mri); | |
530 } | |
531 } | |
532 | |
533 void CardTableModRefBS::clear_MemRegion(MemRegion mr) { | |
534 // Be conservative: only clean cards entirely contained within the | |
535 // region. | |
536 jbyte* cur; | |
537 if (mr.start() == _whole_heap.start()) { | |
538 cur = byte_for(mr.start()); | |
539 } else { | |
540 assert(mr.start() > _whole_heap.start(), "mr is not covered."); | |
541 cur = byte_after(mr.start() - 1); | |
542 } | |
543 jbyte* last = byte_after(mr.last()); | |
544 memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte))); | |
545 } | |
546 | |
547 void CardTableModRefBS::clear(MemRegion mr) { | |
548 for (int i = 0; i < _cur_covered_regions; i++) { | |
549 MemRegion mri = mr.intersection(_covered[i]); | |
550 if (!mri.is_empty()) clear_MemRegion(mri); | |
551 } | |
552 } | |
553 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
554 void CardTableModRefBS::dirty(MemRegion mr) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
555 jbyte* first = byte_for(mr.start()); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
556 jbyte* last = byte_after(mr.last()); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
557 memset(first, dirty_card, last-first); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
558 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
559 |
0 | 560 // NOTES: |
561 // (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate() | |
562 // iterates over dirty cards ranges in increasing address order. | |
563 void CardTableModRefBS::dirty_card_iterate(MemRegion mr, | |
564 MemRegionClosure* cl) { | |
565 for (int i = 0; i < _cur_covered_regions; i++) { | |
566 MemRegion mri = mr.intersection(_covered[i]); | |
567 if (!mri.is_empty()) { | |
568 jbyte *cur_entry, *next_entry, *limit; | |
569 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); | |
570 cur_entry <= limit; | |
571 cur_entry = next_entry) { | |
572 next_entry = cur_entry + 1; | |
573 if (*cur_entry == dirty_card) { | |
574 size_t dirty_cards; | |
575 // Accumulate maximal dirty card range, starting at cur_entry | |
576 for (dirty_cards = 1; | |
577 next_entry <= limit && *next_entry == dirty_card; | |
578 dirty_cards++, next_entry++); | |
579 MemRegion cur_cards(addr_for(cur_entry), | |
580 dirty_cards*card_size_in_words); | |
581 cl->do_MemRegion(cur_cards); | |
582 } | |
583 } | |
584 } | |
585 } | |
586 } | |
587 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
588 MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
589 bool reset, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
590 int reset_val) { |
0 | 591 for (int i = 0; i < _cur_covered_regions; i++) { |
592 MemRegion mri = mr.intersection(_covered[i]); | |
593 if (!mri.is_empty()) { | |
594 jbyte* cur_entry, *next_entry, *limit; | |
595 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); | |
596 cur_entry <= limit; | |
597 cur_entry = next_entry) { | |
598 next_entry = cur_entry + 1; | |
599 if (*cur_entry == dirty_card) { | |
600 size_t dirty_cards; | |
601 // Accumulate maximal dirty card range, starting at cur_entry | |
602 for (dirty_cards = 1; | |
603 next_entry <= limit && *next_entry == dirty_card; | |
604 dirty_cards++, next_entry++); | |
605 MemRegion cur_cards(addr_for(cur_entry), | |
606 dirty_cards*card_size_in_words); | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
607 if (reset) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
608 for (size_t i = 0; i < dirty_cards; i++) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
609 cur_entry[i] = reset_val; |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
610 } |
0 | 611 } |
612 return cur_cards; | |
613 } | |
614 } | |
615 } | |
616 } | |
617 return MemRegion(mr.end(), mr.end()); | |
618 } | |
619 | |
620 // Set all the dirty cards in the given region to "precleaned" state. | |
621 void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) { | |
622 for (int i = 0; i < _cur_covered_regions; i++) { | |
623 MemRegion mri = mr.intersection(_covered[i]); | |
624 if (!mri.is_empty()) { | |
625 jbyte *cur_entry, *limit; | |
626 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); | |
627 cur_entry <= limit; | |
628 cur_entry++) { | |
629 if (*cur_entry == dirty_card) { | |
630 *cur_entry = precleaned_card; | |
631 } | |
632 } | |
633 } | |
634 } | |
635 } | |
636 | |
637 uintx CardTableModRefBS::ct_max_alignment_constraint() { | |
638 return card_size * os::vm_page_size(); | |
639 } | |
640 | |
641 void CardTableModRefBS::verify_guard() { | |
642 // For product build verification | |
643 guarantee(_byte_map[_guard_index] == last_card, | |
644 "card table guard has been modified"); | |
645 } | |
646 | |
647 void CardTableModRefBS::verify() { | |
648 verify_guard(); | |
649 } | |
650 | |
651 #ifndef PRODUCT | |
652 class GuaranteeNotModClosure: public MemRegionClosure { | |
653 CardTableModRefBS* _ct; | |
654 public: | |
655 GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {} | |
656 void do_MemRegion(MemRegion mr) { | |
657 jbyte* entry = _ct->byte_for(mr.start()); | |
658 guarantee(*entry != CardTableModRefBS::clean_card, | |
659 "Dirty card in region that should be clean"); | |
660 } | |
661 }; | |
662 | |
663 void CardTableModRefBS::verify_clean_region(MemRegion mr) { | |
664 GuaranteeNotModClosure blk(this); | |
665 non_clean_card_iterate_work(mr, &blk, false); | |
666 } | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
667 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
668 // To verify a MemRegion is entirely dirty this closure is passed to |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
669 // dirty_card_iterate. If the region is dirty do_MemRegion will be |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
670 // invoked only once with a MemRegion equal to the one being |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
671 // verified. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
672 class GuaranteeDirtyClosure: public MemRegionClosure { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
673 CardTableModRefBS* _ct; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
674 MemRegion _mr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
675 bool _result; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
676 public: |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
677 GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
678 : _ct(ct), _mr(mr), _result(false) {} |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
679 void do_MemRegion(MemRegion mr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
680 _result = _mr.equals(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
681 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
682 bool result() const { return _result; } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
683 }; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
684 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
685 void CardTableModRefBS::verify_dirty_region(MemRegion mr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
686 GuaranteeDirtyClosure blk(this, mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
687 dirty_card_iterate(mr, &blk); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
688 guarantee(blk.result(), "Non-dirty cards in region that should be dirty"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
887
diff
changeset
|
689 } |
0 | 690 #endif |
691 | |
692 bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) { | |
693 return | |
694 CardTableModRefBS::card_will_be_scanned(cv) || | |
695 _rs->is_prev_nonclean_card_val(cv); | |
696 }; | |
697 | |
698 bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) { | |
699 return | |
700 cv != clean_card && | |
701 (CardTableModRefBS::card_may_have_been_dirty(cv) || | |
702 CardTableRS::youngergen_may_have_been_dirty(cv)); | |
703 }; |