// Source: OpenJDK HotSpot, src/share/vm/memory/cardTableModRefBS.hpp
// (reconstructed from a Mercurial annotate view, changeset 3109:3664989976e2,
//  "Merge", Gilles Duboscq, Fri, 01 Jul 2011; parent abdfc822206f)
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

1972 | 25 #ifndef SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP |
26 #define SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP | |
27 | |
28 #include "memory/modRefBarrierSet.hpp" | |
29 #include "oops/oop.hpp" | |
30 #include "oops/oop.inline2.hpp" | |
31 | |
0 | 32 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and |
33 // enumerate ref fields that have been modified (since the last | |
34 // enumeration.) | |
35 | |
36 // As it currently stands, this barrier is *imprecise*: when a ref field in | |
37 // an object "o" is modified, the card table entry for the card containing | |
38 // the head of "o" is dirtied, not necessarily the card containing the | |
39 // modified field itself. For object arrays, however, the barrier *is* | |
40 // precise; only the card containing the modified element is dirtied. | |
41 // Any MemRegionClosures used to scan dirty cards should take these | |
42 // considerations into account. | |
43 | |
44 class Generation; | |
45 class OopsInGenClosure; | |
46 class DirtyCardToOopClosure; | |
47 | |
48 class CardTableModRefBS: public ModRefBarrierSet { | |
49 // Some classes get to look at some private stuff. | |
50 friend class BytecodeInterpreter; | |
51 friend class VMStructs; | |
52 friend class CardTableRS; | |
53 friend class CheckForUnmarkedOops; // Needs access to raw card bytes. | |
1692 | 54 friend class SharkBuilder; |
0 | 55 #ifndef PRODUCT |
56 // For debugging. | |
57 friend class GuaranteeNotModClosure; | |
58 #endif | |
59 protected: | |
60 | |
61 enum CardValues { | |
62 clean_card = -1, | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
63 // The mask contains zeros in places for all other values. |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
64 clean_card_mask = clean_card - 31, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
65 |
0 | 66 dirty_card = 0, |
67 precleaned_card = 1, | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
68 claimed_card = 2, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
69 deferred_card = 4, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
70 last_card = 8, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
71 CT_MR_BS_last_reserved = 16 |
0 | 72 }; |
73 | |
74 // dirty and precleaned are equivalent wrt younger_refs_iter. | |
75 static bool card_is_dirty_wrt_gen_iter(jbyte cv) { | |
76 return cv == dirty_card || cv == precleaned_card; | |
77 } | |
78 | |
79 // Returns "true" iff the value "cv" will cause the card containing it | |
80 // to be scanned in the current traversal. May be overridden by | |
81 // subtypes. | |
82 virtual bool card_will_be_scanned(jbyte cv) { | |
83 return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv); | |
84 } | |
85 | |
86 // Returns "true" iff the value "cv" may have represented a dirty card at | |
87 // some point. | |
88 virtual bool card_may_have_been_dirty(jbyte cv) { | |
89 return card_is_dirty_wrt_gen_iter(cv); | |
90 } | |
91 | |
92 // The declaration order of these const fields is important; see the | |
93 // constructor before changing. | |
94 const MemRegion _whole_heap; // the region covered by the card table | |
95 const size_t _guard_index; // index of very last element in the card | |
96 // table; it is set to a guard value | |
97 // (last_card) and should never be modified | |
98 const size_t _last_valid_index; // index of the last valid element | |
99 const size_t _page_size; // page size used when mapping _byte_map | |
100 const size_t _byte_map_size; // in bytes | |
101 jbyte* _byte_map; // the card marking array | |
102 | |
103 int _cur_covered_regions; | |
104 // The covered regions should be in address order. | |
105 MemRegion* _covered; | |
106 // The committed regions correspond one-to-one to the covered regions. | |
107 // They represent the card-table memory that has been committed to service | |
108 // the corresponding covered region. It may be that committed region for | |
109 // one covered region corresponds to a larger region because of page-size | |
110 // roundings. Thus, a committed region for one covered region may | |
111 // actually extend onto the card-table space for the next covered region. | |
112 MemRegion* _committed; | |
113 | |
114 // The last card is a guard card, and we commit the page for it so | |
115 // we can use the card for verification purposes. We make sure we never | |
116 // uncommit the MemRegion for that page. | |
117 MemRegion _guard_region; | |
118 | |
119 protected: | |
120 // Initialization utilities; covered_words is the size of the covered region | |
121 // in, um, words. | |
122 inline size_t cards_required(size_t covered_words); | |
123 inline size_t compute_byte_map_size(); | |
124 | |
125 // Finds and return the index of the region, if any, to which the given | |
126 // region would be contiguous. If none exists, assign a new region and | |
127 // returns its index. Requires that no more than the maximum number of | |
128 // covered regions defined in the constructor are ever in use. | |
129 int find_covering_region_by_base(HeapWord* base); | |
130 | |
131 // Same as above, but finds the region containing the given address | |
132 // instead of starting at a given base address. | |
133 int find_covering_region_containing(HeapWord* addr); | |
134 | |
135 // Resize one of the regions covered by the remembered set. | |
136 void resize_covered_region(MemRegion new_region); | |
137 | |
138 // Returns the leftmost end of a committed region corresponding to a | |
139 // covered region before covered region "ind", or else "NULL" if "ind" is | |
140 // the first covered region. | |
141 HeapWord* largest_prev_committed_end(int ind) const; | |
142 | |
143 // Returns the part of the region mr that doesn't intersect with | |
144 // any committed region other than self. Used to prevent uncommitting | |
145 // regions that are also committed by other regions. Also protects | |
146 // against uncommitting the guard region. | |
147 MemRegion committed_unique_to_self(int self, MemRegion mr) const; | |
148 | |
149 // Mapping from address to card marking array entry | |
150 jbyte* byte_for(const void* p) const { | |
151 assert(_whole_heap.contains(p), | |
152 "out of bounds access to card marking array"); | |
153 jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift]; | |
154 assert(result >= _byte_map && result < _byte_map + _byte_map_size, | |
155 "out of bounds accessor for card marking array"); | |
156 return result; | |
157 } | |
158 | |
159 // The card table byte one after the card marking array | |
160 // entry for argument address. Typically used for higher bounds | |
161 // for loops iterating through the card table. | |
162 jbyte* byte_after(const void* p) const { | |
163 return byte_for(p) + 1; | |
164 } | |
165 | |
166 // Iterate over the portion of the card-table which covers the given | |
167 // region mr in the given space and apply cl to any dirty sub-regions | |
168 // of mr. cl and dcto_cl must either be the same closure or cl must | |
169 // wrap dcto_cl. Both are required - neither may be NULL. Also, dcto_cl | |
170 // may be modified. Note that this function will operate in a parallel | |
171 // mode if worker threads are available. | |
172 void non_clean_card_iterate(Space* sp, MemRegion mr, | |
173 DirtyCardToOopClosure* dcto_cl, | |
174 MemRegionClosure* cl, | |
175 bool clear); | |
176 | |
177 // Utility function used to implement the other versions below. | |
178 void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl, | |
179 bool clear); | |
180 | |
181 void par_non_clean_card_iterate_work(Space* sp, MemRegion mr, | |
182 DirtyCardToOopClosure* dcto_cl, | |
183 MemRegionClosure* cl, | |
184 bool clear, | |
185 int n_threads); | |
186 | |
187 // Dirty the bytes corresponding to "mr" (not all of which must be | |
188 // covered.) | |
189 void dirty_MemRegion(MemRegion mr); | |
190 | |
191 // Clear (to clean_card) the bytes entirely contained within "mr" (not | |
192 // all of which must be covered.) | |
193 void clear_MemRegion(MemRegion mr); | |
194 | |
195 // *** Support for parallel card scanning. | |
196 | |
197 enum SomeConstantsForParallelism { | |
198 StridesPerThread = 2, | |
199 CardsPerStrideChunk = 256 | |
200 }; | |
201 | |
202 // This is an array, one element per covered region of the card table. | |
203 // Each entry is itself an array, with one element per chunk in the | |
204 // covered region. Each entry of these arrays is the lowest non-clean | |
205 // card of the corresponding chunk containing part of an object from the | |
206 // previous chunk, or else NULL. | |
207 typedef jbyte* CardPtr; | |
208 typedef CardPtr* CardArr; | |
209 CardArr* _lowest_non_clean; | |
210 size_t* _lowest_non_clean_chunk_size; | |
211 uintptr_t* _lowest_non_clean_base_chunk_index; | |
212 int* _last_LNC_resizing_collection; | |
213 | |
214 // Initializes "lowest_non_clean" to point to the array for the region | |
215 // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk | |
216 // index of the corresponding to the first element of that array. | |
217 // Ensures that these arrays are of sufficient size, allocating if necessary. | |
218 // May be called by several threads concurrently. | |
219 void get_LNC_array_for_space(Space* sp, | |
220 jbyte**& lowest_non_clean, | |
221 uintptr_t& lowest_non_clean_base_chunk_index, | |
222 size_t& lowest_non_clean_chunk_size); | |
223 | |
224 // Returns the number of chunks necessary to cover "mr". | |
225 size_t chunks_to_cover(MemRegion mr) { | |
226 return (size_t)(addr_to_chunk_index(mr.last()) - | |
227 addr_to_chunk_index(mr.start()) + 1); | |
228 } | |
229 | |
230 // Returns the index of the chunk in a stride which | |
231 // covers the given address. | |
232 uintptr_t addr_to_chunk_index(const void* addr) { | |
233 uintptr_t card = (uintptr_t) byte_for(addr); | |
234 return card / CardsPerStrideChunk; | |
235 } | |
236 | |
237 // Apply cl, which must either itself apply dcto_cl or be dcto_cl, | |
238 // to the cards in the stride (of n_strides) within the given space. | |
239 void process_stride(Space* sp, | |
240 MemRegion used, | |
241 jint stride, int n_strides, | |
242 DirtyCardToOopClosure* dcto_cl, | |
243 MemRegionClosure* cl, | |
244 bool clear, | |
245 jbyte** lowest_non_clean, | |
246 uintptr_t lowest_non_clean_base_chunk_index, | |
247 size_t lowest_non_clean_chunk_size); | |
248 | |
249 // Makes sure that chunk boundaries are handled appropriately, by | |
250 // adjusting the min_done of dcto_cl, and by using a special card-table | |
251 // value to indicate how min_done should be set. | |
252 void process_chunk_boundaries(Space* sp, | |
253 DirtyCardToOopClosure* dcto_cl, | |
254 MemRegion chunk_mr, | |
255 MemRegion used, | |
256 jbyte** lowest_non_clean, | |
257 uintptr_t lowest_non_clean_base_chunk_index, | |
258 size_t lowest_non_clean_chunk_size); | |
259 | |
260 public: | |
261 // Constants | |
262 enum SomePublicConstants { | |
263 card_shift = 9, | |
264 card_size = 1 << card_shift, | |
265 card_size_in_words = card_size / sizeof(HeapWord) | |
266 }; | |
267 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
268 static int clean_card_val() { return clean_card; } |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
269 static int clean_card_mask_val() { return clean_card_mask; } |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
270 static int dirty_card_val() { return dirty_card; } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
271 static int claimed_card_val() { return claimed_card; } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
272 static int precleaned_card_val() { return precleaned_card; } |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
273 static int deferred_card_val() { return deferred_card; } |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
274 |
0 | 275 // For RTTI simulation. |
276 bool is_a(BarrierSet::Name bsn) { | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
277 return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn); |
0 | 278 } |
279 | |
280 CardTableModRefBS(MemRegion whole_heap, int max_covered_regions); | |
281 | |
282 // *** Barrier set functions. | |
283 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
284 bool has_write_ref_pre_barrier() { return false; } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
285 |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
286 inline bool write_ref_needs_barrier(void* field, oop new_val) { |
0 | 287 // Note that this assumes the perm gen is the highest generation |
288 // in the address space | |
289 return new_val != NULL && !new_val->is_perm(); | |
290 } | |
291 | |
292 // Record a reference update. Note that these versions are precise! | |
293 // The scanning code has to handle the fact that the write barrier may be | |
294 // either precise or imprecise. We make non-virtual inline variants of | |
295 // these functions here for performance. | |
296 protected: | |
297 void write_ref_field_work(oop obj, size_t offset, oop newVal); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
628
diff
changeset
|
298 virtual void write_ref_field_work(void* field, oop newVal); |
0 | 299 public: |
300 | |
301 bool has_write_ref_array_opt() { return true; } | |
302 bool has_write_region_opt() { return true; } | |
303 | |
304 inline void inline_write_region(MemRegion mr) { | |
305 dirty_MemRegion(mr); | |
306 } | |
307 protected: | |
308 void write_region_work(MemRegion mr) { | |
309 inline_write_region(mr); | |
310 } | |
311 public: | |
312 | |
313 inline void inline_write_ref_array(MemRegion mr) { | |
314 dirty_MemRegion(mr); | |
315 } | |
316 protected: | |
317 void write_ref_array_work(MemRegion mr) { | |
318 inline_write_ref_array(mr); | |
319 } | |
320 public: | |
321 | |
322 bool is_aligned(HeapWord* addr) { | |
323 return is_card_aligned(addr); | |
324 } | |
325 | |
326 // *** Card-table-barrier-specific things. | |
327 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
628
diff
changeset
|
328 template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {} |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
329 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
628
diff
changeset
|
330 template <class T> inline void inline_write_ref_field(T* field, oop newVal) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
628
diff
changeset
|
331 jbyte* byte = byte_for((void*)field); |
0 | 332 *byte = dirty_card; |
333 } | |
334 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
335 // These are used by G1, when it uses the card table as a temporary data |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
336 // structure for card claiming. |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
337 bool is_card_dirty(size_t card_index) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
338 return _byte_map[card_index] == dirty_card_val(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
339 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
340 |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
341 void mark_card_dirty(size_t card_index) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
342 _byte_map[card_index] = dirty_card_val(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
343 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
344 |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
345 bool is_card_claimed(size_t card_index) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
346 jbyte val = _byte_map[card_index]; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
347 return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val(); |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
348 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
349 |
1261 | 350 void set_card_claimed(size_t card_index) { |
351 jbyte val = _byte_map[card_index]; | |
352 if (val == clean_card_val()) { | |
353 val = (jbyte)claimed_card_val(); | |
354 } else { | |
355 val |= (jbyte)claimed_card_val(); | |
356 } | |
357 _byte_map[card_index] = val; | |
358 } | |
359 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
360 bool claim_card(size_t card_index); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
361 |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
362 bool is_card_clean(size_t card_index) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
363 return _byte_map[card_index] == clean_card_val(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
364 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
365 |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
366 bool is_card_deferred(size_t card_index) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
367 jbyte val = _byte_map[card_index]; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
368 return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
369 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
370 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
371 bool mark_card_deferred(size_t card_index); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
372 |
0 | 373 // Card marking array base (adjusted for heap low boundary) |
374 // This would be the 0th element of _byte_map, if the heap started at 0x0. | |
375 // But since the heap starts at some higher address, this points to somewhere | |
376 // before the beginning of the actual _byte_map. | |
377 jbyte* byte_map_base; | |
378 | |
379 // Return true if "p" is at the start of a card. | |
380 bool is_card_aligned(HeapWord* p) { | |
381 jbyte* pcard = byte_for(p); | |
382 return (addr_for(pcard) == p); | |
383 } | |
384 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
1972
diff
changeset
|
385 HeapWord* align_to_card_boundary(HeapWord* p) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
1972
diff
changeset
|
386 jbyte* pcard = byte_for(p + card_size_in_words - 1); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
1972
diff
changeset
|
387 return addr_for(pcard); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
1972
diff
changeset
|
388 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
1972
diff
changeset
|
389 |
0 | 390 // The kinds of precision a CardTableModRefBS may offer. |
391 enum PrecisionStyle { | |
392 Precise, | |
393 ObjHeadPreciseArray | |
394 }; | |
395 | |
396 // Tells what style of precision this card table offers. | |
397 PrecisionStyle precision() { | |
398 return ObjHeadPreciseArray; // Only one supported for now. | |
399 } | |
400 | |
401 // ModRefBS functions. | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
402 virtual void invalidate(MemRegion mr, bool whole_heap = false); |
0 | 403 void clear(MemRegion mr); |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
404 void dirty(MemRegion mr); |
0 | 405 void mod_oop_in_space_iterate(Space* sp, OopClosure* cl, |
406 bool clear = false, | |
407 bool before_save_marks = false); | |
408 | |
409 // *** Card-table-RemSet-specific things. | |
410 | |
411 // Invoke "cl.do_MemRegion" on a set of MemRegions that collectively | |
412 // includes all the modified cards (expressing each card as a | |
413 // MemRegion). Thus, several modified cards may be lumped into one | |
414 // region. The regions are non-overlapping, and are visited in | |
415 // *decreasing* address order. (This order aids with imprecise card | |
416 // marking, where a dirty card may cause scanning, and summarization | |
417 // marking, of objects that extend onto subsequent cards.) | |
418 // If "clear" is true, the card is (conceptually) marked unmodified before | |
419 // applying the closure. | |
420 void mod_card_iterate(MemRegionClosure* cl, bool clear = false) { | |
421 non_clean_card_iterate_work(_whole_heap, cl, clear); | |
422 } | |
423 | |
424 // Like the "mod_cards_iterate" above, except only invokes the closure | |
425 // for cards within the MemRegion "mr" (which is required to be | |
426 // card-aligned and sized.) | |
427 void mod_card_iterate(MemRegion mr, MemRegionClosure* cl, | |
428 bool clear = false) { | |
429 non_clean_card_iterate_work(mr, cl, clear); | |
430 } | |
431 | |
432 static uintx ct_max_alignment_constraint(); | |
433 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
434 // Apply closure "cl" to the dirty cards containing some part of |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
435 // MemRegion "mr". |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
436 void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl); |
0 | 437 |
438 // Return the MemRegion corresponding to the first maximal run | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
439 // of dirty cards lying completely within MemRegion mr. |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
440 // If reset is "true", then sets those card table entries to the given |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
441 // value. |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
442 MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
443 int reset_val); |
0 | 444 |
445 // Set all the dirty cards in the given region to precleaned state. | |
446 void preclean_dirty_cards(MemRegion mr); | |
447 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
448 // Provide read-only access to the card table array. |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
449 const jbyte* byte_for_const(const void* p) const { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
450 return byte_for(p); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
451 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
452 const jbyte* byte_after_const(const void* p) const { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
453 return byte_after(p); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
454 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
455 |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
456 // Mapping from card marking array entry to address of first word |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
457 HeapWord* addr_for(const jbyte* p) const { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
458 assert(p >= _byte_map && p < _byte_map + _byte_map_size, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
459 "out of bounds access to card marking array"); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
460 size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte)); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
461 HeapWord* result = (HeapWord*) (delta << card_shift); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
462 assert(_whole_heap.contains(result), |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
463 "out of bounds accessor from card marking array"); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
464 return result; |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
465 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
466 |
0 | 467 // Mapping from address to card marking array index. |
489
2494ab195856
6653214: MemoryPoolMXBean.setUsageThreshold() does not support large heap sizes.
swamyv
parents:
356
diff
changeset
|
468 size_t index_for(void* p) { |
0 | 469 assert(_whole_heap.contains(p), |
470 "out of bounds access to card marking array"); | |
471 return byte_for(p) - _byte_map; | |
472 } | |
473 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
474 const jbyte* byte_for_index(const size_t card_index) const { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
475 return _byte_map + card_index; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
476 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
489
diff
changeset
|
477 |
0 | 478 void verify(); |
479 void verify_guard(); | |
480 | |
481 void verify_clean_region(MemRegion mr) PRODUCT_RETURN; | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
845
diff
changeset
|
482 void verify_dirty_region(MemRegion mr) PRODUCT_RETURN; |
0 | 483 |
484 static size_t par_chunk_heapword_alignment() { | |
485 return CardsPerStrideChunk * card_size_in_words; | |
486 } | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
487 |
0 | 488 }; |
489 | |
490 class CardTableRS; | |
491 | |
492 // A specialization for the CardTableRS gen rem set. | |
493 class CardTableModRefBSForCTRS: public CardTableModRefBS { | |
494 CardTableRS* _rs; | |
495 protected: | |
496 bool card_will_be_scanned(jbyte cv); | |
497 bool card_may_have_been_dirty(jbyte cv); | |
498 public: | |
499 CardTableModRefBSForCTRS(MemRegion whole_heap, | |
500 int max_covered_regions) : | |
501 CardTableModRefBS(whole_heap, max_covered_regions) {} | |
502 | |
503 void set_CTRS(CardTableRS* rs) { _rs = rs; } | |
504 }; | |
1972 | 505 |
506 #endif // SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP |