annotate src/share/vm/memory/blockOffsetTable.hpp @ 1716:be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
Summary: GC workers now recognize an intermediate transient state of blocks which are allocated but have not yet completed initialization. blk_start() calls do not attempt to determine the size of a block in the transient state, rather waiting for the block to become initialized so that it is safe to query its size. Audited and ensured the order of initialization of object fields (klass, free bit and size) to respect block state transition protocol. Also included some new assertion checking code enabled in debug mode.
Reviewed-by: chrisphi, johnc, poonam
author:   ysr
date:     Mon, 16 Aug 2010 15:58:42 -0700
parents:  c18cbe5936b8
children: 52f2bc645da5

/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// The CollectedHeap type requires subtypes to implement a method
// "block_start".  For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important.  Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;
class SerializeOopClosure;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};

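// For illustration only (a sketch, not part of this interface): a typical
// caller is a card-scanning closure that holds an arbitrary address within a
// dirty card and needs the start of the enclosing block before it can parse
// objects, along the lines of
//
//   HeapWord* blk = bot->block_start(card_boundary);  // "bot" covers the card
//   // ... walk objects forward from "blk" across the dirty card ...
//
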
//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN").  An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space.  However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place.  (Consider,
// for example, the garbage-first generation.)

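// Illustrative example (not from the original sources): if an object starts
// in the middle of card k and spills into card k+1, the entry for card k+1
// records the word distance back from the start of card k+1 to the object's
// start (a value no larger than "N").  Cards that lie deeper inside a large
// block use the logarithmic back-skip encoding declared in BlockOffsetArray
// below, so a walker reaches the block start in a bounded number of hops.
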
// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

 private:
  enum SomePrivateConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };

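  // Worked example (illustration only, not from the original sources): with
  // LogN = 9, each BOT "card" covers N_bytes = 2^9 = 512 bytes.  On an LP64
  // build LogHeapWordSize is 3, so LogN_words = 6 and a card spans
  // N_words = 64 heap words; on a 32-bit build LogHeapWordSize is 2, giving
  // N_words = 128.  Each u_char entry in _offset_array describes one such
  // card.
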
  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

 protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    // Below, we may use an explicit loop instead of memset
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
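
  // Worked example (illustration only): covering 1M heap words with
  // N_words = 64 needs 1M/64 = 16384 entries plus the extra slot above,
  // i.e. 16385 slots, which allocation_align_size_up() then rounds up to
  // the platform's allocation granularity.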

 public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size".  In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".)  The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table.  The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero. Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;

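  // Illustrative sketch (not part of this class): index_for() and
  // address_for_index() are inverses up to card granularity, e.g.
  //
  //   size_t i = index_for(p);                  // card containing "p"
  //   HeapWord* card_start = address_for_index(i);
  //   assert(card_start <= (HeapWord*)p &&
  //          (HeapWord*)p < card_start + N_words, "p lies within card i");
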
  // Return the address "p" incremented by the size of
  // a region.  This method does not align the address
  // returned to the start of a region.  It is a simple
  // primitive.
  HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }

  // Shared space support
  void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end);
};

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
  friend class G1BlockOffsetArray; // temp. until we restructure and cleanup
 protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  enum SomePrivateConstants {
    N_words = BlockOffsetSharedArray::N_words,
    LogN    = BlockOffsetSharedArray::LogN,
    // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
    // All entries are less than "N_words + N_powers".
    LogBase = 4,
    Base = (1 << LogBase),
    N_powers = 14
  };

  static size_t power_to_cards_back(uint i) {
    return (size_t)(1 << (LogBase * i));
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }
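
  // Worked example (illustration only): with LogBase = 4, an entry of
  // N_words + i tells a BOT walker to skip back Base^i = 16^i cards and
  // consult the entry found there.  So N_words means "1 card back",
  // N_words + 1 means "16 cards back", N_words + 2 means "256 cards back",
  // and so on; the walk repeats until it reaches an entry smaller than
  // N_words, which is a direct word offset to the block start.  With
  // N_powers = 14, any covered block is reachable in a handful of hops.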

  // The shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries
  // corresponding to the cards starting at "start" and ending at "end"
  // to point back to the card before "start":  the interval [start, end)
  // is right-open.  The last parameter, "reducing", indicates whether the
  // updates to individual entries always reduce the entry from a higher
  // to a lower value.  (For example, this holds true during a phase in
  // which only block splits are updating the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);

 public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter.  If "init_to_zero" is true, the
  // elements of the array are initialized to zero.  Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size).  All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
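
  // Illustrative sketch (not from the original sources): an allocator that
  // carves an object out of a space it owns would typically follow the raw
  // allocation with a BOT update so that later interior-pointer queries
  // resolve to the new object's start, e.g.
  //
  //   HeapWord* obj = allocate_from_space(word_size);   // hypothetical helper
  //   bot->alloc_block(obj, word_size);
  //   assert(bot->block_start(obj + word_size/2) == obj, "BOT covers obj");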

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL, "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

 public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
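
  // Worked example (illustration only): when a free-list allocator (such as
  // CMS's) carves a 30-word chunk off the front of a 100-word free block at
  // "blk_start", split_block(blk_start, 100, 30) adjusts the BOT so that
  // queries in [blk_start, blk_start + 30) resolve to blk_start and queries
  // in [blk_start + 30, blk_start + 100) resolve to blk_start + 30.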

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size).  Only the first card
  // of BOT is touched.  It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
    mark_block(blk, blk + size, reducing);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed.  It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
    allocated(blk, blk + size, reducing);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size);

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

 public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
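
  // Illustrative note (not from the original sources): allocation in a
  // contiguous space is bump-pointer, so most alloc_block() calls are just
  // one compare against _next_offset_threshold.  For example, with
  // N_words = 64, successive small allocations inside the current card all
  // take the cheap path; only the allocation whose blk_end first crosses
  // the threshold (spills into a fresh card) calls alloc_block_work(),
  // which fills the affected entries and advances the threshold.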

  HeapWord* block_start_unsafe(const void* addr) const;

  void serialize(SerializeOopClosure* soc);

  // Debugging support
  virtual size_t last_active_index() const;
};