Mercurial > hg > graal-jvmci-8
annotate src/share/vm/memory/blockOffsetTable.cpp @ 1994:6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, as appropriate, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
author | ysr |
---|---|
date | Tue, 07 Dec 2010 21:55:53 -0800 |
parents | f95d63e2154a |
children | 7d64aa23eb96 |
rev | line source |
---|---|
0 | 1 /* |
1716
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1552
diff
changeset
|
2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1489
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1489
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1489
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "gc_interface/collectedHeap.inline.hpp" | |
27 #include "memory/blockOffsetTable.inline.hpp" | |
28 #include "memory/iterator.hpp" | |
29 #include "memory/space.inline.hpp" | |
30 #include "memory/universe.hpp" | |
31 #include "oops/oop.inline.hpp" | |
32 #include "runtime/java.hpp" | |
0 | 33 |
34 ////////////////////////////////////////////////////////////////////// | |
35 // BlockOffsetSharedArray | |
36 ////////////////////////////////////////////////////////////////////// | |
37 | |
// Construct the shared backing store for block-offset tables covering
// "reserved".  Reserves virtual space sized for the whole reserved heap
// range (compute_size), then commits only enough to cover
// "init_word_size" words (via resize()).  Exits the VM if either the
// reservation or the initial commit fails.
BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
                                               size_t init_word_size):
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(size);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  // The offset entries live at the bottom of the committed virtual space.
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr("  "
                  "  rs.base(): " INTPTR_FORMAT
                  "  rs.size(): " INTPTR_FORMAT
                  "  rs end(): " INTPTR_FORMAT,
                  rs.base(), rs.size(), rs.base() + rs.size());
    gclog_or_tty->print_cr("  "
                  "  _vs.low_boundary(): " INTPTR_FORMAT
                  "  _vs.high_boundary(): " INTPTR_FORMAT,
                  _vs.low_boundary(),
                  _vs.high_boundary());
  }
}
66 | |
67 void BlockOffsetSharedArray::resize(size_t new_word_size) { | |
68 assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved"); | |
69 size_t new_size = compute_size(new_word_size); | |
70 size_t old_size = _vs.committed_size(); | |
71 size_t delta; | |
72 char* high = _vs.high(); | |
73 _end = _reserved.start() + new_word_size; | |
74 if (new_size > old_size) { | |
75 delta = ReservedSpace::page_align_size_up(new_size - old_size); | |
76 assert(delta > 0, "just checking"); | |
77 if (!_vs.expand_by(delta)) { | |
78 // Do better than this for Merlin | |
79 vm_exit_out_of_memory(delta, "offset table expansion"); | |
80 } | |
81 assert(_vs.high() == high + delta, "invalid expansion"); | |
82 } else { | |
83 delta = ReservedSpace::page_align_size_down(old_size - new_size); | |
84 if (delta == 0) return; | |
85 _vs.shrink_by(delta); | |
86 assert(_vs.high() == high - delta, "invalid expansion"); | |
87 } | |
88 } | |
89 | |
90 bool BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const { | |
91 assert(p >= _reserved.start(), "just checking"); | |
92 size_t delta = pointer_delta(p, _reserved.start()); | |
93 return (delta & right_n_bits(LogN_words)) == (size_t)NoBits; | |
94 } | |
95 | |
96 | |
// Serialize (read or write, per the closure) the portion of the offset
// array covering the heap range [start, end).  Used for shared-heap
// (CDS) dump/restore — the closure decides the direction.
void BlockOffsetSharedArray::serialize(SerializeOopClosure* soc,
                                       HeapWord* start, HeapWord* end) {
  assert(_offset_array[0] == 0, "objects can't cross covered areas");
  assert(start <= end, "bad address range");
  size_t start_index = index_for(start);
  // index_for(end-1)+1 makes the card range right-open like [start, end).
  size_t end_index = index_for(end-1)+1;
  soc->do_region(&_offset_array[start_index],
                 (end_index - start_index) * sizeof(_offset_array[0]));
}
106 | |
107 ////////////////////////////////////////////////////////////////////// | |
108 // BlockOffsetArray | |
109 ////////////////////////////////////////////////////////////////////// | |
110 | |
111 BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array, | |
1716
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1552
diff
changeset
|
112 MemRegion mr, bool init_to_zero_) : |
0 | 113 BlockOffsetTable(mr.start(), mr.end()), |
1716
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1552
diff
changeset
|
114 _array(array) |
0 | 115 { |
116 assert(_bottom <= _end, "arguments out of order"); | |
1716
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1552
diff
changeset
|
117 set_init_to_zero(init_to_zero_); |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1552
diff
changeset
|
118 if (!init_to_zero_) { |
0 | 119 // initialize cards to point back to mr.start() |
120 set_remainder_to_point_to_start(mr.start() + N_words, mr.end()); | |
121 _array->set_offset_array(0, 0); // set first card to 0 | |
122 } | |
123 } | |
124 | |
125 | |
// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
//
// Fills the cards covering [start, end) with logarithmic "back-skip"
// entries so a backward walk reaches the offset card of the enclosing
// block.  "reducing" asserts that entries may only shrink (see
// check_reducing_assertion).
void
BlockOffsetArray::
set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing) {

  check_reducing_assertion(reducing);
  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address) so there are no cards
    // that need to be updated.
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                          75
  //           12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back slide
  //        (e.g., with today's, offset = 0x81 =>
  //          back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  // Both endpoints must be card-aligned (end is exclusive, hence +N_words).
  assert(start ==_array->address_for_index(start_card), "Precondition");
  assert(end ==_array->address_for_index(end_card)+N_words, "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card, reducing); // closed interval
}
179 | |
180 | |
// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
//
// Writes N_words+i into successive "power blocks": the i-th region gets
// entries meaning "skip back Base**i cards", so a backward walk reaches
// the offset card in O(log distance) steps.
void
BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card, bool reducing) {

  check_reducing_assertion(reducing);
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  // The card just before the range must be an offset card (value <= N_words).
  assert(_array->offset_array(start_card-1) <= N_words,
         "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  for (int i = 0; i < N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (power_to_cards_back(i+1) - 1);
    offset = N_words + i;
    if (reach >= end_card) {
      // Final (possibly partial) power block; stop after filling it.
      _array->set_offset_array(start_card_for_region, end_card, offset, reducing);
      start_card_for_region = reach + 1;
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset, reducing);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}
213 | |
// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
//
// Verifies the logarithmic back-skip structure written by
// set_remainder_to_point_to_start_incl(): entries are monotonically
// non-decreasing, each back-skip lands on a card whose entry is no
// larger, and the walk never overshoots the offset card at
// start_card - 1.
void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  u_char last_entry = N_words;
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    guarantee(entry >= last_entry, "Monotonicity");
    if (c - start_card > power_to_cards_back(1)) {
      // Past the first power block every entry must encode a back-skip.
      guarantee(entry > N_words, "Should be in logarithmic region");
    }
    size_t backskip = entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "Monotonicity");
    } else {
      guarantee(landing_card == (start_card - 1), "Tautology");
      // Note that N_words is the maximum offset value
      guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
    }
    last_entry = entry;  // remember for monotonicity test
  }
}
243 | |
244 | |
// Record the newly allocated block [blk_start, blk_end) in the BOT.
// Right-open interval; delegates to single_block().
void
BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
}
251 | |
// Action_mark   - update the BOT for the block [blk_start, blk_end).
//                 Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check  - BOT verification.
//
// Common worker for the three actions above.  "reducing" is threaded
// through to the entry-setting helpers (see check_reducing_assertion).
// If the block does not cross a card boundary the table needs no change
// and the body is skipped entirely.
void
BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                    HeapWord* blk_end,
                                    Action action, bool reducing) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start, reducing);
          break;
        } // Else fall through to the next case
          // (deliberate: when !init_to_zero(), marking == single-block update)
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start, reducing);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st  = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index)   + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end, reducing);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}
318 | |
// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
BlockOffsetArray::single_block(HeapWord* blk_start,
                               HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}
328 | |
// Walk every active BOT entry and check that it resolves to the start
// of a plausible block/object at or left of the card's first address.
// Debug/verification only; only runs full object checks once the
// Universe is fully initialized.
void BlockOffsetArray::verify() const {
  // For each entry in the block offset table, verify that
  // the entry correctly finds the start of an object at the
  // first address covered by the block or to the left of that
  // first address.

  size_t next_index = 1;
  size_t last_index = last_active_index();

  // Use for debugging.  Initialize to NULL to distinguish the
  // first iteration through the while loop.
  HeapWord* last_p = NULL;
  HeapWord* last_start = NULL;
  oop last_o = NULL;

  while (next_index <= last_index) {
    // Use an address past the start of the address for
    // the entry.
    HeapWord* p = _array->address_for_index(next_index) + 1;
    if (p >= _end) {
      // That's all of the allocated block table.
      return;
    }
    // block_start() asserts that start <= p.
    HeapWord* start = block_start(p);
    // First check if the start is an allocated block and only
    // then if it is a valid object.
    oop o = oop(start);
    assert(!Universe::is_fully_initialized() ||
           _sp->is_free_block(start) ||
           o->is_oop_or_null(), "Bad object was found");
    next_index++;
    // Kept so a debugger can inspect the previous iteration's state.
    last_p = p;
    last_start = start;
    last_o = o;
  }
}
366 | |
367 ////////////////////////////////////////////////////////////////////// | |
368 // BlockOffsetArrayNonContigSpace | |
369 ////////////////////////////////////////////////////////////////////// | |
370 | |
// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using
// the somewhat more lightweight split_block() or
// (when init_to_zero()) mark_block() wherever possible.
// right-open interval: [blk_start, blk_end)
void
BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start,
                                            HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
  // NOTE(review): allocated() appears to maintain the _unallocated_block
  // high-water mark -- confirm against blockOffsetTable.hpp.
  allocated(blk_start, blk_end);
}
385 | |
// Adjust BOT to show that a previously whole block has been split
// into two.  We verify the BOT for the first part (prefix) and
// update the BOT for the second part (suffix).
//   blk is the start of the block
//   blk_size is the size of the original block
//   left_blk_size is the size of the first part of the split
// All updates use reducing == true: entries only ever shrink, which is
// what makes this safe with respect to concurrent BOT readers.
void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
                                                 size_t blk_size,
                                                 size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  assert(blk_size > 0, "Should be positive");
  assert(left_blk_size > 0, "Should be positive");
  assert(left_blk_size < blk_size, "Not a split");

  // Start addresses of prefix block and suffix block.
  HeapWord* pref_addr = blk;
  HeapWord* suff_addr = blk + left_blk_size;
  HeapWord* end_addr  = blk + blk_size;

  // Indices for starts of prefix block and suffix block.
  // Round up to the first card fully inside each part when the address
  // is not card-aligned.
  size_t pref_index = _array->index_for(pref_addr);
  if (_array->address_for_index(pref_index) != pref_addr) {
    // pref_addr does not begin pref_index
    pref_index++;
  }

  size_t suff_index = _array->index_for(suff_addr);
  if (_array->address_for_index(suff_index) != suff_addr) {
    // suff_addr does not begin suff_index
    suff_index++;
  }

  // Definition: A block B, denoted [B_start, B_end) __starts__
  //     a card C, denoted [C_start, C_end), where C_start and C_end
  //     are the heap addresses that card C covers, iff
  //     B_start <= C_start < B_end.
  //
  //     We say that a card C "is started by" a block B, iff
  //     B "starts" C.
  //
  //     Note that the cardinality of the set of cards {C}
  //     started by a block B can be 0, 1, or more.
  //
  // Below, pref_index and suff_index are, respectively, the
  // first (least) card indices that the prefix and suffix of
  // the split start; end_index is one more than the index of
  // the last (greatest) card that blk starts.
  size_t end_index  = _array->index_for(end_addr - 1) + 1;

  // Calculate the # cards that the prefix and suffix affect.
  size_t num_pref_cards = suff_index - pref_index;

  size_t num_suff_cards = end_index  - suff_index;
  // Change the cards that need changing
  if (num_suff_cards > 0) {
    HeapWord* boundary = _array->address_for_index(suff_index);
    // Set the offset card for suffix block
    _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */);
    // Change any further cards that need changing in the suffix
    if (num_pref_cards > 0) {
      if (num_pref_cards >= num_suff_cards) {
        // Unilaterally fix all of the suffix cards: closed card
        // index interval in args below.
        set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */);
      } else {
        // Unilaterally fix the first (num_pref_cards - 1) following
        // the "offset card" in the suffix block.
        set_remainder_to_point_to_start_incl(suff_index + 1,
                                             suff_index + num_pref_cards - 1, true /* reducing */);
        // Fix the appropriate cards in the remainder of the
        // suffix block -- these are the last num_pref_cards
        // cards in each power block of the "new" range plumbed
        // from suff_addr.
        bool more = true;
        uint i = 1;
        // First loop: find the power block where the back-skip distance
        // first exceeds num_pref_cards, fixing it on the way out.
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            more = false;
          }
          if (back_by > num_pref_cards) {
            // Fill in the remainder of this "power block", if it
            // is non-null.
            if (left_index <= right_index) {
              _array->set_offset_array(left_index, right_index,
                                       N_words + i - 1, true /* reducing */);
            } else {
              more = false; // we are done
            }
            i++;
            break;
          }
          i++;
        }
        // Second loop: fix the trailing num_pref_cards of each remaining
        // power block.
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            if (left_index > right_index) {
              break;
            }
            more  = false;
          }
          assert(left_index <= right_index, "Error");
          _array->set_offset_array(left_index, right_index, N_words + i - 1, true /* reducing */);
          i++;
        }
      }
    } // else no more cards to fix in suffix
  } // else nothing needs to be done
  // Verify that we did the right thing
  verify_single_block(pref_addr, left_blk_size);
  verify_single_block(suff_addr, blk_size - left_blk_size);
}
508 | |
509 | |
// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
// "reducing" is forwarded to do_block_internal (entries may only shrink
// when true; see check_reducing_assertion).
void
BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
                                           HeapWord* blk_end, bool reducing) {
  do_block_internal(blk_start, blk_end, Action_mark, reducing);
}
520 | |
// Find the start of the block containing "addr": first via the
// back-skip walk through the offset table, then by parsing forward
// through actual blocks ("unsafe" because it calls block_size() on heap
// contents, which may race with concurrent allocation).
HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    // addr lies in the not-yet-allocated tail; the block "start" is ub.
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table.
  size_t index = _array->index_for(addr);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  index--;
  q -= offset;
  HeapWord* n = q;

  // Walk forward block-by-block until we step past addr; the block we
  // stepped from contains addr.
  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
    assert(n > q, err_msg("Looping at: " INTPTR_FORMAT, n));
  }
  assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
  assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
  return q;
}
563 | |
// Like block_start_unsafe(), but uses ONLY the offset table -- it never
// parses objects on the heap -- so the result may be the start of a
// block to the left of the one containing addr.  Requires addr to be a
// card boundary.
HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index);
    if (offset < N_words) {
      // Direct offset to the block start within this card.
      q -= offset;
    } else {
      // Logarithmic back-skip entry; keep walking.
      size_t n_cards_back = entry_to_cards_back(offset);
      q -= (n_cards_back * N_words);
      index -= n_cards_back;
    }
  } while (offset >= N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}
600 | |
601 #ifndef PRODUCT | |
// Verification & debugging  - ensure that the offset table reflects the fact
// that the block [blk_start, blk_end) or [blk, blk + size) is a
// single block of storage. NOTE: can't const this because of
// call to non-const do_block_internal() below.
void BlockOffsetArrayNonContigSpace::verify_single_block(
  HeapWord* blk_start, HeapWord* blk_end) {
  // Expensive; only runs under the VerifyBlockOffsetArray flag.
  if (VerifyBlockOffsetArray) {
    do_block_internal(blk_start, blk_end, Action_check);
  }
}
612 | |
613 void BlockOffsetArrayNonContigSpace::verify_single_block( | |
614 HeapWord* blk, size_t size) { | |
615 verify_single_block(blk, blk + size); | |
616 } | |
617 | |
618 // Verify that the given block is before _unallocated_block | |
619 void BlockOffsetArrayNonContigSpace::verify_not_unallocated( | |
620 HeapWord* blk_start, HeapWord* blk_end) const { | |
621 if (BlockOffsetArrayUseUnallocatedBlock) { | |
622 assert(blk_start < blk_end, "Block inconsistency?"); | |
623 assert(blk_end <= _unallocated_block, "_unallocated_block problem"); | |
624 } | |
625 } | |
626 | |
627 void BlockOffsetArrayNonContigSpace::verify_not_unallocated( | |
628 HeapWord* blk, size_t size) const { | |
629 verify_not_unallocated(blk, blk + size); | |
630 } | |
631 #endif // PRODUCT | |
632 | |
633 size_t BlockOffsetArrayNonContigSpace::last_active_index() const { | |
634 if (_unallocated_block == _bottom) { | |
635 return 0; | |
636 } else { | |
637 return _array->index_for(_unallocated_block - 1); | |
638 } | |
639 } | |
640 | |
641 ////////////////////////////////////////////////////////////////////// | |
642 // BlockOffsetArrayContigSpace | |
643 ////////////////////////////////////////////////////////////////////// | |
644 | |
645 HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const { | |
646 assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); | |
647 | |
648 // Otherwise, find the block start using the table. | |
649 assert(_bottom <= addr && addr < _end, | |
650 "addr must be covered by this Array"); | |
651 size_t index = _array->index_for(addr); | |
652 // We must make sure that the offset table entry we use is valid. If | |
653 // "addr" is past the end, start at the last known one and go forward. | |
654 index = MIN2(index, _next_offset_index-1); | |
655 HeapWord* q = _array->address_for_index(index); | |
656 | |
657 uint offset = _array->offset_array(index); // Extend u_char to uint. | |
658 while (offset > N_words) { | |
659 // The excess of the offset from N_words indicates a power of Base | |
660 // to go back by. | |
661 size_t n_cards_back = entry_to_cards_back(offset); | |
662 q -= (N_words * n_cards_back); | |
663 assert(q >= _sp->bottom(), "Went below bottom!"); | |
664 index -= n_cards_back; | |
665 offset = _array->offset_array(index); | |
666 } | |
667 while (offset == N_words) { | |
668 assert(q >= _sp->bottom(), "Went below bottom!"); | |
669 q -= N_words; | |
670 index--; | |
671 offset = _array->offset_array(index); | |
672 } | |
673 assert(offset < N_words, "offset too large"); | |
674 q -= offset; | |
675 HeapWord* n = q; | |
676 | |
677 while (n <= addr) { | |
678 debug_only(HeapWord* last = q); // for debugging | |
679 q = n; | |
680 n += _sp->block_size(n); | |
681 } | |
682 assert(q <= addr, "wrong order for current and arg"); | |
683 assert(addr <= n, "wrong order for arg and next"); | |
684 return q; | |
685 } | |
686 | |
687 // | |
688 // _next_offset_threshold | |
689 // | _next_offset_index | |
690 // v v | |
691 // +-------+-------+-------+-------+-------+ | |
692 // | i-1 | i | i+1 | i+2 | i+3 | | |
693 // +-------+-------+-------+-------+-------+ | |
694 // ( ^ ] | |
695 // block-start | |
696 // | |
697 | |
698 void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start, | |
699 HeapWord* blk_end) { | |
700 assert(blk_start != NULL && blk_end > blk_start, | |
701 "phantom block"); | |
702 assert(blk_end > _next_offset_threshold, | |
703 "should be past threshold"); | |
704 assert(blk_start <= _next_offset_threshold, | |
1489
cff162798819
6888953: some calls to function-like macros are missing semicolons
jcoomes
parents:
342
diff
changeset
|
705 "blk_start should be at or before threshold"); |
0 | 706 assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words, |
707 "offset should be <= BlockOffsetSharedArray::N"); | |
708 assert(Universe::heap()->is_in_reserved(blk_start), | |
709 "reference must be into the heap"); | |
710 assert(Universe::heap()->is_in_reserved(blk_end-1), | |
711 "limit must be within the heap"); | |
712 assert(_next_offset_threshold == | |
713 _array->_reserved.start() + _next_offset_index*N_words, | |
714 "index must agree with threshold"); | |
715 | |
716 debug_only(size_t orig_next_offset_index = _next_offset_index;) | |
717 | |
718 // Mark the card that holds the offset into the block. Note | |
719 // that _next_offset_index and _next_offset_threshold are not | |
720 // updated until the end of this method. | |
721 _array->set_offset_array(_next_offset_index, | |
722 _next_offset_threshold, | |
723 blk_start); | |
724 | |
725 // We need to now mark the subsequent cards that this blk spans. | |
726 | |
727 // Index of card on which blk ends. | |
728 size_t end_index = _array->index_for(blk_end - 1); | |
729 | |
730 // Are there more cards left to be updated? | |
731 if (_next_offset_index + 1 <= end_index) { | |
732 HeapWord* rem_st = _array->address_for_index(_next_offset_index + 1); | |
733 // Calculate rem_end this way because end_index | |
734 // may be the last valid index in the covered region. | |
735 HeapWord* rem_end = _array->address_for_index(end_index) + N_words; | |
736 set_remainder_to_point_to_start(rem_st, rem_end); | |
737 } | |
738 | |
739 // _next_offset_index and _next_offset_threshold updated here. | |
740 _next_offset_index = end_index + 1; | |
741 // Calculate _next_offset_threshold this way because end_index | |
742 // may be the last valid index in the covered region. | |
1716
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1552
diff
changeset
|
743 _next_offset_threshold = _array->address_for_index(end_index) + N_words; |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1552
diff
changeset
|
744 assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold"); |
0 | 745 |
746 #ifdef ASSERT | |
747 // The offset can be 0 if the block starts on a boundary. That | |
748 // is checked by an assertion above. | |
749 size_t start_index = _array->index_for(blk_start); | |
750 HeapWord* boundary = _array->address_for_index(start_index); | |
751 assert((_array->offset_array(orig_next_offset_index) == 0 && | |
752 blk_start == boundary) || | |
753 (_array->offset_array(orig_next_offset_index) > 0 && | |
754 _array->offset_array(orig_next_offset_index) <= N_words), | |
755 "offset array should have been set"); | |
756 for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) { | |
757 assert(_array->offset_array(j) > 0 && | |
758 _array->offset_array(j) <= (u_char) (N_words+N_powers-1), | |
759 "offset array should have been set"); | |
760 } | |
761 #endif | |
762 } | |
763 | |
764 HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() { | |
765 assert(!Universe::heap()->is_in_reserved(_array->_offset_array), | |
766 "just checking"); | |
767 _next_offset_index = _array->index_for(_bottom); | |
768 _next_offset_index++; | |
769 _next_offset_threshold = | |
770 _array->address_for_index(_next_offset_index); | |
771 return _next_offset_threshold; | |
772 } | |
773 | |
774 void BlockOffsetArrayContigSpace::zero_bottom_entry() { | |
775 assert(!Universe::heap()->is_in_reserved(_array->_offset_array), | |
776 "just checking"); | |
777 size_t bottom_index = _array->index_for(_bottom); | |
778 _array->set_offset_array(bottom_index, 0); | |
779 } | |
780 | |
781 | |
782 void BlockOffsetArrayContigSpace::serialize(SerializeOopClosure* soc) { | |
783 if (soc->reading()) { | |
784 // Null these values so that the serializer won't object to updating them. | |
785 _next_offset_threshold = NULL; | |
786 _next_offset_index = 0; | |
787 } | |
788 soc->do_ptr(&_next_offset_threshold); | |
789 soc->do_size_t(&_next_offset_index); | |
790 } | |
791 | |
792 size_t BlockOffsetArrayContigSpace::last_active_index() const { | |
793 size_t result = _next_offset_index - 1; | |
794 return result >= 0 ? result : 0; | |
795 } |