comparison src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp @ 342:37f87013dfd8

6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr
author ysr
date Thu, 05 Jun 2008 15:57:56 -0700
parents 189:0b27f3512f9e
children e0c09f7ec5c4
/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1BlockOffsetTable.cpp.incl"

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////
G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
                                                   size_t init_word_size) :
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr("  "
                           "  rs.base(): " INTPTR_FORMAT
                           "  rs.size(): " INTPTR_FORMAT
                           "  rs end(): " INTPTR_FORMAT,
                           rs.base(), rs.size(), rs.base() + rs.size());
    gclog_or_tty->print_cr("  "
                           "  _vs.low_boundary(): " INTPTR_FORMAT
                           "  _vs.high_boundary(): " INTPTR_FORMAT,
                           _vs.low_boundary(),
                           _vs.high_boundary());
  }
}

void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
  size_t new_size = compute_size(new_word_size);
  size_t old_size = _vs.committed_size();
  size_t delta;
  char* high = _vs.high();
  _end = _reserved.start() + new_word_size;
  if (new_size > old_size) {
    delta = ReservedSpace::page_align_size_up(new_size - old_size);
    assert(delta > 0, "just checking");
    if (!_vs.expand_by(delta)) {
      // Do better than this for Merlin
      vm_exit_out_of_memory(delta, "offset table expansion");
    }
    assert(_vs.high() == high + delta, "invalid expansion");
    // Initialization of the contents is left to the
    // G1BlockOffsetArray that uses it.
  } else {
    delta = ReservedSpace::page_align_size_down(old_size - new_size);
    if (delta == 0) return;
    _vs.shrink_by(delta);
    assert(_vs.high() == high - delta, "invalid shrinkage");
  }
}

bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}
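
// A worked example of the test above (a sketch only; the actual constants
// come from the shared block offset table code): assuming 512-byte cards
// and 8-byte heap words, LogN_words == 6, so p is a card boundary exactly
// when its word offset from _reserved.start() is a multiple of 64:
//
//   is_card_boundary(_reserved.start())        // delta ==  0 -> true
//   is_card_boundary(_reserved.start() + 64)   // delta == 64 -> true
//   is_card_boundary(_reserved.start() + 65)   // delta == 65 -> false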


//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray
//////////////////////////////////////////////////////////////////////

G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr, bool init_to_zero) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _csp(NULL),
  _init_to_zero(init_to_zero) {
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}

void G1BlockOffsetArray::set_space(Space* sp) {
  _sp = sp;
  _csp = sp->toContiguousSpace();
}

// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
G1BlockOffsetArray::set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {

  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address) so there are no cards
    // that need to be updated.
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                        75
  //      12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back skip
  //        (e.g., with today's encoding, offset = 0x81 =>
  //          back skip = 2**(3*(0x81 - N_words)) = 2**3 = 8)
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  assert(start == _array->address_for_index(start_card), "Precondition");
  assert(end == _array->address_for_index(end_card)+N_words, "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval
}
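
// A worked decode example against the indices in the diagram above (a
// sketch of the traversal, not code from this file): card 75 holds
// entry 2, i.e. a back skip of 2**(3*2) == 64 cards, which lands on
// card 11, the offset card; its entry x is then a word offset to the
// block start within that card. Likewise, card 19 holds entry 1, a back
// skip of 2**(3*1) == 8 cards, which also lands on card 11.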

// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  assert(_array->offset_array(start_card-1) <= N_words,
         "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
    offset = N_words + i;
    if (reach >= end_card) {
      _array->set_offset_array(start_card_for_region, end_card, offset);
      start_card_for_region = reach + 1;
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}
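
// A sketch of how the loop above partitions the cards, assuming the
// 2**(3*i) spacing shown in the diagram in set_remainder_to_point_to_start:
// with start_card == s, iteration i == 0 has reach == s + 6 and writes
// entry N_words + 0 to cards [s, s+6]; iteration i == 1 has
// reach == s + 62 and writes entry N_words + 1 to cards [s+7, s+62];
// and so on, until a region's reach covers end_card and the loop breaks.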

// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// right-open interval: [blk_start, blk_end)
void
G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  mark_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}

// Adjust BOT to show that a previously whole block has been split
// into two.
void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
                                     size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  mark_block(blk + left_blk_size, blk + blk_size);
}


// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check - BOT verification.
void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                           HeapWord* blk_end,
                                           Action action) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t end_index = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st  = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}
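
// A worked example of the fast-path test above (a sketch with made-up
// addresses, assuming 512-byte cards, i.e. LogN == 9): for a block
// [0x1010, 0x1050), boundary_before_end == (0x104F & ~0x1FF) == 0x1000,
// which is below blk_start, so the block lies within a single card and
// no update is needed. For a block [0x1010, 0x1210),
// boundary_before_end == 0x1200 >= 0x1010, so the block crosses a card
// boundary and the table is updated.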

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
      guarantee(entry > N_words, "Should be in logarithmic region");
    }
    size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "monotonicity");
    } else {
      guarantee(landing_card == start_card - 1, "Tautology");
      guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
    }
  }
}

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}

// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_mark);
}

void G1BlockOffsetArray::join_blocks(HeapWord* blk1, HeapWord* blk2) {
  HeapWord* blk1_start = Universe::heap()->block_start(blk1);
  HeapWord* blk2_start = Universe::heap()->block_start(blk2);
  assert(blk1 == blk1_start && blk2 == blk2_start,
         "Must be block starts.");
  assert(blk1 + _sp->block_size(blk1) == blk2, "Must be contiguous.");
  size_t blk1_start_index = _array->index_for(blk1);
  size_t blk2_start_index = _array->index_for(blk2);
  assert(blk1_start_index <= blk2_start_index, "sanity");
  HeapWord* blk2_card_start = _array->address_for_index(blk2_start_index);
  if (blk2 == blk2_card_start) {
    // blk2 starts a card.  Does blk1 start on the previous card, or further
    // back?
    assert(blk1_start_index < blk2_start_index, "must be lower card.");
    if (blk1_start_index + 1 == blk2_start_index) {
      // previous card; new value for blk2 card is size of blk1.
      _array->set_offset_array(blk2_start_index, (u_char) _sp->block_size(blk1));
    } else {
      // Earlier card; go back a card.
      _array->set_offset_array(blk2_start_index, N_words);
    }
  } else {
    // blk2 does not start a card.  Does it cross a card?  If not, nothing
    // to do.
    size_t blk2_end_index =
      _array->index_for(blk2 + _sp->block_size(blk2) - 1);
    assert(blk2_end_index >= blk2_start_index, "sanity");
    if (blk2_end_index > blk2_start_index) {
      // Yes, it crosses a card.  The value for the next card must change.
      if (blk1_start_index + 1 == blk2_start_index) {
        // previous card; new value for second blk2 card is size of blk1.
        _array->set_offset_array(blk2_start_index + 1,
                                 (u_char) _sp->block_size(blk1));
      } else {
        // Earlier card; go back a card.
        _array->set_offset_array(blk2_start_index + 1, N_words);
      }
    }
  }
}

HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }
  // Otherwise, find the block start using the table.
  HeapWord* q = block_at_or_preceding(addr, false, 0);
  return forward_to_block_containing_addr(q, addr);
}

// This duplicates a little code from the above: unavoidable.
HeapWord*
G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }
  // Otherwise, find the block start using the table.
  HeapWord* q = block_at_or_preceding(addr, false, 0);
  HeapWord* n = q + _sp->block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}


HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
                                                          HeapWord* n,
                                                          const void* addr) {
  // We're not in the normal case.  We need to handle an important subcase
  // here: LAB allocation.  An allocation previously recorded in the
  // offset table was actually a lab allocation, and was divided into
  // several objects subsequently.  Fix this situation as we answer the
  // query, by updating entries as we cross them.
  size_t next_index = _array->index_for(n) + 1;
  HeapWord* next_boundary = _array->address_for_index(next_index);
  if (csp() != NULL) {
    if (addr >= csp()->top()) return csp()->top();
    while (next_boundary < addr) {
      while (n <= next_boundary) {
        q = n;
        oop obj = oop(q);
        if (obj->klass() == NULL) return q;
        n += obj->size();
      }
      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
      // [q, n) is the block that crosses the boundary.
      alloc_block_work2(&next_boundary, &next_index, q, n);
    }
  } else {
    while (next_boundary < addr) {
      while (n <= next_boundary) {
        q = n;
        oop obj = oop(q);
        if (obj->klass() == NULL) return q;
        n += _sp->block_size(q);
      }
      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
      // [q, n) is the block that crosses the boundary.
      alloc_block_work2(&next_boundary, &next_index, q, n);
    }
  }
  return forward_to_block_containing_addr_const(q, n, addr);
}
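
// A sketch of the situation handled above: a LAB was recorded in the
// table as one large block and subsequently carved into individual
// objects. Walking object by object from q, each time the walk crosses
// the next card boundary the entry for that card is refreshed via
// alloc_block_work2, so later queries over the same cards take the
// fast path.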

HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index--);
    q -= offset;
  } while (offset == N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}
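
// A sketch of the backward walk above: while a card's entry is exactly
// N_words, the block start lies at least one full card back, so q
// retreats one card and the preceding entry is consulted. The first
// differing entry ends the loop; assuming it is a plain word offset
// (i.e. <= N_words, as callers of this "careful" variant expect), q
// then points at the block start without any object headers having
// been parsed.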

// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
void G1BlockOffsetArray::resize(size_t new_word_size) {
  HeapWord* new_end = _bottom + new_word_size;
  if (_end < new_end && !init_to_zero()) {
    // verify that the old and new boundaries are also card boundaries
    assert(_array->is_card_boundary(_end),
           "_end not a card boundary");
    assert(_array->is_card_boundary(new_end),
           "new _end would not be a card boundary");
    // set all the newly added cards
    _array->set_offset_array(_end, new_end, N_words);
  }
  _end = new_end;  // update _end
}

void G1BlockOffsetArray::set_region(MemRegion mr) {
  _bottom = mr.start();
  _end = mr.end();
}

//
//              threshold_
//              |   _index_
//              v   v
//      +-------+-------+-------+-------+-------+
//      |  i-1  |   i   |  i+1  |  i+2  |  i+3  |
//      +-------+-------+-------+-------+-------+
//       ( ^    ]
//         block-start
//
void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
                                           HeapWord* blk_start, HeapWord* blk_end) {
  // For efficiency, do copy-in/copy-out.
  HeapWord* threshold = *threshold_;
  size_t    index = *index_;

  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  assert(blk_end > threshold, "should be past threshold");
  assert(blk_start <= threshold, "blk_start should be at or before threshold");
  assert(pointer_delta(threshold, blk_start) <= N_words,
         "offset should be <= BlockOffsetSharedArray::N");
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  assert(threshold == _array->_reserved.start() + index*N_words,
         "index must agree with threshold");

  DEBUG_ONLY(size_t orig_index = index;)

  // Mark the card that holds the offset into the block.  Note
  // that _next_offset_index and _next_offset_threshold are not
  // updated until the end of this method.
  _array->set_offset_array(index, threshold, blk_start);

  // We need to now mark the subsequent cards that this blk spans.

  // Index of card on which blk ends.
  size_t end_index = _array->index_for(blk_end - 1);

  // Are there more cards left to be updated?
  if (index + 1 <= end_index) {
    HeapWord* rem_st  = _array->address_for_index(index + 1);
    // Calculate rem_end this way because end_index
    // may be the last valid index in the covered region.
    HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
    set_remainder_to_point_to_start(rem_st, rem_end);
  }

  index = end_index + 1;
  // Calculate threshold_ this way because end_index
  // may be the last valid index in the covered region.
  threshold = _array->address_for_index(end_index) + N_words;
  assert(threshold >= blk_end, "Incorrect offset threshold");

  // index_ and threshold_ updated here.
  *threshold_ = threshold;
  *index_ = index;

#ifdef ASSERT
  // The offset can be 0 if the block starts on a boundary.  That
  // is checked by an assertion above.
  size_t start_index = _array->index_for(blk_start);
  HeapWord* boundary = _array->address_for_index(start_index);
  assert((_array->offset_array(orig_index) == 0 &&
          blk_start == boundary) ||
         (_array->offset_array(orig_index) > 0 &&
          _array->offset_array(orig_index) <= N_words),
         "offset array should have been set");
  for (size_t j = orig_index + 1; j <= end_index; j++) {
    assert(_array->offset_array(j) > 0 &&
           _array->offset_array(j) <=
             (u_char) (N_words+BlockOffsetArray::N_powers-1),
           "offset array should have been set");
  }
#endif
}
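
// A worked example of the update above (a sketch, assuming
// N_words == 64): let the incoming threshold be the start of card i and
// let a block begin 10 words before it and end inside card i+2. Card i
// gets entry 10 (the word offset back to blk_start), cards i+1 and i+2
// are filled in by set_remainder_to_point_to_start, and the outgoing
// threshold and index become the start of card i+3 and i+3 respectively.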

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////

HeapWord*
G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
  return forward_to_block_containing_addr(q, addr);
}

HeapWord*
G1BlockOffsetArrayContigSpace::
block_start_unsafe_const(const void* addr) const {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
  HeapWord* n = q + _sp->block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}

G1BlockOffsetArrayContigSpace::
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
                              MemRegion mr) :
  G1BlockOffsetArray(array, mr, true)
{
  _next_offset_threshold = NULL;
  _next_offset_index = 0;
}

HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  _next_offset_index = _array->index_for(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index(_next_offset_index);
  return _next_offset_threshold;
}

void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for(_bottom);
  assert(_array->address_for_index(bottom_index) == _bottom,
         "Precondition of call");
  _array->set_offset_array(bottom_index, 0);
}