comparison src/share/vm/memory/blockOffsetTable.hpp @ 1716:be3f9c242c9d

6948538: CMS: BOT walkers can fall into object allocation and initialization cracks Summary: GC workers now recognize an intermediate transient state of blocks which are allocated but have not yet completed initialization. blk_start() calls do not attempt to determine the size of a block in the transient state, rather waiting for the block to become initialized so that it is safe to query its size. Audited and ensured the order of initialization of object fields (klass, free bit and size) to respect block state transition protocol. Also included some new assertion checking code enabled in debug mode. Reviewed-by: chrisphi, johnc, poonam
author ysr
date Mon, 16 Aug 2010 15:58:42 -0700
parents c18cbe5936b8
children 52f2bc645da5
comparison
equal deleted inserted replaced
1713:7fcd5f39bd7a 1716:be3f9c242c9d
1 /* 1 /*
2 * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
105 LogN_words = LogN - LogHeapWordSize, 105 LogN_words = LogN - LogHeapWordSize,
106 N_bytes = 1 << LogN, 106 N_bytes = 1 << LogN,
107 N_words = 1 << LogN_words 107 N_words = 1 << LogN_words
108 }; 108 };
109 109
110 bool _init_to_zero;
111
110 // The reserved region covered by the shared array. 112 // The reserved region covered by the shared array.
111 MemRegion _reserved; 113 MemRegion _reserved;
112 114
113 // End of the current committed region. 115 // End of the current committed region.
114 HeapWord* _end; 116 HeapWord* _end;
123 // For performance these have to devolve to array accesses in product builds. 125 // For performance these have to devolve to array accesses in product builds.
124 u_char offset_array(size_t index) const { 126 u_char offset_array(size_t index) const {
125 assert(index < _vs.committed_size(), "index out of range"); 127 assert(index < _vs.committed_size(), "index out of range");
126 return _offset_array[index]; 128 return _offset_array[index];
127 } 129 }
128 void set_offset_array(size_t index, u_char offset) { 130 // An assertion-checking helper method for the set_offset_array() methods below.
131 void check_reducing_assertion(bool reducing);
132
133 void set_offset_array(size_t index, u_char offset, bool reducing = false) {
134 check_reducing_assertion(reducing);
129 assert(index < _vs.committed_size(), "index out of range"); 135 assert(index < _vs.committed_size(), "index out of range");
136 assert(!reducing || _offset_array[index] >= offset, "Not reducing");
130 _offset_array[index] = offset; 137 _offset_array[index] = offset;
131 } 138 }
132 void set_offset_array(size_t index, HeapWord* high, HeapWord* low) { 139
140 void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
141 check_reducing_assertion(reducing);
133 assert(index < _vs.committed_size(), "index out of range"); 142 assert(index < _vs.committed_size(), "index out of range");
134 assert(high >= low, "addresses out of order"); 143 assert(high >= low, "addresses out of order");
135 assert(pointer_delta(high, low) <= N_words, "offset too large"); 144 assert(pointer_delta(high, low) <= N_words, "offset too large");
145 assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
146 "Not reducing");
136 _offset_array[index] = (u_char)pointer_delta(high, low); 147 _offset_array[index] = (u_char)pointer_delta(high, low);
137 } 148 }
138 void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) { 149
150 void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
151 check_reducing_assertion(reducing);
139 assert(index_for(right - 1) < _vs.committed_size(), 152 assert(index_for(right - 1) < _vs.committed_size(),
140 "right address out of range"); 153 "right address out of range");
141 assert(left < right, "Heap addresses out of order"); 154 assert(left < right, "Heap addresses out of order");
142 size_t num_cards = pointer_delta(right, left) >> LogN_words; 155 size_t num_cards = pointer_delta(right, left) >> LogN_words;
143 156
148 memset(&_offset_array[index_for(left)], offset, num_cards); 161 memset(&_offset_array[index_for(left)], offset, num_cards);
149 } else { 162 } else {
150 size_t i = index_for(left); 163 size_t i = index_for(left);
151 const size_t end = i + num_cards; 164 const size_t end = i + num_cards;
152 for (; i < end; i++) { 165 for (; i < end; i++) {
166 assert(!reducing || _offset_array[i] >= offset, "Not reducing");
153 _offset_array[i] = offset; 167 _offset_array[i] = offset;
154 } 168 }
155 } 169 }
156 } 170 }
157 171
158 void set_offset_array(size_t left, size_t right, u_char offset) { 172 void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
173 check_reducing_assertion(reducing);
159 assert(right < _vs.committed_size(), "right address out of range"); 174 assert(right < _vs.committed_size(), "right address out of range");
160 assert(left <= right, "indexes out of order"); 175 assert(left <= right, "indexes out of order");
161 size_t num_cards = right - left + 1; 176 size_t num_cards = right - left + 1;
162 177
163 // Below, we may use an explicit loop instead of memset 178 // Below, we may use an explicit loop instead of memset
167 memset(&_offset_array[left], offset, num_cards); 182 memset(&_offset_array[left], offset, num_cards);
168 } else { 183 } else {
169 size_t i = left; 184 size_t i = left;
170 const size_t end = i + num_cards; 185 const size_t end = i + num_cards;
171 for (; i < end; i++) { 186 for (; i < end; i++) {
187 assert(!reducing || _offset_array[i] >= offset, "Not reducing");
172 _offset_array[i] = offset; 188 _offset_array[i] = offset;
173 } 189 }
174 } 190 }
175 } 191 }
176 192
209 // table. The "new_word_size" may not be larger than the size of the 225 // table. The "new_word_size" may not be larger than the size of the
210 // reserved region this table covers. 226 // reserved region this table covers.
211 void resize(size_t new_word_size); 227 void resize(size_t new_word_size);
212 228
213 void set_bottom(HeapWord* new_bottom); 229 void set_bottom(HeapWord* new_bottom);
230
231 // Whether entries should be initialized to zero. Used currently only for
232 // error checking.
233 void set_init_to_zero(bool val) { _init_to_zero = val; }
234 bool init_to_zero() { return _init_to_zero; }
214 235
215 // Updates all the BlockOffsetArray's sharing this shared array to 236 // Updates all the BlockOffsetArray's sharing this shared array to
216 // reflect the current "top"'s of their spaces. 237 // reflect the current "top"'s of their spaces.
217 void update_offset_arrays(); // Not yet implemented! 238 void update_offset_arrays(); // Not yet implemented!
218 239
283 304
284 // If true, array entries are initialized to 0; otherwise, they are 305 // If true, array entries are initialized to 0; otherwise, they are
285 // initialized to point backwards to the beginning of the covered region. 306 // initialized to point backwards to the beginning of the covered region.
286 bool _init_to_zero; 307 bool _init_to_zero;
287 308
309 // An assertion-checking helper method for the set_remainder*() methods below.
310 void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }
311
288 // Sets the entries 312 // Sets the entries
289 // corresponding to the cards starting at "start" and ending at "end" 313 // corresponding to the cards starting at "start" and ending at "end"
290 // to point back to the card before "start": the interval [start, end) 314 // to point back to the card before "start": the interval [start, end)
291 // is right-open. 315 // is right-open. The last parameter, reducing, indicates whether the
292 void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end); 316 // updates to individual entries always reduce the entry from a higher
317 // to a lower value. (For example this would hold true during a temporal
319 // regime during which only block splits were updating the BOT.)
319 void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
293 // Same as above, except that the args here are a card _index_ interval 320 // Same as above, except that the args here are a card _index_ interval
294 // that is closed: [start_index, end_index] 321 // that is closed: [start_index, end_index]
295 void set_remainder_to_point_to_start_incl(size_t start, size_t end); 322 void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);
296 323
297 // A helper function for BOT adjustment/verification work 324 // A helper function for BOT adjustment/verification work
298 void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action); 325 void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);
299 326
300 public: 327 public:
301 // The space may not have its bottom and top set yet, which is why the 328 // The space may not have its bottom and top set yet, which is why the
302 // region is passed as a parameter. If "init_to_zero" is true, the 329 // region is passed as a parameter. If "init_to_zero" is true, the
303 // elements of the array are initialized to zero. Otherwise, they are 330 // elements of the array are initialized to zero. Otherwise, they are
304 // initialized to point backwards to the beginning. 331 // initialized to point backwards to the beginning.
305 BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr, 332 BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
306 bool init_to_zero); 333 bool init_to_zero_);
307 334
308 // Note: this ought to be part of the constructor, but that would require 335 // Note: this ought to be part of the constructor, but that would require
309 // "this" to be passed as a parameter to a member constructor for 336 // "this" to be passed as a parameter to a member constructor for
310 // the containing concrete subtype of Space. 337 // the containing concrete subtype of Space.
311 // This would be legal C++, but MS VC++ doesn't allow it. 338 // This would be legal C++, but MS VC++ doesn't allow it.
356 } 383 }
357 384
358 // If true, initialize array slots with no allocated blocks to zero. 385 // If true, initialize array slots with no allocated blocks to zero.
359 // Otherwise, make them point back to the front. 386 // Otherwise, make them point back to the front.
360 bool init_to_zero() { return _init_to_zero; } 387 bool init_to_zero() { return _init_to_zero; }
388 // Corresponding setter
389 void set_init_to_zero(bool val) {
390 _init_to_zero = val;
391 assert(_array != NULL, "_array should be non-NULL");
392 _array->set_init_to_zero(val);
393 }
361 394
362 // Debugging 395 // Debugging
363 // Return the index of the last entry in the "active" region. 396 // Return the index of the last entry in the "active" region.
364 virtual size_t last_active_index() const = 0; 397 virtual size_t last_active_index() const = 0;
365 // Verify the block offset table 398 // Verify the block offset table
422 // Adjust BOT to show that it has a block in the range 455 // Adjust BOT to show that it has a block in the range
423 // [blk_start, blk_start + size). Only the first card 456 // [blk_start, blk_start + size). Only the first card
424 // of BOT is touched. It is assumed (and verified in the 457 // of BOT is touched. It is assumed (and verified in the
425 // non-product VM) that the remaining cards of the block 458 // non-product VM) that the remaining cards of the block
426 // are correct. 459 // are correct.
427 void mark_block(HeapWord* blk_start, HeapWord* blk_end); 460 void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
428 void mark_block(HeapWord* blk, size_t size) { 461 void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
429 mark_block(blk, blk + size); 462 mark_block(blk, blk + size, reducing);
430 } 463 }
431 464
432 // Adjust _unallocated_block to indicate that a particular 465 // Adjust _unallocated_block to indicate that a particular
433 // block has been newly allocated or freed. It is assumed (and 466 // block has been newly allocated or freed. It is assumed (and
434 // verified in the non-product VM) that the BOT is correct for 467 // verified in the non-product VM) that the BOT is correct for
435 // the given block. 468 // the given block.
436 void allocated(HeapWord* blk_start, HeapWord* blk_end) { 469 void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
437 // Verify that the BOT shows [blk, blk + blk_size) to be one block. 470 // Verify that the BOT shows [blk, blk + blk_size) to be one block.
438 verify_single_block(blk_start, blk_end); 471 verify_single_block(blk_start, blk_end);
439 if (BlockOffsetArrayUseUnallocatedBlock) { 472 if (BlockOffsetArrayUseUnallocatedBlock) {
440 _unallocated_block = MAX2(_unallocated_block, blk_end); 473 _unallocated_block = MAX2(_unallocated_block, blk_end);
441 } 474 }
442 } 475 }
443 476
444 void allocated(HeapWord* blk, size_t size) { 477 void allocated(HeapWord* blk, size_t size, bool reducing = false) {
445 allocated(blk, blk + size); 478 allocated(blk, blk + size, reducing);
446 } 479 }
447 480
448 void freed(HeapWord* blk_start, HeapWord* blk_end); 481 void freed(HeapWord* blk_start, HeapWord* blk_end);
449 void freed(HeapWord* blk, size_t size) { 482 void freed(HeapWord* blk, size_t size);
450 freed(blk, blk + size);
451 }
452 483
453 HeapWord* block_start_unsafe(const void* addr) const; 484 HeapWord* block_start_unsafe(const void* addr) const;
454 485
455 // Requires "addr" to be the start of a card and returns the 486 // Requires "addr" to be the start of a card and returns the
456 // start of the block that contains the given address. 487 // start of the block that contains the given address.
457 HeapWord* block_start_careful(const void* addr) const; 488 HeapWord* block_start_careful(const void* addr) const;
458
459 489
460 // Verification & debugging: ensure that the offset table reflects 490 // Verification & debugging: ensure that the offset table reflects
461 // the fact that the block [blk_start, blk_end) or [blk, blk + size) 491 // the fact that the block [blk_start, blk_end) or [blk, blk + size)
462 // is a single block of storage. NOTE: can't const this because of 492 // is a single block of storage. NOTE: can't const this because of
463 // call to non-const do_block_internal() below. 493 // call to non-const do_block_internal() below.