src/share/vm/memory/blockOffsetTable.hpp @ 0:a61af66fc99e jdk7-b24
Initial load
author:   duke
date:     Sat, 01 Dec 2007 00:00:00 +0000
children: 387a62b4be60 37f87013dfd8
/*
 * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;
class SerializeOopClosure;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};
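
// Illustrative sketch (not part of the original file): how a card-scanning
// collector might use the interface above. "covered_space", "bot()" and
// "card_boundary" are hypothetical names; only block_start() comes from
// this class.
//
//   HeapWord* card_boundary = ...;   // first word of a dirty card
//   HeapWord* obj_start = covered_space->bot()->block_start(card_boundary);
//   // obj_start is the start of the block (object or free chunk) covering
//   // card_boundary, so scanning can begin at an object boundary rather
//   // than in the middle of an object.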

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN"). An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place. (Consider,
// for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  enum SomePrivateConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  void set_offset_array(size_t index, u_char offset) {
    assert(index < _vs.committed_size(), "index out of range");
    _offset_array[index] = offset;
  }
  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }
  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;
    memset(&_offset_array[index_for(left)], offset, num_cards);
  }

  void set_offset_array(size_t left, size_t right, u_char offset) {
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;
    memset(&_offset_array[left], offset, num_cards);
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }

public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;

  // Shared space support
  void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end);
};
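
// Worked example (illustrative, not part of the original file): with
// LogN = 9, each "card" of this array covers 2^9 = 512 bytes. Assuming a
// 64-bit VM (LogHeapWordSize = 3), LogN_words = 6 and N_words = 64 words
// per card. For an address p inside the reserved region, the natural
// mapping is
//   index_for(p)         == byte offset of p from _reserved.start() >> LogN
//   address_for_index(i) == _reserved.start() + (i << LogN_words)
// so each u_char slot in _offset_array describes one 512-byte card.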

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  enum SomePrivateConstants {
    N_words = BlockOffsetSharedArray::N_words,
    LogN    = BlockOffsetSharedArray::LogN,
    // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
    // All entries are less than "N_words + N_powers".
    LogBase = 4,
    Base = (1 << LogBase),
    N_powers = 14
  };

  static size_t power_to_cards_back(uint i) {
    return 1 << (LogBase * i);
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }

  // The shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // Sets the entries
  // corresponding to the cards starting at "start" and ending at "end"
  // to point back to the card before "start": the interval [start, end)
  // is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};
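
// Worked example (illustrative, not part of the original file), again
// assuming a 64-bit VM where N_words = 64:
//   - An entry value e < N_words (0..63) means the block covering the first
//     word of that card starts e words before the card boundary.
//   - An entry value e >= N_words means "skip back
//     entry_to_cards_back(e) = Base^(e - N_words) = 16^(e - 64) cards and
//     look again": e == 64 skips 1 card, 65 skips 16 cards, 66 skips 256
//     cards, and so on. Since N_powers = 14, every entry stays below
//     N_words + N_powers = 78, so the start of a very large block is found
//     in a small, logarithmic number of hops rather than one card at a time.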

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
  void mark_block(HeapWord* blk, size_t size) {
    mark_block(blk, blk + size);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end) {
    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size) {
    allocated(blk, blk + size);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size) {
    freed(blk, blk + size);
  }

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;


  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};
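
// Illustrative sketch (not part of the original file): how a free-list
// based space might use the interface above when it carves an allocation
// out of a larger free chunk. "fl_space", "bot()", "chunk", "chunk_size"
// and "word_size" are hypothetical names; only the BOT calls come from
// this class.
//
//   HeapWord* chunk   = ...;   // free chunk the BOT records as one block
//   size_t chunk_size = ...;   // its size in words
//   size_t word_size  = ...;   // portion being allocated (< chunk_size)
//   fl_space->bot()->split_block(chunk, chunk_size, word_size);
//   fl_space->bot()->allocated(chunk, word_size);  // may also advance
//                                                  // _unallocated_block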

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  HeapWord* block_start_unsafe(const void* addr) const;

  void serialize(SerializeOopClosure* soc);

  // Debugging support
  virtual size_t last_active_index() const;
};
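
// Illustrative sketch (not part of the original file): how a bump-pointer
// ContiguousSpace might drive the table above on its allocation path.
// "space", "bot()", "res" and "word_size" are hypothetical names; only the
// alloc_block() call comes from this class.
//
//   HeapWord* res = space->allocate(word_size);   // bump-pointer allocation
//   if (res != NULL) {
//     // Cheap in the common case: alloc_block() only calls
//     // alloc_block_work() when the new block end crosses the current
//     // _next_offset_threshold, i.e. roughly once per card.
//     space->bot()->alloc_block(res, word_size);
//   }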