Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp @ 20504:6948da6d7c13
8052172: Evacuation failure handling in G1 does not evacuate all objects if -XX:-G1DeferredRSUpdate is set
Summary: Remove -XX:-G1DeferredRSUpdate functionality as it is racy. During evacuation failure handling, threads where evacuation failure handling occurred may try to add remembered-set entries for regions whose remembered sets are currently being scanned. The iterator that handles the remembered-set scan does not support addition of entries during the scan and so may skip valid references.
Reviewed-by: iveresov, brutisso, mgerdin
author | tschatzl |
---|---|
date | Tue, 30 Sep 2014 09:44:36 +0200 |
parents | 1f1d373cd044 |
children | 0fcaab91d485 |
rev | line source |
---|---|
342 | 1 /* |
17937
78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
7181
diff
changeset
|
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
342
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
342
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
342
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP |
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP | |
27 | |
20337 | 28 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" |
1972 | 29 #include "memory/memRegion.hpp" |
30 #include "runtime/virtualspace.hpp" | |
31 #include "utilities/globalDefinitions.hpp" | |
32 | |
342 | 33 // The CollectedHeap type requires subtypes to implement a method |
34 // "block_start". For some subtypes, notably generational | |
35 // systems using card-table-based write barriers, the efficiency of this | |
36 // operation may be important. Implementations of the "BlockOffsetArray" | |
37 // class may be useful in providing such efficient implementations. | |
38 // | |
39 // While generally mirroring the structure of the BOT for GenCollectedHeap, | |
40 // the following types are tailored more towards G1's uses; these should, | |
41 // however, be merged back into a common BOT to avoid code duplication | |
42 // and reduce maintenance overhead. | |
43 // | |
44 // G1BlockOffsetTable (abstract) | |
45 // -- G1BlockOffsetArray (uses G1BlockOffsetSharedArray) | |
46 // -- G1BlockOffsetArrayContigSpace | |
47 // | |
48 // A main impediment to the consolidation of this code might be the | |
49 // effect of making some of the block_start*() calls non-const as | |
50 // below. Whether that might adversely affect performance optimizations | |
51 // that compilers might normally perform in the case of non-G1 | |
52 // collectors needs to be carefully investigated prior to any such | |
53 // consolidation. | |
54 | |
55 // Forward declarations | |
56 class G1BlockOffsetSharedArray; | |
20273
9441d22e429a
8047820: G1 Block offset table does not need to support generic Space classes
mgerdin
parents:
17937
diff
changeset
|
57 class G1OffsetTableContigSpace; |
342 | 58 |
59 class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC { | |
60 friend class VMStructs; | |
61 protected: | |
62 // These members describe the region covered by the table. | |
63 | |
64 // The space this table is covering. | |
65 HeapWord* _bottom; // == reserved.start | |
66 HeapWord* _end; // End of currently allocated region. | |
67 | |
68 public: | |
69 // Initialize the table to cover the given space. | |
70 // The contents of the initial table are undefined. | |
71 G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) : | |
72 _bottom(bottom), _end(end) | |
73 { | |
74 assert(_bottom <= _end, "arguments out of order"); | |
75 } | |
76 | |
77 // Note that the committed size of the covered space may have changed, | |
78 // so the table size might also wish to change. | |
79 virtual void resize(size_t new_word_size) = 0; | |
80 | |
81 virtual void set_bottom(HeapWord* new_bottom) { | |
7181
2fc0334f613a
7194633: G1: Assertion and guarantee failures in block offset table
johnc
parents:
6610
diff
changeset
|
82 assert(new_bottom <= _end, |
2fc0334f613a
7194633: G1: Assertion and guarantee failures in block offset table
johnc
parents:
6610
diff
changeset
|
83 err_msg("new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")", |
17937
78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
7181
diff
changeset
|
84 p2i(new_bottom), p2i(_end))); |
342 | 85 _bottom = new_bottom; |
86 resize(pointer_delta(_end, _bottom)); | |
87 } | |
88 | |
89 // Requires "addr" to be contained by a block, and returns the address of | |
90 // the start of that block. (May have side effects, namely updating of | |
91 // shared array entries that "point" too far backwards. This can occur, | |
92 // for example, when LAB allocation is used in a space covered by the | |
93 // table.) | |
94 virtual HeapWord* block_start_unsafe(const void* addr) = 0; | |
95 // Same as above, but does not have any of the possible side effects | |
96 // discussed above. | |
97 virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0; | |
98 | |
99 // Returns the address of the start of the block containing "addr", or | |
100 // else "null" if it is covered by no block. (May have side effects, | |
101 // namely updating of shared array entries that "point" too far | |
102 // backwards. This can occur, for example, when lab allocation is used | |
103 // in a space covered by the table.) | |
104 inline HeapWord* block_start(const void* addr); | |
105 // Same as above, but does not have any of the possible side effects | |
106 // discussed above. | |
107 inline HeapWord* block_start_const(const void* addr) const; | |
108 }; | |
109 | |
20337 | 110 class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener { |
111 public: | |
112 virtual void on_commit(uint start_idx, size_t num_regions); | |
113 }; | |
114 | |
342 | 115 // This implementation of "G1BlockOffsetTable" divides the covered region |
116 // into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry |
117 // for each such subregion indicates how far back one must go to find the |
118 // start of the chunk that includes the first word of the subregion. |
119 // | |
120 // Each BlockOffsetArray is owned by a Space. However, the actual array | |
121 // may be shared by several BlockOffsetArrays; this is useful | |
122 // when a single resizable area (such as a generation) is divided up into | |
123 // several spaces in which contiguous allocation takes place, | |
124 // such as, for example, in G1 or in the train generation.) | |
125 | |
126 // Here is the shared array type. | |
127 | |
6197 | 128 class G1BlockOffsetSharedArray: public CHeapObj<mtGC> { |
342 | 129 friend class G1BlockOffsetArray; |
130 friend class G1BlockOffsetArrayContigSpace; | |
131 friend class VMStructs; | |
132 | |
133 private: | |
20337 | 134 G1BlockOffsetSharedArrayMappingChangedListener _listener; |
342 | 135 // The reserved region covered by the shared array. |
136 MemRegion _reserved; | |
137 | |
138 // End of the current committed region. | |
139 HeapWord* _end; | |
140 | |
141 // Array for keeping offsets for retrieving object start fast given an | |
142 // address. | |
143 u_char* _offset_array; // byte array keeping backwards offsets | |
144 | |
7181
2fc0334f613a
7194633: G1: Assertion and guarantee failures in block offset table
johnc
parents:
6610
diff
changeset
|
145 void check_offset(size_t offset, const char* msg) const { |
2fc0334f613a
7194633: G1: Assertion and guarantee failures in block offset table
johnc
parents:
6610
diff
changeset
|
146 assert(offset <= N_words, |
2fc0334f613a
7194633: G1: Assertion and guarantee failures in block offset table
johnc
parents:
6610
diff
changeset
|
147 err_msg("%s - " |
17937
78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
7181
diff
changeset
|
148 "offset: " SIZE_FORMAT ", N_words: " UINT32_FORMAT, |
7181
2fc0334f613a
7194633: G1: Assertion and guarantee failures in block offset table
johnc
parents:
6610
diff
changeset
|
149 msg, offset, N_words)); |
2fc0334f613a
7194633: G1: Assertion and guarantee failures in block offset table
johnc
parents:
6610
diff
changeset
|
150 } |
2fc0334f613a
7194633: G1: Assertion and guarantee failures in block offset table
johnc
parents:
6610
diff
changeset
|
151 |
342 | 152 // Bounds checking accessors: |
153 // For performance these have to devolve to array accesses in product builds. | |
20337 | 154 inline u_char offset_array(size_t index) const; |
342 | 155 |
20273
9441d22e429a
8047820: G1 Block offset table does not need to support generic Space classes
mgerdin
parents:
17937
diff
changeset
|
156 void set_offset_array(HeapWord* left, HeapWord* right, u_char offset); |
9441d22e429a
8047820: G1 Block offset table does not need to support generic Space classes
mgerdin
parents:
17937
diff
changeset
|
157 |
20337 | 158 void set_offset_array_raw(size_t index, u_char offset) { |
342 | 159 _offset_array[index] = offset; |
160 } | |
161 | |
20337 | 162 inline void set_offset_array(size_t index, u_char offset); |
163 | |
164 inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low); | |
342 | 165 |
20337 | 166 inline void set_offset_array(size_t left, size_t right, u_char offset); |
342 | 167 |
20337 | 168 inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const; |
342 | 169 |
170 bool is_card_boundary(HeapWord* p) const; | |
171 | |
20337 | 172 public: |
173 | |
342 | 174 // Return the number of slots needed for an offset array |
175 // that covers mem_region_words words. | |
20337 | 176 static size_t compute_size(size_t mem_region_words) { |
177 size_t number_of_slots = (mem_region_words / N_words); | |
178 return ReservedSpace::allocation_align_size_up(number_of_slots); | |
342 | 179 } |
180 | |
181 enum SomePublicConstants { | |
182 LogN = 9, | |
183 LogN_words = LogN - LogHeapWordSize, | |
184 N_bytes = 1 << LogN, | |
185 N_words = 1 << LogN_words | |
186 }; | |
187 | |
188 // Initialize the table to cover from "base" to (at least) | |
189 // "base + init_word_size". In the future, the table may be expanded | |
190 // (see "resize" below) up to the size of "_reserved" (which must be at | |
191 // least "init_word_size".) The contents of the initial table are | |
192 // undefined; it is the responsibility of the constituent | |
193 // G1BlockOffsetTable(s) to initialize cards. | |
20337 | 194 G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage); |
342 | 195 |
196 void set_bottom(HeapWord* new_bottom); | |
197 | |
198 // Return the appropriate index into "_offset_array" for "p". | |
199 inline size_t index_for(const void* p) const; | |
20337 | 200 inline size_t index_for_raw(const void* p) const; |
342 | 201 |
202 // Return the address indicating the start of the region corresponding to | |
203 // "index" in "_offset_array". | |
204 inline HeapWord* address_for_index(size_t index) const; | |
20337 | 205 // Variant of address_for_index that does not check the index for validity. |
206 inline HeapWord* address_for_index_raw(size_t index) const { | |
207 return _reserved.start() + (index << LogN_words); | |
208 } | |
342 | 209 }; |
210 | |
211 // And here is the G1BlockOffsetTable subtype that uses the array. | |
212 | |
213 class G1BlockOffsetArray: public G1BlockOffsetTable { | |
214 friend class G1BlockOffsetSharedArray; | |
215 friend class G1BlockOffsetArrayContigSpace; | |
216 friend class VMStructs; | |
217 private: | |
218 enum SomePrivateConstants { | |
219 N_words = G1BlockOffsetSharedArray::N_words, | |
220 LogN = G1BlockOffsetSharedArray::LogN | |
221 }; | |
222 | |
223 // The following enums are used by do_block_helper | |
224 enum Action { | |
225 Action_single, // BOT records a single block (see single_block()) | |
226 Action_mark, // BOT marks the start of a block (see mark_block()) | |
227 Action_check // Check that BOT records block correctly | |
228 // (see verify_single_block()). | |
229 }; | |
230 | |
231 // This is the array, which can be shared by several BlockOffsetArray's | |
232 // servicing different | |
233 G1BlockOffsetSharedArray* _array; | |
234 | |
235 // The space that owns this subregion. | |
20273
9441d22e429a
8047820: G1 Block offset table does not need to support generic Space classes
mgerdin
parents:
17937
diff
changeset
|
236 G1OffsetTableContigSpace* _gsp; |
342 | 237 |
238 // If true, array entries are initialized to 0; otherwise, they are | |
239 // initialized to point backwards to the beginning of the covered region. | |
240 bool _init_to_zero; | |
241 | |
242 // The portion [_unallocated_block, _sp.end()) of the space that | |
243 // is a single block known not to contain any objects. | |
244 // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag. | |
245 HeapWord* _unallocated_block; | |
246 | |
247 // Sets the entries | |
248 // corresponding to the cards starting at "start" and ending at "end" | |
249 // to point back to the card before "start": the interval [start, end) | |
250 // is right-open. | |
251 void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end); | |
252 // Same as above, except that the args here are a card _index_ interval | |
253 // that is closed: [start_index, end_index] | |
254 void set_remainder_to_point_to_start_incl(size_t start, size_t end); | |
255 | |
256 // A helper function for BOT adjustment/verification work | |
257 void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action); | |
258 | |
259 protected: | |
260 | |
20273
9441d22e429a
8047820: G1 Block offset table does not need to support generic Space classes
mgerdin
parents:
17937
diff
changeset
|
261 G1OffsetTableContigSpace* gsp() const { return _gsp; } |
9441d22e429a
8047820: G1 Block offset table does not need to support generic Space classes
mgerdin
parents:
17937
diff
changeset
|
262 |
9441d22e429a
8047820: G1 Block offset table does not need to support generic Space classes
mgerdin
parents:
17937
diff
changeset
|
263 inline size_t block_size(const HeapWord* p) const; |
342 | 264 |
265 // Returns the address of a block whose start is at most "addr". | |
266 // If "has_max_index" is true, "assumes "max_index" is the last valid one | |
267 // in the array. | |
268 inline HeapWord* block_at_or_preceding(const void* addr, | |
269 bool has_max_index, | |
270 size_t max_index) const; | |
271 | |
272 // "q" is a block boundary that is <= "addr"; "n" is the address of the | |
273 // next block (or the end of the space.) Return the address of the | |
274 // beginning of the block that contains "addr". Does so without side | |
275 // effects (see, e.g., spec of block_start.) | |
276 inline HeapWord* | |
277 forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n, | |
278 const void* addr) const; | |
279 | |
280 // "q" is a block boundary that is <= "addr"; return the address of the | |
281 // beginning of the block that contains "addr". May have side effects | |
282 // on "this", by updating imprecise entries. | |
283 inline HeapWord* forward_to_block_containing_addr(HeapWord* q, | |
284 const void* addr); | |
285 | |
286 // "q" is a block boundary that is <= "addr"; "n" is the address of the | |
287 // next block (or the end of the space.) Return the address of the | |
288 // beginning of the block that contains "addr". May have side effects | |
289 // on "this", by updating imprecise entries. | |
290 HeapWord* forward_to_block_containing_addr_slow(HeapWord* q, | |
291 HeapWord* n, | |
292 const void* addr); | |
293 | |
294 // Requires that "*threshold_" be the first array entry boundary at or | |
295 // above "blk_start", and that "*index_" be the corresponding array | |
296 // index. If the block starts at or crosses "*threshold_", records | |
297 // "blk_start" as the appropriate block start for the array index | |
298 // starting at "*threshold_", and for any other indices crossed by the | |
299 // block. Updates "*threshold_" and "*index_" to correspond to the first | |
300 // index after the block end. | |
301 void alloc_block_work2(HeapWord** threshold_, size_t* index_, | |
302 HeapWord* blk_start, HeapWord* blk_end); | |
303 | |
304 public: | |
305 // The space may not have it's bottom and top set yet, which is why the | |
306 // region is passed as a parameter. If "init_to_zero" is true, the | |
307 // elements of the array are initialized to zero. Otherwise, they are | |
308 // initialized to point backwards to the beginning. | |
309 G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr, | |
310 bool init_to_zero); | |
311 | |
312 // Note: this ought to be part of the constructor, but that would require | |
313 // "this" to be passed as a parameter to a member constructor for | |
314 // the containing concrete subtype of Space. | |
315 // This would be legal C++, but MS VC++ doesn't allow it. | |
20273
9441d22e429a
8047820: G1 Block offset table does not need to support generic Space classes
mgerdin
parents:
17937
diff
changeset
|
316 void set_space(G1OffsetTableContigSpace* sp); |
342 | 317 |
318 // Resets the covered region to the given "mr". | |
319 void set_region(MemRegion mr); | |
320 | |
321 // Resets the covered region to one with the same _bottom as before but | |
322 // the "new_word_size". | |
323 void resize(size_t new_word_size); | |
324 | |
325 // These must be guaranteed to work properly (i.e., do nothing) | |
326 // when "blk_start" ("blk" for second version) is "NULL". | |
327 virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end); | |
328 virtual void alloc_block(HeapWord* blk, size_t size) { | |
329 alloc_block(blk, blk + size); | |
330 } | |
331 | |
332 // The following methods are useful and optimized for a | |
333 // general, non-contiguous space. | |
334 | |
335 // Given a block [blk_start, blk_start + full_blk_size), and | |
336 // a left_blk_size < full_blk_size, adjust the BOT to show two | |
337 // blocks [blk_start, blk_start + left_blk_size) and | |
338 // [blk_start + left_blk_size, blk_start + full_blk_size). | |
339 // It is assumed (and verified in the non-product VM) that the | |
340 // BOT was correct for the original block. | |
341 void split_block(HeapWord* blk_start, size_t full_blk_size, | |
342 size_t left_blk_size); | |
343 | |
344 // Adjust the BOT to show that it has a single block in the | |
345 // range [blk_start, blk_start + size). All necessary BOT | |
346 // cards are adjusted, but _unallocated_block isn't. | |
347 void single_block(HeapWord* blk_start, HeapWord* blk_end); | |
348 void single_block(HeapWord* blk, size_t size) { | |
349 single_block(blk, blk + size); | |
350 } | |
351 | |
352 // Adjust BOT to show that it has a block in the range | |
353 // [blk_start, blk_start + size). Only the first card | |
354 // of BOT is touched. It is assumed (and verified in the | |
355 // non-product VM) that the remaining cards of the block | |
356 // are correct. | |
357 void mark_block(HeapWord* blk_start, HeapWord* blk_end); | |
358 void mark_block(HeapWord* blk, size_t size) { | |
359 mark_block(blk, blk + size); | |
360 } | |
361 | |
362 // Adjust _unallocated_block to indicate that a particular | |
363 // block has been newly allocated or freed. It is assumed (and | |
364 // verified in the non-product VM) that the BOT is correct for | |
365 // the given block. | |
366 inline void allocated(HeapWord* blk_start, HeapWord* blk_end) { | |
367 // Verify that the BOT shows [blk, blk + blk_size) to be one block. | |
368 verify_single_block(blk_start, blk_end); | |
369 if (BlockOffsetArrayUseUnallocatedBlock) { | |
370 _unallocated_block = MAX2(_unallocated_block, blk_end); | |
371 } | |
372 } | |
373 | |
374 inline void allocated(HeapWord* blk, size_t size) { | |
375 allocated(blk, blk + size); | |
376 } | |
377 | |
378 inline void freed(HeapWord* blk_start, HeapWord* blk_end); | |
379 | |
380 inline void freed(HeapWord* blk, size_t size); | |
381 | |
382 virtual HeapWord* block_start_unsafe(const void* addr); | |
383 virtual HeapWord* block_start_unsafe_const(const void* addr) const; | |
384 | |
385 // Requires "addr" to be the start of a card and returns the | |
386 // start of the block that contains the given address. | |
387 HeapWord* block_start_careful(const void* addr) const; | |
388 | |
389 // If true, initialize array slots with no allocated blocks to zero. | |
390 // Otherwise, make them point back to the front. | |
391 bool init_to_zero() { return _init_to_zero; } | |
392 | |
393 // Verification & debugging - ensure that the offset table reflects the fact | |
394 // that the block [blk_start, blk_end) or [blk, blk + size) is a | |
395 // single block of storage. NOTE: can;t const this because of | |
396 // call to non-const do_block_internal() below. | |
397 inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) { | |
398 if (VerifyBlockOffsetArray) { | |
399 do_block_internal(blk_start, blk_end, Action_check); | |
400 } | |
401 } | |
402 | |
403 inline void verify_single_block(HeapWord* blk, size_t size) { | |
404 verify_single_block(blk, blk + size); | |
405 } | |
406 | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
407 // Used by region verification. Checks that the contents of the |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
408 // BOT reflect that there's a single object that spans the address |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
409 // range [obj_start, obj_start + word_size); returns true if this is |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
410 // the case, returns false if it's not. |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
411 bool verify_for_object(HeapWord* obj_start, size_t word_size) const; |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
412 |
342 | 413 // Verify that the given block is before _unallocated_block |
414 inline void verify_not_unallocated(HeapWord* blk_start, | |
415 HeapWord* blk_end) const { | |
416 if (BlockOffsetArrayUseUnallocatedBlock) { | |
417 assert(blk_start < blk_end, "Block inconsistency?"); | |
418 assert(blk_end <= _unallocated_block, "_unallocated_block problem"); | |
419 } | |
420 } | |
421 | |
422 inline void verify_not_unallocated(HeapWord* blk, size_t size) const { | |
423 verify_not_unallocated(blk, blk + size); | |
424 } | |
425 | |
426 void check_all_cards(size_t left_card, size_t right_card) const; | |
1886
72a161e62cc4
6991377: G1: race between concurrent refinement and humongous object allocation
tonyp
parents:
1552
diff
changeset
|
427 |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
428 virtual void print_on(outputStream* out) PRODUCT_RETURN; |
342 | 429 }; |
430 | |
431 // A subtype of BlockOffsetArray that takes advantage of the fact | |
432 // that its underlying space is a ContiguousSpace, so that its "active" | |
433 // region can be more efficiently tracked (than for a non-contiguous space). | |
434 class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray { | |
435 friend class VMStructs; | |
436 | |
437 // allocation boundary at which offset array must be updated | |
438 HeapWord* _next_offset_threshold; | |
439 size_t _next_offset_index; // index corresponding to that boundary | |
440 | |
441 // Work function to be called when allocation start crosses the next | |
442 // threshold in the contig space. | |
443 void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) { | |
444 alloc_block_work2(&_next_offset_threshold, &_next_offset_index, | |
445 blk_start, blk_end); | |
446 } | |
447 | |
20337 | 448 // Variant of zero_bottom_entry that does not check for availability of the |
449 // memory first. | |
450 void zero_bottom_entry_raw(); | |
451 // Variant of initialize_threshold that does not check for availability of the | |
452 // memory first. | |
453 HeapWord* initialize_threshold_raw(); | |
20336
6701abbc4441
8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
tschatzl
parents:
20273
diff
changeset
|
454 // Zero out the entry for _bottom (offset will be zero). |
6701abbc4441
8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
tschatzl
parents:
20273
diff
changeset
|
455 void zero_bottom_entry(); |
342 | 456 public: |
457 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr); | |
458 | |
459 // Initialize the threshold to reflect the first boundary after the | |
460 // bottom of the covered region. | |
461 HeapWord* initialize_threshold(); | |
462 | |
20336
6701abbc4441
8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
tschatzl
parents:
20273
diff
changeset
|
463 void reset_bot() { |
20337 | 464 zero_bottom_entry_raw(); |
465 initialize_threshold_raw(); | |
20336
6701abbc4441
8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
tschatzl
parents:
20273
diff
changeset
|
466 } |
342 | 467 |
468 // Return the next threshold, the point at which the table should be | |
469 // updated. | |
470 HeapWord* threshold() const { return _next_offset_threshold; } | |
471 | |
472 // These must be guaranteed to work properly (i.e., do nothing) | |
473 // when "blk_start" ("blk" for second version) is "NULL". In this | |
474 // implementation, that's true because NULL is represented as 0, and thus | |
475 // never exceeds the "_next_offset_threshold". | |
476 void alloc_block(HeapWord* blk_start, HeapWord* blk_end) { | |
477 if (blk_end > _next_offset_threshold) | |
478 alloc_block_work1(blk_start, blk_end); | |
479 } | |
480 void alloc_block(HeapWord* blk, size_t size) { | |
481 alloc_block(blk, blk+size); | |
482 } | |
483 | |
484 HeapWord* block_start_unsafe(const void* addr); | |
485 HeapWord* block_start_unsafe_const(const void* addr) const; | |
1886
72a161e62cc4
6991377: G1: race between concurrent refinement and humongous object allocation
tonyp
parents:
1552
diff
changeset
|
486 |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
487 void set_for_starts_humongous(HeapWord* new_top); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
488 |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
1972
diff
changeset
|
489 virtual void print_on(outputStream* out) PRODUCT_RETURN; |
342 | 490 }; |
1972 | 491 |
492 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP |