annotate src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp @ 3830:f44782f04dd4
7039627: G1: avoid BOT updates for survivor allocations and dirty survivor regions incrementally
Summary: Refactor the allocation code during GC to use the G1AllocRegion abstraction. Use separate subclasses of G1AllocRegion for survivor and old regions. Avoid BOT updates and dirty survivor cards incrementally for the former.
Reviewed-by: brutisso, johnc, ysr
author    tonyp
date      Fri, 12 Aug 2011 11:31:06 -0400
parents   2250ee17e258
children  d2a62e0f25eb
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP

#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start".  For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important.  Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// While generally mirroring the structure of the BOT for GenCollectedHeap,
// the following types are tailored more towards G1's uses; these should,
// however, be merged back into a common BOT to avoid code duplication
// and reduce maintenance overhead.
//
//    G1BlockOffsetTable (abstract)
//    -- G1BlockOffsetArray            (uses G1BlockOffsetSharedArray)
//       -- G1BlockOffsetArrayContigSpace
//
// A main impediment to the consolidation of this code might be the
// effect of making some of the block_start*() calls non-const as
// below.  Whether that might adversely affect performance optimizations
// that compilers might normally perform in the case of non-G1
// collectors needs to be carefully investigated prior to any such
// consolidation.

// Forward declarations
class ContiguousSpace;
class G1BlockOffsetSharedArray;

class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
    _bottom(bottom), _end(end)
  {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.  (May have side effects, namely updating of
  // shared array entries that "point" too far backwards.  This can occur,
  // for example, when LAB allocation is used in a space covered by the
  // table.)
  virtual HeapWord* block_start_unsafe(const void* addr) = 0;
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.  (May have side effects,
  // namely updating of shared array entries that "point" too far
  // backwards.  This can occur, for example, when LAB allocation is used
  // in a space covered by the table.)
  inline HeapWord* block_start(const void* addr);
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  inline HeapWord* block_start_const(const void* addr) const;
};
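
// Illustrative sketch (not part of this header; the real bodies live in
// g1BlockOffsetTable.inline.hpp): block_start() is expected to be a
// bounds check around the unsafe variant, roughly:
//
//   inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
//     if (addr >= _bottom && addr < _end) {
//       return block_start_unsafe(addr);  // may update BOT entries
//     } else {
//       return NULL;                      // addr is covered by no block
//     }
//   }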

// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN").  An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space.  However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place (such as,
// for example, in G1 or in the train generation).

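// A worked example (illustrative numbers, using the constants defined
// below): with LogN = 9 each subregion ("card") covers 2^9 = 512 bytes;
// on a 64-bit VM (LogHeapWordSize = 3) that is N_words = 2^(9-3) = 64
// words per card.  A 1 MB space thus needs 1M / 512 = 2048 offset-array
// slots, and an object that starts 40 words before a card boundary is
// recorded in that card's slot as the back offset 40.
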
// Here is the shared array type.

class G1BlockOffsetSharedArray: public CHeapObj {
  friend class G1BlockOffsetArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;    // byte array keeping backwards offsets

  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }

  void set_offset_array(size_t index, u_char offset) {
    assert(index < _vs.committed_size(), "index out of range");
    assert(offset <= N_words, "offset too large");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    _offset_array[index] = (u_char) pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;
    memset(&_offset_array[index_for(left)], offset, num_cards);
  }

  void set_offset_array(size_t left, size_t right, u_char offset) {
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;
    memset(&_offset_array[left], offset, num_cards);
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::page_align_size_up(number_of_slots);
  }

public:
  enum SomePublicConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };

  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size".  In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".)  The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // G1BlockOffsetTable(s) to initialize cards.
  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table.  The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Updates all the BlockOffsetArrays sharing this shared array to
  // reflect the current "top"s of their spaces.
  void update_offset_arrays();

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
};

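// Illustrative sketch (not part of this header; the real implementations
// live in g1BlockOffsetTable.inline.hpp): the index/address mapping is
// expected to be plain shift arithmetic against the reserved region,
// roughly:
//
//   size_t index_for(const void* p) const {
//     // byte distance from the start of the reserved region,
//     // divided by the card size (2^LogN bytes)
//     return pointer_delta((char*)p, (char*)_reserved.start(),
//                          sizeof(char)) >> LogN;
//   }
//
//   HeapWord* address_for_index(size_t index) const {
//     return _reserved.start() + (index << LogN_words);
//   }
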
// And here is the G1BlockOffsetTable subtype that uses the array.

class G1BlockOffsetArray: public G1BlockOffsetTable {
  friend class G1BlockOffsetSharedArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;
private:
  enum SomePrivateConstants {
    N_words = G1BlockOffsetSharedArray::N_words,
    LogN    = G1BlockOffsetSharedArray::LogN
  };

  // The following enums are used by do_block_internal()
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  // This is the array, which can be shared by several BlockOffsetArrays
  // servicing different spaces.
  G1BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If "_sp" is a contiguous space, the field below is the view of "_sp"
  // as a contiguous space, else NULL.
  ContiguousSpace* _csp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the
  // interval [start, end) is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);

protected:

  ContiguousSpace* csp() const { return _csp; }

  // Returns the address of a block whose start is at most "addr".
  // If "has_max_index" is true, assumes "max_index" is the last valid one
  // in the array.
  inline HeapWord* block_at_or_preceding(const void* addr,
                                         bool has_max_index,
                                         size_t max_index) const;

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.)  Return the address of the
  // beginning of the block that contains "addr".  Does so without side
  // effects (see, e.g., spec of block_start.)
  inline HeapWord*
  forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                         const void* addr) const;

  // "q" is a block boundary that is <= "addr"; return the address of the
  // beginning of the block that contains "addr".  May have side effects
  // on "this", by updating imprecise entries.
  inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
                                                    const void* addr);

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.)  Return the address of the
  // beginning of the block that contains "addr".  May have side effects
  // on "this", by updating imprecise entries.
  HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
                                                  HeapWord* n,
                                                  const void* addr);

  // Requires that "*threshold_" be the first array entry boundary at or
  // above "blk_start", and that "*index_" be the corresponding array
  // index.  If the block starts at or crosses "*threshold_", records
  // "blk_start" as the appropriate block start for the array index
  // starting at "*threshold_", and for any other indices crossed by the
  // block.  Updates "*threshold_" and "*index_" to correspond to the first
  // index after the block end.
  void alloc_block_work2(HeapWord** threshold_, size_t* index_,
                         HeapWord* blk_start, HeapWord* blk_end);

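  // Illustrative sketch (not the original inline code): block_start_unsafe()
  // on a subclass is expected to compose the two helpers above, roughly:
  //
  //   HeapWord* q = block_at_or_preceding(addr, false, 0);
  //   return forward_to_block_containing_addr(q, addr);
  //
  // i.e., use the offset array to jump to a block boundary at or before
  // "addr", then walk forward block by block until the block containing
  // "addr" is found.
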
public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter.  If "init_to_zero" is true, the
  // elements of the array are initialized to zero.  Otherwise, they are
  // initialized to point backwards to the beginning.
  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
                     bool init_to_zero);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp);

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr);

  // Resets the covered region to one with the same _bottom as before but
  // the "new_word_size".
  void resize(size_t new_word_size);

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL".
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  virtual void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // general, non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks, [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
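
  // For instance (an illustrative worked case, not from the original
  // comment): after split_block(blk_start, 100, 30) on a correct
  // 100-word block, the BOT describes the two blocks
  // [blk_start, blk_start + 30) and [blk_start + 30, blk_start + 100).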

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size).  All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size).  Only the first card
  // of BOT is touched.  It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
  void mark_block(HeapWord* blk, size_t size) {
    mark_block(blk, blk + size);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed.  It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  inline void allocated(HeapWord* blk, size_t size) {
    allocated(blk, blk + size);
  }

  inline void freed(HeapWord* blk_start, HeapWord* blk_end);

  inline void freed(HeapWord* blk, size_t size);

  virtual HeapWord* block_start_unsafe(const void* addr);
  virtual HeapWord* block_start_unsafe_const(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }

  // Verification & debugging: ensure that the offset table reflects the
  // fact that the block [blk_start, blk_end) or [blk, blk + size) is a
  // single block of storage.  NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (VerifyBlockOffsetArray) {
      do_block_internal(blk_start, blk_end, Action_check);
    }
  }

  inline void verify_single_block(HeapWord* blk, size_t size) {
    verify_single_block(blk, blk + size);
  }

  // Used by region verification.  Checks that the contents of the
  // BOT reflect that there's a single object that spans the address
  // range [obj_start, obj_start + word_size); returns true if this is
  // the case, returns false if it's not.
  bool verify_for_object(HeapWord* obj_start, size_t word_size) const;

  // Verify that the given block is before _unallocated_block
  inline void verify_not_unallocated(HeapWord* blk_start,
                                     HeapWord* blk_end) const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      assert(blk_start < blk_end, "Block inconsistency?");
      assert(blk_end <= _unallocated_block, "_unallocated_block problem");
    }
  }

  inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
    verify_not_unallocated(blk, blk + size);
  }

  void check_all_cards(size_t left_card, size_t right_card) const;

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
  friend class VMStructs;

  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Work function to be called when allocation start crosses the next
  // threshold in the contig space.
  void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
    alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                      blk_start, blk_end);
  }

public:
  G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Initialize the threshold to reflect the first boundary after the
  // bottom of the covered region.
  HeapWord* initialize_threshold();

  // Zero out the entry for _bottom (offset will be zero).
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL".  In this
  // implementation, that's true because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold)
      alloc_block_work1(blk_start, blk_end);
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
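
  // Illustrative usage (a sketch, not from the original source; assumes
  // the owning space holds this table in a field named "_offsets"):
  //
  //   HeapWord* old_top = top();
  //   HeapWord* new_top = old_top + word_size;  // after a bump-pointer alloc
  //   _offsets.alloc_block(old_top, new_top);
  //
  // The call is a no-op until new_top first crosses the card boundary kept
  // in _next_offset_threshold, so most allocations never touch the table.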

  HeapWord* block_start_unsafe(const void* addr);
  HeapWord* block_start_unsafe_const(const void* addr) const;

  void set_for_starts_humongous(HeapWord* new_top);

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP