annotate src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp @ 1959:9eecf81a02fb
7000578: CMS: assert(SafepointSynchronize::is_at_safepoint()) failed: Else races are possible
Summary: Weakened assert in obj_is_alive() to allow its use at initialization time when is_at_safepoint() normally reports false; added some related asserts to check the ordering of is_init_completed() after Universe::is_fully_initialized().
Reviewed-by: jcoomes
author    ysr
date      Tue, 16 Nov 2010 13:58:48 -0800
parents   be3f9c242c9d
children  f95d63e2154a
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Classes in support of keeping track of promotions into a non-contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated

  void print_on(outputStream* st) const;
};

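// Illustrative sketch (not part of the original header): bump-pointer
// allocation out of a LinearAllocBlock. This free function is an
// assumption added for illustration only; the space's getChunkFrom*
// methods declared below are the real entry points and also handle
// refills and block-offset-table updates.
inline HeapWord* linAB_carve_example(LinearAllocBlock* blk, size_t word_sz) {
  if (word_sz >= blk->_allocation_size_limit) return NULL; // too big for a LinAB
  if (blk->_word_size < word_sz) return NULL;              // remainder too small; needs refill
  HeapWord* res = blk->_ptr;      // carve from the front of the block
  blk->_ptr       += word_sz;     // bump the cursor (HeapWord* arithmetic is in words)
  blk->_word_size -= word_sz;     // shrink the recorded remainder
  return res;
}
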
// Concrete subclass of CompactibleSpace that implements
// a free list space, such as is used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class CMSPermGenGen;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan(), which modifies
  // par_seq_tasks, which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential-tasks-done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,   // size < this, then use _sLAB
    SmallForDictionary  = 257,  // size < this, then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
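  // Size-class routing implied by the constants above (illustrative
  // summary, not part of the original header):
  //   word size <  SmallForLinearAlloc (16)  -> small linear alloc block
  //   word size <  SmallForDictionary (257)  -> _indexedFreeList[size]
  //   otherwise                              -> _dictionary of large blocks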
  static int IndexSetStart;
  static int IndexSetStride;

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpaces are allocated in global total order.
  static int _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of the ubiquity of locking even for otherwise const methods.
  mutable Mutex _freelistLock;
  // Locking verifier convenience function.
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
  FreeBlockDictionary* _dictionary;  // pointer to dictionary for large size blocks

  FreeList _indexedFreeList[IndexSetSize];
                                     // indexed array for small size blocks
  // allocation strategy
  bool _fitStrategy;                 // Use best fit strategy.
  bool _adaptive_freelists;          // Use adaptive free lists.

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap. Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of the limit of sweep for the space.
  HeapWord* _sweep_limit;

  // Support for compacting CMS
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize"). If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first. This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first. This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);
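
  // Illustrative sketch (not part of the original header): a plausible
  // shape for the public allocate() entry point declared further below,
  // dispatching on _adaptive_freelists. A sketch only; the real
  // definition lives in the .cpp file and performs additional checks.
  //
  //   HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  //     assert_locked();
  //     size = adjustObjectSize(size);
  //     return _adaptive_freelists ? allocate_adaptive_freelists(size)
  //                                : allocate_non_adaptive_freelists(size);
  //   }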

  // Gets a chunk from the linear allocation block (LinAB). If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.
  // Do not attempt to refill if the space is not available; return NULL.
  // Do the repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size". Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list. If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it. The split off remainder is returned to
  // the free lists. The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary::DictionaryChoice);
  // accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values
  static void set_cms_values();

  // Return the free chunk at the end of the space. If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of the highest allocated
  // address and correcting it at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region(),
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  // This is needed because the default implementation uses block_start(),
  // which can't be used at certain times (for example, phase 3 of mark-sweep).
  // A better fix is to change the assertions in phase 3 of mark-sweep to
  // use is_in_reserved(), but that is deferred since the is_in() assertions
  // are buried through several layers of callers and are used elsewhere
  // as well.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void oop_iterate(OopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap. The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
  // objects in the space with references to objects that are no longer
  // valid. For example, an object may reference another object
  // that has already been swept up (collected). This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. A return of "NULL" indicates that the
  // iteration completed.
  virtual HeapWord*
         object_iterate_careful_m(MemRegion mr,
                                  ObjectClosureCareful* cl);
  virtual HeapWord*
         object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
  #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
  #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related housekeeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSBs having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size. This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

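  // Worked example for adjustObjectSize() above (illustrative; the real
  // MinChunkSize and object alignment are platform-dependent): assuming
  // MinChunkSize == 4 words and 2-word object alignment,
  //   adjustObjectSize(1) == 4   (raised to the minimum chunk size)
  //   adjustObjectSize(5) == 6   (rounded up to the alignment)
  // so any block handed out can later stand alone as a free chunk.
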
  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print() const;
  void print_on(outputStream* st) const;
  void prepare_for_verify();
  void verify(bool allow_dirty) const;
  void verifyFreeLists() const PRODUCT_RETURN;
  void verifyIndexedFreeLists() const;
  void verifyIndexedFreeList(size_t size) const;
  // verify that the given chunk is in the free lists.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void checkFreeListConsistency() const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT(
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or the
  // coalescing of smaller chunks, etc. The counts in the
  // census are used to make decisions on the splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size. We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //   EITHER
  //     . left-hand chunk is of a size that is coal-overpopulated
  //   OR
  //     . right-hand chunk is close-to-mountain
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void splitBirth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

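// Illustrative sketch (not part of the original header): the coalescing
// predicate described in the census comment above, expressed over the
// measured census values. This helper and its parameter names are
// assumptions added for illustration only.
inline bool cfls_should_coalesce_example(double num_len,         // population of left chunk's size
                                         double num_desired,     // num-prev + split-births - split-deaths - num-curr
                                         double chunk_offset,    // heap-offset C of right-hand chunk
                                         double mountain_offset) // heap-offset M of the "mountain"
{
  const double coal_surplus  = 1.05;  // allow 5% slop over estimated demand
  const double mountain_frac = 0.99;  // "close-to-mountain" threshold
  // A non-existent size or one with num-desired <= 0 counts as overpopulated.
  bool coal_overpopulated = (num_desired <= 0) ||
                            (num_len >= num_desired * coal_surplus);
  bool close_to_mountain  = chunk_offset >= mountain_frac * mountain_offset;
  // Coalesce iff the left-hand chunk's size is coal-overpopulated OR the
  // right-hand chunk is close to the large chunk kept at the end of the heap.
  return coal_overpopulated || close_to_mountain;
}
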
// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                 _blocks_to_claim   [CompactibleFreeListSpace::IndexSetSize];
  static size_t  _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static int     _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t         _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, FreeList* fl);

 public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // Used when the settings are modified from the default static initialization.
  static void modify_initialization(size_t n, unsigned wt);
};
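
// Illustrative sketch (not part of the original header): the intended
// per-worker usage pattern for a CFLS_LAB during a parallel scavenge.
// next_object_to_promote(), object_size_in_words() and copy_object()
// are hypothetical helpers, named here only to show the call sequence.
//
//   void par_promote_worker_example(int tid, CompactibleFreeListSpace* space) {
//     CFLS_LAB lab(space);                    // thread-local buffer over the space
//     while (HeapWord* obj = next_object_to_promote()) {
//       size_t word_sz = object_size_in_words(obj);
//       HeapWord* dest = lab.alloc(word_sz);  // draws on local lists, refilling
//                                             // from the global pool as needed
//       if (dest == NULL) break;              // space exhausted
//       copy_object(obj, dest, word_sz);
//     }
//     lab.retire(tid);  // return unused chunks to the global free lists
//   }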

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}