Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp @ 6862:8a5ea0a9ccc4
7127708: G1: change task num types from int to uint in concurrent mark
Summary: Change the type of various task num fields, parameters etc to unsigned and rename them to be more consistent with the other collectors. Code changes were also reviewed by Vitaly Davidovich.
Reviewed-by: johnc
Contributed-by: Kaushik Srenevasan <kaushik@twitter.com>
author | johnc |
---|---|
date | Sat, 06 Oct 2012 01:17:44 -0700 |
parents | da91efe96a93 |
children | 685df3c6f84b |
rev | line source |
---|---|
0 | 1 /* |
6008 | 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1521
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1521
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1521
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP |
26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP | |
27 | |
28 #include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp" | |
6026 | 29 #include "memory/binaryTreeDictionary.hpp" |
1972 | 30 #include "memory/blockOffsetTable.inline.hpp" |
6026 | 31 #include "memory/freeList.hpp" |
1972 | 32 #include "memory/space.hpp" |
33 | |
0 | 34 // Classes in support of keeping track of promotions into a non-Contiguous |
35 // space, in this case a CompactibleFreeListSpace. | |
36 | |
37 // Forward declarations | |
38 class CompactibleFreeListSpace; | |
39 class BlkClosure; | |
40 class BlkClosureCareful; | |
41 class UpwardsObjectClosure; | |
42 class ObjectClosureCareful; | |
43 class Klass; | |
44 | |
45 class LinearAllocBlock VALUE_OBJ_CLASS_SPEC { | |
46 public: | |
47 LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0), | |
48 _allocation_size_limit(0) {} | |
49 void set(HeapWord* ptr, size_t word_size, size_t refill_size, | |
50 size_t allocation_size_limit) { | |
51 _ptr = ptr; | |
52 _word_size = word_size; | |
53 _refillSize = refill_size; | |
54 _allocation_size_limit = allocation_size_limit; | |
55 } | |
56 HeapWord* _ptr; | |
57 size_t _word_size; | |
58 size_t _refillSize; | |
59 size_t _allocation_size_limit; // largest size that will be allocated | |
1716
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
60 |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
61 void print_on(outputStream* st) const; |
0 | 62 }; |
63 | |
64 // Concrete subclass of CompactibleSpace that implements | |
65 // a free list space, such as used in the concurrent mark sweep | |
66 // generation. | |
67 | |
68 class CompactibleFreeListSpace: public CompactibleSpace { | |
69 friend class VMStructs; | |
70 friend class ConcurrentMarkSweepGeneration; | |
71 friend class ASConcurrentMarkSweepGeneration; | |
72 friend class CMSCollector; | |
73 // Local alloc buffer for promotion into this space. | |
74 friend class CFLS_LAB; | |
75 | |
76 // "Size" of chunks of work (executed during parallel remark phases | |
77 // of CMS collection); this probably belongs in CMSCollector, although | |
78 // it's cached here because it's used in | |
79 // initialize_sequential_subtasks_for_rescan() which modifies | |
80 // par_seq_tasks which also lives in Space. XXX | |
81 const size_t _rescan_task_size; | |
82 const size_t _marking_task_size; | |
83 | |
84 // Yet another sequential tasks done structure. This supports | |
85 // CMS GC, where we have threads dynamically | |
86 // claiming sub-tasks from a larger parallel task. | |
87 SequentialSubTasksDone _conc_par_seq_tasks; | |
88 | |
89 BlockOffsetArrayNonContigSpace _bt; | |
90 | |
91 CMSCollector* _collector; | |
92 ConcurrentMarkSweepGeneration* _gen; | |
93 | |
94 // Data structures for free blocks (used during allocation/sweeping) | |
95 | |
96 // Allocation is done linearly from two different blocks depending on | |
97 // whether the request is small or large, in an effort to reduce | |
98 // fragmentation. We assume that any locking for allocation is done | |
99 // by the containing generation. Thus, none of the methods in this | |
100 // space are re-entrant. | |
101 enum SomeConstants { | |
102 SmallForLinearAlloc = 16, // size < this then use _sLAB | |
103 SmallForDictionary = 257, // size < this then use _indexedFreeList | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
104 IndexSetSize = SmallForDictionary // keep this odd-sized |
0 | 105 }; |
4068
5a5ed80bea5b
7105163: CMS: some mentions of MinChunkSize should be IndexSetStart
ysr
parents:
4024
diff
changeset
|
106 static size_t IndexSetStart; |
5a5ed80bea5b
7105163: CMS: some mentions of MinChunkSize should be IndexSetStart
ysr
parents:
4024
diff
changeset
|
107 static size_t IndexSetStride; |
0 | 108 |
109 private: | |
110 enum FitStrategyOptions { | |
111 FreeBlockStrategyNone = 0, | |
112 FreeBlockBestFitFirst | |
113 }; | |
114 | |
115 PromotionInfo _promoInfo; | |
116 | |
117 // helps to impose a global total order on freelistLock ranks; | |
118 // assumes that CFLSpace's are allocated in global total order | |
119 static int _lockRank; | |
120 | |
121 // a lock protecting the free lists and free blocks; | |
122 // mutable because of ubiquity of locking even for otherwise const methods | |
123 mutable Mutex _freelistLock; | |
124 // locking verifier convenience function | |
125 void assert_locked() const PRODUCT_RETURN; | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
126 void assert_locked(const Mutex* lock) const PRODUCT_RETURN; |
0 | 127 |
128 // Linear allocation blocks | |
129 LinearAllocBlock _smallLinearAllocBlock; | |
130 | |
6026 | 131 FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice; |
132 FreeBlockDictionary<FreeChunk>* _dictionary; // ptr to dictionary for large size blocks | |
0 | 133 |
6026 | 134 FreeList<FreeChunk> _indexedFreeList[IndexSetSize]; |
0 | 135 // indexed array for small size blocks |
136 // allocation strategy | |
137 bool _fitStrategy; // Use best fit strategy. | |
138 bool _adaptive_freelists; // Use adaptive freelists | |
139 | |
140 // This is an address close to the largest free chunk in the heap. | |
141 // It is currently assumed to be at the end of the heap. Free | |
142 // chunks with addresses greater than nearLargestChunk are coalesced | |
143 // in an effort to maintain a large chunk at the end of the heap. | |
144 HeapWord* _nearLargestChunk; | |
145 | |
146 // Used to keep track of limit of sweep for the space | |
147 HeapWord* _sweep_limit; | |
148 | |
149 // Support for compacting cms | |
150 HeapWord* cross_threshold(HeapWord* start, HeapWord* end); | |
151 HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); | |
152 | |
153 // Initialization helpers. | |
154 void initializeIndexedFreeListArray(); | |
155 | |
156 // Extra stuff to manage promotion parallelism. | |
157 | |
158 // a lock protecting the dictionary during par promotion allocation. | |
159 mutable Mutex _parDictionaryAllocLock; | |
160 Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; } | |
161 | |
162 // Locks protecting the exact lists during par promotion allocation. | |
163 Mutex* _indexedFreeListParLocks[IndexSetSize]; | |
164 | |
165 // Attempt to obtain up to "n" blocks of the size "word_sz" (which is | |
166 // required to be smaller than "IndexSetSize".) If successful, | |
167 // adds them to "fl", which is required to be an empty free list. | |
168 // If the count of "fl" is negative, its absolute value indicates a | |
169 // number of free chunks that had been previously "borrowed" from global | |
170 // list of size "word_sz", and must now be decremented. | |
6026 | 171 void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl); |
0 | 172 |
173 // Allocation helper functions | |
174 // Allocate using a strategy that takes from the indexed free lists | |
175 // first. This allocation strategy assumes a companion sweeping | |
176 // strategy that attempts to keep the needed number of chunks in each | |
177 // indexed free lists. | |
178 HeapWord* allocate_adaptive_freelists(size_t size); | |
179 // Allocate from the linear allocation buffers first. This allocation | |
180 // strategy assumes maximal coalescing can maintain chunks large enough | |
181 // to be used as linear allocation buffers. | |
182 HeapWord* allocate_non_adaptive_freelists(size_t size); | |
183 | |
184 // Gets a chunk from the linear allocation block (LinAB). If there | |
185 // is not enough space in the LinAB, refills it. | |
186 HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size); | |
187 HeapWord* getChunkFromSmallLinearAllocBlock(size_t size); | |
188 // Get a chunk from the space remaining in the linear allocation block. Do | |
189 // not attempt to refill if the space is not available, return NULL. Do the | |
190 // repairs on the linear allocation block as appropriate. | |
191 HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size); | |
192 inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size); | |
193 | |
194 // Helper function for getChunkFromIndexedFreeList. | |
195 // Replenish the indexed free list for this "size". Do not take from an | |
196 // underpopulated size. | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
197 FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true); |
0 | 198 |
199 // Get a chunk from the indexed free list. If the indexed free list | |
200 // does not have a free chunk, try to replenish the indexed free list | |
201 // then get the free chunk from the replenished indexed free list. | |
202 inline FreeChunk* getChunkFromIndexedFreeList(size_t size); | |
203 | |
204 // The returned chunk may be larger than requested (or null). | |
205 FreeChunk* getChunkFromDictionary(size_t size); | |
206 // The returned chunk is the exact size requested (or null). | |
207 FreeChunk* getChunkFromDictionaryExact(size_t size); | |
208 | |
209 // Find a chunk in the indexed free list that is the best | |
210 // fit for size "numWords". | |
211 FreeChunk* bestFitSmall(size_t numWords); | |
212 // For free list "fl" of chunks of size > numWords, | |
213 // remove a chunk, split off a chunk of size numWords | |
214 // and return it. The split off remainder is returned to | |
215 // the free lists. The old name for getFromListGreater | |
216 // was lookInListGreater. | |
6026 | 217 FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords); |
0 | 218 // Get a chunk in the indexed free list or dictionary, |
219 // by considering a larger chunk and splitting it. | |
220 FreeChunk* getChunkFromGreater(size_t numWords); | |
221 // Verify that the given chunk is in the indexed free lists. | |
222 bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const; | |
223 // Remove the specified chunk from the indexed free lists. | |
224 void removeChunkFromIndexedFreeList(FreeChunk* fc); | |
225 // Remove the specified chunk from the dictionary. | |
226 void removeChunkFromDictionary(FreeChunk* fc); | |
227 // Split a free chunk into a smaller free chunk of size "new_size". | |
228 // Return the smaller free chunk and return the remainder to the | |
229 // free lists. | |
230 FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size); | |
231 // Add a chunk to the free lists. | |
232 void addChunkToFreeLists(HeapWord* chunk, size_t size); | |
233 // Add a chunk to the free lists, preferring to suffix it | |
234 // to the last free chunk at end of space if possible, and | |
235 // updating the block census stats as well as block offset table. | |
236 // Take any locks as appropriate if we are multithreaded. | |
237 void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size); | |
238 // Add a free chunk to the indexed free lists. | |
239 void returnChunkToFreeList(FreeChunk* chunk); | |
240 // Add a free chunk to the dictionary. | |
241 void returnChunkToDictionary(FreeChunk* chunk); | |
242 | |
243 // Functions for maintaining the linear allocation buffers (LinAB). | |
244 // Repairing a linear allocation block refers to operations | |
245 // performed on the remainder of a LinAB after an allocation | |
246 // has been made from it. | |
247 void repairLinearAllocationBlocks(); | |
248 void repairLinearAllocBlock(LinearAllocBlock* blk); | |
249 void refillLinearAllocBlock(LinearAllocBlock* blk); | |
250 void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk); | |
251 void refillLinearAllocBlocksIfNeeded(); | |
252 | |
253 void verify_objects_initialized() const; | |
254 | |
255 // Statistics reporting helper functions | |
256 void reportFreeListStatistics() const; | |
257 void reportIndexedFreeListStatistics() const; | |
258 size_t maxChunkSizeInIndexedFreeLists() const; | |
259 size_t numFreeBlocksInIndexedFreeLists() const; | |
260 // Accessor | |
261 HeapWord* unallocated_block() const { | |
1716
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
262 if (BlockOffsetArrayUseUnallocatedBlock) { |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
263 HeapWord* ub = _bt.unallocated_block(); |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
264 assert(ub >= bottom() && |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
265 ub <= end(), "space invariant"); |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
266 return ub; |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
267 } else { |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
268 return end(); |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
269 } |
0 | 270 } |
271 void freed(HeapWord* start, size_t size) { | |
272 _bt.freed(start, size); | |
273 } | |
274 | |
275 protected: | |
276 // reset the indexed free list to its initial empty condition. | |
277 void resetIndexedFreeListArray(); | |
278 // reset to an initial state with a single free block described | |
279 // by the MemRegion parameter. | |
280 void reset(MemRegion mr); | |
281 // Return the total number of words in the indexed free lists. | |
282 size_t totalSizeInIndexedFreeLists() const; | |
283 | |
284 public: | |
285 // Constructor... | |
286 CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr, | |
287 bool use_adaptive_freelists, | |
6026 | 288 FreeBlockDictionary<FreeChunk>::DictionaryChoice); |
0 | 289 // accessors |
290 bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; } | |
6026 | 291 FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; } |
0 | 292 HeapWord* nearLargestChunk() const { return _nearLargestChunk; } |
293 void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; } | |
294 | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
295 // Set CMS global values |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
296 static void set_cms_values(); |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
297 |
0 | 298 // Return the free chunk at the end of the space. If no such |
299 // chunk exists, return NULL. | |
300 FreeChunk* find_chunk_at_end(); | |
301 | |
12
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
302 bool adaptive_freelists() const { return _adaptive_freelists; } |
0 | 303 |
304 void set_collector(CMSCollector* collector) { _collector = collector; } | |
305 | |
306 // Support for parallelization of rescan and marking | |
307 const size_t rescan_task_size() const { return _rescan_task_size; } | |
308 const size_t marking_task_size() const { return _marking_task_size; } | |
309 SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; } | |
310 void initialize_sequential_subtasks_for_rescan(int n_threads); | |
311 void initialize_sequential_subtasks_for_marking(int n_threads, | |
312 HeapWord* low = NULL); | |
313 | |
314 // Space enquiries | |
315 size_t used() const; | |
316 size_t free() const; | |
317 size_t max_alloc_in_words() const; | |
318 // XXX: should have a less conservative used_region() than that of | |
319 // Space; we could consider keeping track of highest allocated | |
320 // address and correcting that at each sweep, as the sweeper | |
321 // goes through the entire allocated part of the generation. We | |
322 // could also use that information to keep the sweeper from | |
323 // sweeping more than is necessary. The allocator and sweeper will | |
324 // of course need to synchronize on this, since the sweeper will | |
325 // try to bump down the address and the allocator will try to bump it up. | |
326 // For now, however, we'll just use the default used_region() | |
327 // which overestimates the region by returning the entire | |
328 // committed region (this is safe, but inefficient). | |
329 | |
330 // Returns a subregion of the space containing all the objects in | |
331 // the space. | |
332 MemRegion used_region() const { | |
333 return MemRegion(bottom(), | |
334 BlockOffsetArrayUseUnallocatedBlock ? | |
335 unallocated_block() : end()); | |
336 } | |
337 | |
338 bool is_in(const void* p) const { | |
339 return used_region().contains(p); | |
340 } | |
341 | |
342 virtual bool is_free_block(const HeapWord* p) const; | |
343 | |
344 // Resizing support | |
345 void set_end(HeapWord* value); // override | |
346 | |
347 // mutual exclusion support | |
348 Mutex* freelistLock() const { return &_freelistLock; } | |
349 | |
350 // Iteration support | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
351 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
352 void oop_iterate(ExtendedOopClosure* cl); |
0 | 353 |
354 void object_iterate(ObjectClosure* blk); | |
517
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
355 // Apply the closure to each object in the space whose references |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
356 // point to objects in the heap. The usage of CompactibleFreeListSpace |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
357 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
358 // objects in the space with references to objects that are no longer |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
359 // valid. For example, an object may reference another object |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
360 // that has already been swept up (collected). This method uses |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
361 // obj_is_alive() to determine whether it is safe to iterate over |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
362 // an object. |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
363 void safe_object_iterate(ObjectClosure* blk); |
0 | 364 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); |
365 | |
366 // Requires that "mr" be entirely within the space. | |
367 // Apply "cl->do_object" to all objects that intersect with "mr". | |
368 // If the iteration encounters an unparseable portion of the region, | |
369 // terminate the iteration and return the address of the start of the | |
370 // subregion that isn't done. Return of "NULL" indicates that the | |
371 // iteration completed. | |
372 virtual HeapWord* | |
373 object_iterate_careful_m(MemRegion mr, | |
374 ObjectClosureCareful* cl); | |
375 virtual HeapWord* | |
376 object_iterate_careful(ObjectClosureCareful* cl); | |
377 | |
378 // Override: provides a DCTO_CL specific to this kind of space. | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
379 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, |
0 | 380 CardTableModRefBS::PrecisionStyle precision, |
381 HeapWord* boundary); | |
382 | |
383 void blk_iterate(BlkClosure* cl); | |
384 void blk_iterate_careful(BlkClosureCareful* cl); | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
385 HeapWord* block_start_const(const void* p) const; |
0 | 386 HeapWord* block_start_careful(const void* p) const; |
387 size_t block_size(const HeapWord* p) const; | |
388 size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const; | |
389 bool block_is_obj(const HeapWord* p) const; | |
390 bool obj_is_alive(const HeapWord* p) const; | |
391 size_t block_size_nopar(const HeapWord* p) const; | |
392 bool block_is_obj_nopar(const HeapWord* p) const; | |
393 | |
394 // iteration support for promotion | |
395 void save_marks(); | |
396 bool no_allocs_since_save_marks(); | |
397 void object_iterate_since_last_GC(ObjectClosure* cl); | |
398 | |
399 // iteration support for sweeping | |
400 void save_sweep_limit() { | |
401 _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ? | |
402 unallocated_block() : end(); | |
3746
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
1972
diff
changeset
|
403 if (CMSTraceSweeper) { |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
1972
diff
changeset
|
404 gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
1972
diff
changeset
|
405 " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<", |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
1972
diff
changeset
|
406 _sweep_limit, bottom(), end()); |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
1972
diff
changeset
|
407 } |
0 | 408 } |
409 NOT_PRODUCT( | |
410 void clear_sweep_limit() { _sweep_limit = NULL; } | |
411 ) | |
412 HeapWord* sweep_limit() { return _sweep_limit; } | |
413 | |
414 // Apply "blk->do_oop" to the addresses of all reference fields in objects | |
415 // promoted into this generation since the most recent save_marks() call. | |
416 // Fields in objects allocated by applications of the closure | |
417 // *are* included in the iteration. Thus, when the iteration completes | |
418 // there should be no further such objects remaining. | |
419 #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ | |
420 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk); | |
421 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL) | |
422 #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL | |
423 | |
424 // Allocation support | |
425 HeapWord* allocate(size_t size); | |
426 HeapWord* par_allocate(size_t size); | |
427 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
428 oop promote(oop obj, size_t obj_size); |
0 | 429 void gc_prologue(); |
430 void gc_epilogue(); | |
431 | |
432 // This call is used by a containing CMS generation / collector | |
433 // to inform the CFLS space that a sweep has been completed | |
434 // and that the space can do any related house-keeping functions. | |
435 void sweep_completed(); | |
436 | |
437 // For an object in this space, the mark-word's two | |
438 // LSB's having the value [11] indicates that it has been | |
439 // promoted since the most recent call to save_marks() on | |
440 // this generation and has not subsequently been iterated | |
441 // over (using oop_since_save_marks_iterate() above). | |
1521 | 442 // This property holds only for single-threaded collections, |
443 // and is typically used for Cheney scans; for MT scavenges, | |
444 // the property holds for all objects promoted during that | |
445 // scavenge for the duration of the scavenge and is used | |
446 // by card-scanning to avoid scanning objects (being) promoted | |
447 // during that scavenge. | |
0 | 448 bool obj_allocated_since_save_marks(const oop obj) const { |
449 assert(is_in_reserved(obj), "Wrong space?"); | |
450 return ((PromotedObject*)obj)->hasPromotedMark(); | |
451 } | |
452 | |
453 // A worst-case estimate of the space required (in HeapWords) to expand the | |
454 // heap when promoting an obj of size obj_size. | |
455 size_t expansionSpaceRequired(size_t obj_size) const; | |
456 | |
457 FreeChunk* allocateScratch(size_t size); | |
458 | |
459 // returns true if either the small or large linear allocation buffer is empty. | |
12
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
460 bool linearAllocationWouldFail() const; |
0 | 461 |
462 // Adjust the chunk for the minimum size. This version is called in | |
463 // most cases in CompactibleFreeListSpace methods. | |
464 inline static size_t adjustObjectSize(size_t size) { | |
465 return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize)); | |
466 } | |
467 // This is a virtual version of adjustObjectSize() that is called | |
468 // only occasionally when the compaction space changes and the type | |
469 // of the new compaction space is is only known to be CompactibleSpace. | |
470 size_t adjust_object_size_v(size_t size) const { | |
471 return adjustObjectSize(size); | |
472 } | |
473 // Minimum size of a free block. | |
474 virtual size_t minimum_free_block_size() const { return MinChunkSize; } | |
475 void removeFreeChunkFromFreeLists(FreeChunk* chunk); | |
476 void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, | |
477 bool coalesced); | |
478 | |
12
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
479 // Support for decisions regarding concurrent collection policy |
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
480 bool should_concurrent_collect() const; |
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
481 |
0 | 482 // Support for compaction |
483 void prepare_for_compaction(CompactPoint* cp); | |
484 void adjust_pointers(); | |
485 void compact(); | |
486 // reset the space to reflect the fact that a compaction of the | |
487 // space has been done. | |
488 virtual void reset_after_compaction(); | |
489 | |
490 // Debugging support | |
491 void print() const; | |
1716
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1579
diff
changeset
|
492 void print_on(outputStream* st) const; |
0 | 493 void prepare_for_verify(); |
6008 | 494 void verify() const; |
0 | 495 void verifyFreeLists() const PRODUCT_RETURN; |
496 void verifyIndexedFreeLists() const; | |
497 void verifyIndexedFreeList(size_t size) const; | |
4024
c08412904149
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
3746
diff
changeset
|
498 // Verify that the given chunk is in the free lists: |
c08412904149
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
3746
diff
changeset
|
499 // i.e. either the binary tree dictionary, the indexed free lists |
c08412904149
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
3746
diff
changeset
|
500 // or the linear allocation block. |
6028
f69a5d43dc19
7164144: Fix variable naming style in freeBlockDictionary.* and binaryTreeDictionary*
jmasa
parents:
6026
diff
changeset
|
501 bool verify_chunk_in_free_list(FreeChunk* fc) const; |
4024
c08412904149
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
3746
diff
changeset
|
502 // Verify that the given chunk is the linear allocation block |
c08412904149
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
3746
diff
changeset
|
503 bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const; |
0 | 504 // Do some basic checks on the free lists. |
4024
c08412904149
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
3746
diff
changeset
|
505 void check_free_list_consistency() const PRODUCT_RETURN; |
0 | 506 |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
507 // Printing support |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
508 void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
509 void print_indexed_free_lists(outputStream* st) const; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
510 void print_dictionary_free_lists(outputStream* st) const; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
511 void print_promo_info_blocks(outputStream* st) const; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
512 |
0 | 513 NOT_PRODUCT ( |
514 void initializeIndexedFreeListArrayReturnedBytes(); | |
515 size_t sumIndexedFreeListArrayReturnedBytes(); | |
516 // Return the total number of chunks in the indexed free lists. | |
517 size_t totalCountInIndexedFreeLists() const; | |
518 // Return the total numberof chunks in the space. | |
519 size_t totalCount(); | |
520 ) | |
521 | |
522 // The census consists of counts of the quantities such as | |
523 // the current count of the free chunks, number of chunks | |
524 // created as a result of the split of a larger chunk or | |
525 // coalescing of smaller chucks, etc. The counts in the | |
526 // census is used to make decisions on splitting and | |
527 // coalescing of chunks during the sweep of garbage. | |
528 | |
529 // Print the statistics for the free lists. | |
12
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
530 void printFLCensus(size_t sweep_count) const; |
0 | 531 |
532 // Statistics functions | |
533 // Initialize census for lists before the sweep. | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
534 void beginSweepFLCensus(float inter_sweep_current, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
535 float inter_sweep_estimate, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
536 float intra_sweep_estimate); |
0 | 537 // Set the surplus for each of the free lists. |
538 void setFLSurplus(); | |
539 // Set the hint for each of the free lists. | |
540 void setFLHints(); | |
541 // Clear the census for each of the free lists. | |
542 void clearFLCensus(); | |
543 // Perform functions for the census after the end of the sweep. | |
12
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
544 void endSweepFLCensus(size_t sweep_count); |
0 | 545 // Return true if the count of free chunks is greater |
546 // than the desired number of free chunks. | |
547 bool coalOverPopulated(size_t size); | |
548 | |
549 // Record (for each size): | |
550 // | |
551 // split-births = #chunks added due to splits in (prev-sweep-end, | |
552 // this-sweep-start) | |
553 // split-deaths = #chunks removed for splits in (prev-sweep-end, | |
554 // this-sweep-start) | |
555 // num-curr = #chunks at start of this sweep | |
556 // num-prev = #chunks at end of previous sweep | |
557 // | |
558 // The above are quantities that are measured. Now define: | |
559 // | |
560 // num-desired := num-prev + split-births - split-deaths - num-curr | |
561 // | |
562 // Roughly, num-prev + split-births is the supply, | |
563 // split-deaths is demand due to other sizes | |
564 // and num-curr is what we have left. | |
565 // | |
566 // Thus, num-desired is roughly speaking the "legitimate demand" | |
567 // for blocks of this size and what we are striving to reach at the | |
568 // end of the current sweep. | |
569 // | |
570 // For a given list, let num-len be its current population. | |
571 // Define, for a free list of a given size: | |
572 // | |
573 // coal-overpopulated := num-len >= num-desired * coal-surplus | |
574 // (coal-surplus is set to 1.05, i.e. we allow a little slop when | |
575 // coalescing -- we do not coalesce unless we think that the current | |
576 // supply has exceeded the estimated demand by more than 5%). | |
577 // | |
578 // For the set of sizes in the binary tree, which is neither dense nor | |
579 // closed, it may be the case that for a particular size we have never | |
580 // had, or do not now have, or did not have at the previous sweep, | |
581 // chunks of that size. We need to extend the definition of | |
582 // coal-overpopulated to such sizes as well: | |
583 // | |
584 // For a chunk in/not in the binary tree, extend coal-overpopulated | |
585 // defined above to include all sizes as follows: | |
586 // | |
587 // . a size that is non-existent is coal-overpopulated | |
588 // . a size that has a num-desired <= 0 as defined above is | |
589 // coal-overpopulated. | |
590 // | |
591 // Also define, for a chunk heap-offset C and mountain heap-offset M: | |
592 // | |
593 // close-to-mountain := C >= 0.99 * M | |
594 // | |
595 // Now, the coalescing strategy is: | |
596 // | |
597 // Coalesce left-hand chunk with right-hand chunk if and | |
598 // only if: | |
599 // | |
600 // EITHER | |
601 // . left-hand chunk is of a size that is coal-overpopulated | |
602 // OR | |
603 // . right-hand chunk is close-to-mountain | |
604 void smallCoalBirth(size_t size); | |
605 void smallCoalDeath(size_t size); | |
606 void coalBirth(size_t size); | |
607 void coalDeath(size_t size); | |
608 void smallSplitBirth(size_t size); | |
609 void smallSplitDeath(size_t size); | |
6028
f69a5d43dc19
7164144: Fix variable naming style in freeBlockDictionary.* and binaryTreeDictionary*
jmasa
parents:
6026
diff
changeset
|
610 void split_birth(size_t size); |
0 | 611 void splitDeath(size_t size); |
612 void split(size_t from, size_t to1); | |
613 | |
614 double flsFrag() const; | |
615 }; | |
616 | |
617 // A parallel-GC-thread-local allocation buffer for allocation into a | |
618 // CompactibleFreeListSpace. | |
6197 | 619 class CFLS_LAB : public CHeapObj<mtGC> { |
0 | 620 // The space that this buffer allocates into. |
621 CompactibleFreeListSpace* _cfls; | |
622 | |
623 // Our local free lists. | |
6026 | 624 FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize]; |
0 | 625 |
626 // Initialized from a command-line arg. | |
627 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
628 // Allocation statistics in support of dynamic adjustment of |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
629 // #blocks to claim per get_from_global_pool() call below. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
630 static AdaptiveWeightedAverage |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
631 _blocks_to_claim [CompactibleFreeListSpace::IndexSetSize]; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
632 static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize]; |
4728
441e946dc1af
7121618: Change type of number of GC workers to unsigned int.
jmasa
parents:
4708
diff
changeset
|
633 static uint _global_num_workers[CompactibleFreeListSpace::IndexSetSize]; |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
634 size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize]; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
635 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
636 // Internal work method |
6026 | 637 void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl); |
0 | 638 |
639 public: | |
640 CFLS_LAB(CompactibleFreeListSpace* cfls); | |
641 | |
642 // Allocate and return a block of the given size, or else return NULL. | |
643 HeapWord* alloc(size_t word_sz); | |
644 | |
645 // Return any unused portions of the buffer to the global pool. | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
646 void retire(int tid); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
647 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
648 // Dynamic OldPLABSize sizing |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
649 static void compute_desired_plab_size(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
650 // When the settings are modified from default static initialization |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
651 static void modify_initialization(size_t n, unsigned wt); |
0 | 652 }; |
653 | |
654 size_t PromotionInfo::refillSize() const { | |
655 const size_t CMSSpoolBlockSize = 256; | |
656 const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop) | |
657 * CMSSpoolBlockSize); | |
658 return CompactibleFreeListSpace::adjustObjectSize(sz); | |
659 } | |
1972 | 660 |
661 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP |