annotate src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp @ 1145:e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
Summary: Autonomic per-worker free block cache sizing, tunable coalition policies, fixes to per-size block statistics, retuned gain and bandwidth of some feedback loop filters to allow quicker reactivity to abrupt changes in ambient demand, and other heuristics to reduce fragmentation of the CMS old gen. Also tightened some assertions, including those related to locking.
Reviewed-by: jmasa
author:   ysr
date:     Wed, 23 Dec 2009 09:23:54 -0800
parents:  0fbdb4381b99
children: a8127dc669ba
/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Classes in support of keeping track of promotions into a non-contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class PromotedObject VALUE_OBJ_CLASS_SPEC {
 private:
  enum {
    promoted_mask  = right_n_bits(2),   // i.e. 0x3
    displaced_mark = nth_bit(2),        // i.e. 0x4
    next_mask      = ~(right_n_bits(3)) // i.e. ~(0x7)
  };
  intptr_t _next;
 public:
  inline PromotedObject* next() const {
    return (PromotedObject*)(_next & next_mask);
  }
  inline void setNext(PromotedObject* x) {
    assert(((intptr_t)x & ~next_mask) == 0,
           "Conflict in bit usage, "
           "or insufficient alignment of objects");
    _next |= (intptr_t)x;
  }
  inline void setPromotedMark() {
    _next |= promoted_mask;
  }
  inline bool hasPromotedMark() const {
    return (_next & promoted_mask) == promoted_mask;
  }
  inline void setDisplacedMark() {
    _next |= displaced_mark;
  }
  inline bool hasDisplacedMark() const {
    return (_next & displaced_mark) != 0;
  }
  inline void clearNext() { _next = 0; }
  debug_only(void *next_addr() { return (void *) &_next; })
};
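
// Illustrative sketch (not part of the class contract): because promoted
// objects are at least 8-byte aligned, the low three bits of _next are free
// to carry the two flags while the upper bits hold the successor pointer.
// Starting from a cleared node, the encodings compose as follows:
//
//   obj->clearNext();            // _next == 0
//   obj->setPromotedMark();      // _next == 0x3
//   obj->setDisplacedMark();     // _next == 0x7
//   obj->setNext(successor);     // upper bits now hold 'successor'
//   assert(obj->next() == successor, "flag bits must not disturb the link");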

class SpoolBlock: public FreeChunk {
  friend class PromotionInfo;
 protected:
  SpoolBlock*  nextSpoolBlock;
  size_t       bufferSize;    // number of usable words in this block
  markOop*     displacedHdr;  // the displaced headers start here

  // Note about bufferSize: it denotes the number of entries available plus 1;
  // legal indices range from 1 through bufferSize - 1.  See the verification
  // code verify() that counts the number of displaced headers spooled.
  size_t computeBufferSize() {
    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
  }

 public:
  void init() {
    bufferSize = computeBufferSize();
    displacedHdr = (markOop*)&displacedHdr;
    nextSpoolBlock = NULL;
  }

  void print_on(outputStream* st) const;
  void print() const { print_on(gclog_or_tty); }
};
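
// Worked example (illustrative; the exact sizeof(SpoolBlock) is platform-
// and layout-dependent): on a 64-bit VM a 256-heap-word spool block spans
// 256 * 8 = 2048 bytes.  If the SpoolBlock header occupied, say, 32 of those
// bytes, computeBufferSize() would yield (2048 - 32) / 8 = 252 markOop
// slots, of which indices 1 through 251 are usable under the bufferSize
// convention described above.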

class PromotionInfo VALUE_OBJ_CLASS_SPEC {
  bool            _tracking;      // set if tracking
  CompactibleFreeListSpace* _space; // the space to which this belongs
  PromotedObject* _promoHead;     // head of list of promoted objects
  PromotedObject* _promoTail;     // tail of list of promoted objects
  SpoolBlock*     _spoolHead;     // first spooling block
  SpoolBlock*     _spoolTail;     // last non-full spooling block or null
  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
  SpoolBlock*     _spareSpool;    // free spool buffer
  size_t          _firstIndex;    // first active index in
                                  // first spooling block (_spoolHead)
  size_t          _nextIndex;     // last active index + 1 in last
                                  // spooling block (_spoolTail)
 private:
  // ensure that spooling space exists; return true if there is spooling space
  bool ensure_spooling_space_work();

 public:
  PromotionInfo() :
    _tracking(0), _space(NULL),
    _promoHead(NULL), _promoTail(NULL),
    _spoolHead(NULL), _spoolTail(NULL),
    _spareSpool(NULL), _firstIndex(1),
    _nextIndex(1) {}

  bool noPromotions() const {
    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
    return _promoHead == NULL;
  }
  void startTrackingPromotions();
  void stopTrackingPromotions(uint worker_id = 0);
  bool tracking() const          { return _tracking; }
  void track(PromotedObject* trackOop);      // keep track of a promoted oop
  // The following variant must be used when trackOop is not fully
  // initialized and has a NULL klass:
  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
  CompactibleFreeListSpace* space() const     { return _space; }
  markOop nextDisplacedHeader(); // get next header & forward spool pointer
  void    saveDisplacedHeader(markOop hdr);
                                 // save header and forward spool

  inline size_t refillSize() const;

  SpoolBlock* getSpoolBlock();   // return a free spooling block
  inline bool has_spooling_space() {
    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
  }
  // ensure that spooling space exists
  bool ensure_spooling_space() {
    return has_spooling_space() || ensure_spooling_space_work();
  }
#define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
  void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
#undef PROMOTED_OOPS_ITERATE_DECL
  void promoted_oops_iterate(OopsInGenClosure* cl) {
    promoted_oops_iterate_v(cl);
  }
  void verify() const;
  void reset() {
    _promoHead = NULL;
    _promoTail = NULL;
    _spoolHead = NULL;
    _spoolTail = NULL;
    _spareSpool = NULL;
    _firstIndex = 0;
    _nextIndex = 0;
  }

  void print_on(outputStream* st) const;
  void print_statistics(uint worker_id) const;
};
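
// Typical lifecycle (illustrative sketch; the calling collector code lives
// outside this header):
//
//   promoInfo->startTrackingPromotions();
//   // ... for each object promoted into the space:
//   if (promoInfo->ensure_spooling_space()) {
//     promoInfo->track(promotedObj);          // link onto the promo list
//   }
//   // ... at remark/rescan time:
//   promoInfo->promoted_oops_iterate(cl);     // scan newly promoted objects
//   promoInfo->stopTrackingPromotions(worker_id);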

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
           size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated
};
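
// Illustrative sketch (hypothetical values; the remainder-repair details are
// handled by getChunkFromLinearAllocBlock() below): a LinearAllocBlock is a
// bump-pointer window over one large free chunk, so allocation from it
// amounts to:
//
//   LinearAllocBlock blk;
//   blk.set(chunk_start, chunk_words, refill_words, limit_words);
//   if (size < blk._allocation_size_limit && size <= blk._word_size) {
//     HeapWord* res = blk._ptr;   // hand out the front of the block
//     blk._ptr       += size;
//     blk._word_size -= size;
//   }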

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class CMSPermGenGen;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation.  We assume that any locking for allocation is done
  // by the containing generation.  Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,        // size < this then use _sLAB
    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary,  // keep this odd-sized
    IndexSetStart       = MinObjAlignment,
    IndexSetStride      = MinObjAlignment
  };
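
  // Illustrative dispatch sketch (simplified; method names are approximate
  // and the real logic lives in the allocate_*_freelists() methods below):
  //
  //   FreeChunk* fc;
  //   if (size < SmallForDictionary) {
  //     // small: exact-size segregated list; one list per size in
  //     // [IndexSetStart, IndexSetSize), stepping by IndexSetStride
  //     fc = _indexedFreeList[size].getChunkAtHead();
  //   } else {
  //     // large: best-fit search in the dictionary of big chunks
  //     fc = _dictionary->getChunk(size);
  //   }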

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int _lockRank;

  // a lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
  FreeBlockDictionary* _dictionary;  // ptr to dictionary for large size blocks

  FreeList _indexedFreeList[IndexSetSize];
                                     // indexed array for small size blocks
  // allocation strategy
  bool _fitStrategy;          // Use best fit strategy.
  bool _adaptive_freelists;   // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // a lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".)  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from global
  // list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first.  This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.  Do
  // not attempt to refill if the space is not available, return NULL.  Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    HeapWord* ub = _bt.unallocated_block();
    assert(ub >= bottom() &&
           ub <= end(), "space invariant");
    return ub;
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary::DictionaryChoice);
  // accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation.  We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary.  The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  // This is needed because the default implementation uses block_start()
  // which can't be used at certain times (for example phase 3 of mark-sweep).
  // A better fix is to change the assertions in phase 3 of mark-sweep to
  // use is_in_reserved(), but that is deferred since the is_in() assertions
  // are buried through several layers of callers and are used elsewhere
  // as well.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void oop_iterate(OopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap.  The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
  // objects in the space with references to objects that are no longer
  // valid.  For example, an object may reference another object
  // that has already been swept up (collected).  This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.  Thus, when the iteration completes
  // there should be no further such objects remaining.
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop  promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }
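
  // Illustrative reading of the test above (bit values from the
  // PromotedObject encoding at the top of this file): promoted_mask is 0x3,
  // so an object whose overlaid _next word ends in binary ...11 is
  // "promoted but not yet iterated over", i.e. roughly:
  //
  //   intptr_t bits = *(intptr_t*)(void*)obj;  // overlaid PromotedObject
  //   bool promoted = (bits & 0x3) == 0x3;     // mark-word LSB pattern [11]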

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
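  // Worked example (illustrative; the concrete constants are platform-
  // dependent): if MinChunkSize were 4 words, a 3-word request would first
  // be raised to MAX2(3, 4) = 4 and then rounded by align_object_size(),
  // so nothing smaller than a free chunk's minimum footprint is ever
  // carved out of the free lists.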
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void prepare_for_verify();
  void verify(bool allow_dirty)           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // verify that the given chunk is in the free lists.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void checkFreeListConsistency()         const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of the quantities such as
  // the current count of the free chunks, number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc.  The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured.  Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size.  We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //   EITHER
  //     . left-hand chunk is of a size that is coal-overpopulated
  //   OR
  //     . right-hand chunk is close-to-mountain
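  //
  // Worked example (illustrative numbers): suppose that for 24-word chunks
  // the previous sweep ended with num-prev = 100, and since then
  // split-births = 40 and split-deaths = 10, with num-curr = 80 on hand.
  // Then
  //   num-desired = 100 + 40 - 10 - 80 = 50,
  // and with coal-surplus = 1.05 the list is coal-overpopulated whenever
  //   num-len >= 50 * 1.05 = 52.5,
  // i.e. at 53 or more chunks the sweeper prefers to coalesce them away.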
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void splitBirth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static int    _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, FreeList* fl);

 public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};
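
// Illustrative per-worker usage (sketch; the real call sites live in the
// parallel promotion code, not in this header):
//
//   CFLS_LAB lab(cfls);                  // one LAB per GC worker thread
//   HeapWord* buf = lab.alloc(word_sz);  // served from the local free lists,
//                                        // refilled in bulk from the space
//   // ... promotion copying ...
//   lab.retire(worker_id);               // unused blocks return to the space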

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}
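
// Worked example (illustrative; actual sizes are platform-dependent): on a
// 64-bit VM with 8-byte markOops the spool payload is 256 * 8 = 2048 bytes;
// adding sizeof(SpoolBlock) (say, 32 bytes) gives 2080 bytes = 260 heap
// words, which adjustObjectSize() then rounds up to a legal free-chunk size.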