annotate src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp @ 452:00b023ae2d78

6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
Summary: When we encounter marking stack overflow during precleaning of Reference lists, we were using the overflow list mechanism, which can cause problems on account of mutating the mark word of the header because of conflicts with mutator accesses and updates of that field. Instead we should use the usual mechanism for overflow handling in concurrent phases, namely dirtying of the card on which the overflowed object lies. Since precleaning effectively does a form of discovered list processing, albeit with discovery enabled, we needed to adjust some code to be correct in the face of interleaved processing and discovery.
Reviewed-by: apetrusenko, jcoomes

author:   ysr
date:     Thu, 20 Nov 2008 12:27:41 -0800
parents:  1ee8caae33af
children: e9be0e04635a
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.
#define CFLS_LAB_REFILL_STATS 0

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class PromotedObject VALUE_OBJ_CLASS_SPEC {
 private:
  enum {
    promoted_mask  = right_n_bits(2),    // i.e. 0x3
    displaced_mark = nth_bit(2),         // i.e. 0x4
    next_mask      = ~(right_n_bits(3))  // i.e. ~(0x7)
  };
  intptr_t _next;
 public:
  inline PromotedObject* next() const {
    return (PromotedObject*)(_next & next_mask);
  }
  inline void setNext(PromotedObject* x) {
    assert(((intptr_t)x & ~next_mask) == 0,
           "Conflict in bit usage, "
           " or insufficient alignment of objects");
    _next |= (intptr_t)x;
  }
  inline void setPromotedMark() {
    _next |= promoted_mask;
  }
  inline bool hasPromotedMark() const {
    return (_next & promoted_mask) == promoted_mask;
  }
  inline void setDisplacedMark() {
    _next |= displaced_mark;
  }
  inline bool hasDisplacedMark() const {
    return (_next & displaced_mark) != 0;
  }
  inline void clearNext() { _next = 0; }
  debug_only(void *next_addr() { return (void *) &_next; })
};
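
// Illustrative note (not part of the original header): a sketch of how the
// flag bits and the next-pointer share _next, assuming PromotedObjects are
// at least 8-byte aligned so a pointer's low three bits are always zero:
//
//   PromotedObject* p = ...;  PromotedObject* q = ...;
//   p->clearNext();         // _next == 0
//   p->setNext(q);          // _next == (intptr_t)q       (low 3 bits clear)
//   p->setPromotedMark();   // _next == (intptr_t)q | 0x3
//   p->setDisplacedMark();  // _next == (intptr_t)q | 0x7
//   assert(p->next() == q, "next_mask strips the flag bits");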

class SpoolBlock: public FreeChunk {
  friend class PromotionInfo;
 protected:
  SpoolBlock*  nextSpoolBlock;
  size_t       bufferSize;        // number of usable words in this block
  markOop*     displacedHdr;      // the displaced headers start here

  // Note about bufferSize: it denotes the number of entries available plus 1;
  // legal indices range from 1 through bufferSize - 1.  See the verification
  // code verify() that counts the number of displaced headers spooled.
  size_t computeBufferSize() {
    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
  }

 public:
  void init() {
    bufferSize = computeBufferSize();
    displacedHdr = (markOop*)&displacedHdr;
    nextSpoolBlock = NULL;
  }
};
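
// Worked example (added for illustration, not from the original source): on
// a 64-bit VM where HeapWordSize == sizeof(markOop) == 8, a SpoolBlock of
// size() == 256 heap words with, say, a 40-byte header would yield
//   computeBufferSize() == (256 * 8 - 40) / 8 == 251
// markOop slots, of which indices 1 through bufferSize - 1 are legal per
// the note above.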

class PromotionInfo VALUE_OBJ_CLASS_SPEC {
  bool            _tracking;      // set if tracking
  CompactibleFreeListSpace* _space; // the space to which this belongs
  PromotedObject* _promoHead;     // head of list of promoted objects
  PromotedObject* _promoTail;     // tail of list of promoted objects
  SpoolBlock*     _spoolHead;     // first spooling block
  SpoolBlock*     _spoolTail;     // last non-full spooling block or null
  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
  SpoolBlock*     _spareSpool;    // free spool buffer
  size_t          _firstIndex;    // first active index in
                                  // first spooling block (_spoolHead)
  size_t          _nextIndex;     // last active index + 1 in last
                                  // spooling block (_spoolTail)
 private:
  // ensure that spooling space exists; return true if there is spooling space
  bool ensure_spooling_space_work();

 public:
  PromotionInfo() :
    _tracking(0), _space(NULL),
    _promoHead(NULL), _promoTail(NULL),
    _spoolHead(NULL), _spoolTail(NULL),
    _spareSpool(NULL), _firstIndex(1),
    _nextIndex(1) {}

  bool noPromotions() const {
    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
    return _promoHead == NULL;
  }
  void startTrackingPromotions();
  void stopTrackingPromotions();
  bool tracking() const          { return _tracking; }
  void track(PromotedObject* trackOop);      // keep track of a promoted oop
  // The following variant must be used when trackOop is not fully
  // initialized and has a NULL klass:
  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
  CompactibleFreeListSpace* space() const     { return _space; }
  markOop nextDisplacedHeader(); // get next header & forward spool pointer
  void    saveDisplacedHeader(markOop hdr);
                                 // save header and forward spool

  inline size_t refillSize() const;

  SpoolBlock* getSpoolBlock();   // return a free spooling block
  inline bool has_spooling_space() {
    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
  }
  // ensure that spooling space exists
  bool ensure_spooling_space() {
    return has_spooling_space() || ensure_spooling_space_work();
  }
#define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
  void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
#undef PROMOTED_OOPS_ITERATE_DECL
  void promoted_oops_iterate(OopsInGenClosure* cl) {
    promoted_oops_iterate_v(cl);
  }
  void verify() const;
  void reset() {
    _promoHead = NULL;
    _promoTail = NULL;
    _spoolHead = NULL;
    _spoolTail = NULL;
    _spareSpool = NULL;
    _firstIndex = 0;
    _nextIndex = 0;
  }
};
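
// Hypothetical usage sketch (added for illustration; the locals are
// invented): a promoting collector might pair the calls above roughly as
// follows when an object's mark word must be displaced during promotion:
//
//   if (promoInfo->tracking() && promoInfo->ensure_spooling_space()) {
//     promoInfo->saveDisplacedHeader(old_obj->mark()); // spool the header
//     promoInfo->track((PromotedObject*)new_obj);      // record promotee
//   }
//   // Later, iteration over the promoted list consumes the spooled
//   // headers in order via nextDisplacedHeader().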

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class CMSPermGenGen;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,        // size < this then use _sLAB
    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary,  // keep this odd-sized
    IndexSetStart       = MinObjAlignment,
    IndexSetStride      = MinObjAlignment
  };
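
  // Illustrative reading of these constants (added comment, not in the
  // original): assuming MinObjAlignment == 2 heap words, as on a typical
  // 32-bit VM of this era, free blocks of 2, 4, ..., 256 words are kept in
  // _indexedFreeList[size]; blocks of 257 words or more live in the
  // dictionary; and requests smaller than 16 words prefer the small linear
  // allocation block.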

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int _lockRank;

  // a lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
  FreeBlockDictionary* _dictionary;  // ptr to dictionary for large size blocks

  FreeList _indexedFreeList[IndexSetSize];
                                     // indexed array for small size blocks
  // allocation strategy
  bool _fitStrategy;          // Use best fit strategy.
  bool _adaptive_freelists;   // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // a lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

#if CFLS_LAB_REFILL_STATS
  // Some statistics.
  jint _par_get_chunk_from_small;
  jint _par_get_chunk_from_large;
#endif

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".)  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);

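  // Plausible illustration of the "borrowed" convention above (added
  // comment, not in the original): if "fl" arrives with count() == -3,
  // three chunks of this size were previously taken from the global list;
  // blocks obtained here first repay that debt before "fl"'s count
  // reflects chunks the caller may actually hand out.
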
  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first.  This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.  Do
  // not attempt to refill if the space is not available; return NULL.  Do
  // the repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    HeapWord* ub = _bt.unallocated_block();
    assert(ub >= bottom() &&
           ub <= end(), "space invariant");
    return ub;
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary::DictionaryChoice);
  // accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();
  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
         HeapWord* low = NULL);

#if CFLS_LAB_REFILL_STATS
  void print_par_alloc_stats();
#endif

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  // This is needed because the default implementation uses block_start()
  // which can't be used at certain times (for example phase 3 of mark-sweep).
  // A better fix is to change the assertions in phase 3 of mark-sweep to
  // use is_in_reserved(), but that is deferred since the is_in() assertions
  // are buried through several layers of callers and are used elsewhere
  // as well.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void oop_iterate(OopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
  virtual HeapWord*
         object_iterate_careful_m(MemRegion mr,
                                  ObjectClosureCareful* cl);
  virtual HeapWord*
         object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.  Thus, when the iteration completes
  // there should be no further such objects remaining.
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop  promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }
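
  // Added note (illustrative): this reuses the PromotedObject encoding from
  // the top of this file -- the object's header word doubles as the _next
  // field of a PromotedObject, so the test amounts to checking that the low
  // two bits (promoted_mask, i.e. 0x3) are both set.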

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
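
  // Worked example (added; assumes for illustration that MinChunkSize is 4
  // heap words and that align_object_size() is the identity on whole words,
  // as on a 64-bit VM with 8-byte object alignment): then
  //   adjustObjectSize(2) == 4   (raised to the minimum chunk size)
  //   adjustObjectSize(7) == 7   (already aligned and large enough)
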
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
         bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void prepare_for_verify();
  void verify(bool allow_dirty)           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // verify that the given chunk is in the free lists.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void checkFreeListConsistency()         const PRODUCT_RETURN;

  NOT_PRODUCT(
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of the quantities such as
  // the current count of the free chunks, number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc.  The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float sweep_current,
                          float sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size.  We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //   EITHER
  //     . left-hand chunk is of a size that is coal-overpopulated
  //   OR
  //     . right-hand chunk is close-to-mountain
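  //
  // Worked numeric example (added for illustration, using the definitions
  // above): suppose for some size num-prev == 100, split-births == 30,
  // split-deaths == 20 and num-curr == 90.  Then
  //   num-desired = 100 + 30 - 20 - 90 = 20,
  // and with coal-surplus == 1.05 the list is coal-overpopulated once its
  // population num-len reaches 21, so a left-hand chunk of this size would
  // be coalesced with its right-hand neighbor.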
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void splitBirth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.
  size_t _blocks_to_claim;

#if CFLS_LAB_REFILL_STATS
  // Some statistics.
  int _refills;
  int _blocksTaken;
  static int _tot_refills;
  static int _tot_blocksTaken;
  static int _next_threshold;
#endif

 public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire();
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}
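
// Worked example (added for illustration): on a 64-bit VM where
// sizeof(markOop) == 8 and, say, sizeof(SpoolBlock) == 40 bytes, the
// requested size is heap_word_size(40 + 8 * 256) == 261 heap words, which
// adjustObjectSize() then rounds up to at least MinChunkSize and to the
// object alignment before the spool block is allocated.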