Mercurial > hg > graal-compiler
annotate src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @ 1571:2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
Summary: Added new product ObjectAlignmentInBytes flag to control object alignment.
Reviewed-by: twisti, ysr, iveresov
author | kvn |
---|---|
date | Thu, 27 May 2010 18:01:56 -0700 |
parents | a8127dc669ba |
children | e9ff18c4ace7 |
rev | line source |
---|---|
0 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_compactibleFreeListSpace.cpp.incl" | |
27 | |
28 ///////////////////////////////////////////////////////////////////////// | |
29 //// CompactibleFreeListSpace | |
30 ///////////////////////////////////////////////////////////////////////// | |
31 | |
32 // highest ranked free list lock rank | |
33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3; | |
34 | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
35 // Defaults are 0 so things will break badly if incorrectly initialized. |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
36 int CompactibleFreeListSpace::IndexSetStart = 0; |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
37 int CompactibleFreeListSpace::IndexSetStride = 0; |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
38 |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
39 size_t MinChunkSize = 0; |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
40 |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
41 void CompactibleFreeListSpace::set_cms_values() { |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
42 // Set CMS global values |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
43 assert(MinChunkSize == 0, "already set"); |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
44 #define numQuanta(x,y) ((x+y-1)/y) |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
45 MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment; |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
46 |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
47 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set"); |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
48 IndexSetStart = MinObjAlignment; |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
49 IndexSetStride = MinObjAlignment; |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
50 } |
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
51 |
0 | 52 // Constructor |
53 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, | |
54 MemRegion mr, bool use_adaptive_freelists, | |
55 FreeBlockDictionary::DictionaryChoice dictionaryChoice) : | |
56 _dictionaryChoice(dictionaryChoice), | |
57 _adaptive_freelists(use_adaptive_freelists), | |
58 _bt(bs, mr), | |
59 // free list locks are in the range of values taken by _lockRank | |
60 // This range currently is [_leaf+2, _leaf+3] | |
61 // Note: this requires that CFLspace c'tors | |
62 // are called serially in the order in which the locks are | |
63 // acquired in the program text. This is true today. | |
64 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true), | |
65 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1 | |
66 "CompactibleFreeListSpace._dict_par_lock", true), | |
67 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord * | |
68 CMSRescanMultiple), | |
69 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord * | |
70 CMSConcMarkMultiple), | |
71 _collector(NULL) | |
72 { | |
73 _bt.set_space(this); | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
187
diff
changeset
|
74 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
0 | 75 // We have all of "mr", all of which we place in the dictionary |
76 // as one big chunk. We'll need to decide here which of several | |
77 // possible alternative dictionary implementations to use. For | |
78 // now the choice is easy, since we have only one working | |
79 // implementation, namely, the simple binary tree (splaying | |
80 // temporarily disabled). | |
81 switch (dictionaryChoice) { | |
82 case FreeBlockDictionary::dictionarySplayTree: | |
83 case FreeBlockDictionary::dictionarySkipList: | |
84 default: | |
85 warning("dictionaryChoice: selected option not understood; using" | |
86 " default BinaryTreeDictionary implementation instead."); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
87 case FreeBlockDictionary::dictionaryBinaryTree: |
0 | 88 _dictionary = new BinaryTreeDictionary(mr); |
89 break; | |
90 } | |
91 assert(_dictionary != NULL, "CMS dictionary initialization"); | |
92 // The indexed free lists are initially all empty and are lazily | |
93 // filled in on demand. Initialize the array elements to NULL. | |
94 initializeIndexedFreeListArray(); | |
95 | |
96 // Not using adaptive free lists assumes that allocation is first | |
97 // from the linAB's. Also a cms perm gen which can be compacted | |
98 // has to have the klass's klassKlass allocated at a lower | |
99 // address in the heap than the klass so that the klassKlass is | |
100 // moved to its new location before the klass is moved. | |
101 // Set the _refillSize for the linear allocation blocks | |
102 if (!use_adaptive_freelists) { | |
103 FreeChunk* fc = _dictionary->getChunk(mr.word_size()); | |
104 // The small linAB initially has all the space and will allocate | |
105 // a chunk of any size. | |
106 HeapWord* addr = (HeapWord*) fc; | |
107 _smallLinearAllocBlock.set(addr, fc->size() , | |
108 1024*SmallForLinearAlloc, fc->size()); | |
109 // Note that _unallocated_block is not updated here. | |
110 // Allocations from the linear allocation block should | |
111 // update it. | |
112 } else { | |
113 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, | |
114 SmallForLinearAlloc); | |
115 } | |
116 // CMSIndexedFreeListReplenish should be at least 1 | |
117 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish); | |
118 _promoInfo.setSpace(this); | |
119 if (UseCMSBestFit) { | |
120 _fitStrategy = FreeBlockBestFitFirst; | |
121 } else { | |
122 _fitStrategy = FreeBlockStrategyNone; | |
123 } | |
124 checkFreeListConsistency(); | |
125 | |
126 // Initialize locks for parallel case. | |
127 if (ParallelGCThreads > 0) { | |
128 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
129 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1 | |
130 "a freelist par lock", | |
131 true); | |
132 if (_indexedFreeListParLocks[i] == NULL) | |
133 vm_exit_during_initialization("Could not allocate a par lock"); | |
134 DEBUG_ONLY( | |
135 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]); | |
136 ) | |
137 } | |
138 _dictionary->set_par_lock(&_parDictionaryAllocLock); | |
139 } | |
140 } | |
141 | |
142 // Like CompactibleSpace forward() but always calls cross_threshold() to | |
143 // update the block offset table. Removed initialize_threshold call because | |
144 // CFLS does not use a block offset array for contiguous spaces. | |
145 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size, | |
146 CompactPoint* cp, HeapWord* compact_top) { | |
147 // q is alive | |
148 // First check if we should switch compaction space | |
149 assert(this == cp->space, "'this' should be current compaction space."); | |
150 size_t compaction_max_size = pointer_delta(end(), compact_top); | |
151 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size), | |
152 "virtual adjustObjectSize_v() method is not correct"); | |
153 size_t adjusted_size = adjustObjectSize(size); | |
154 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0, | |
155 "no small fragments allowed"); | |
156 assert(minimum_free_block_size() == MinChunkSize, | |
157 "for de-virtualized reference below"); | |
158 // Can't leave a nonzero size, residual fragment smaller than MinChunkSize | |
159 if (adjusted_size + MinChunkSize > compaction_max_size && | |
160 adjusted_size != compaction_max_size) { | |
161 do { | |
162 // switch to next compaction space | |
163 cp->space->set_compaction_top(compact_top); | |
164 cp->space = cp->space->next_compaction_space(); | |
165 if (cp->space == NULL) { | |
166 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen); | |
167 assert(cp->gen != NULL, "compaction must succeed"); | |
168 cp->space = cp->gen->first_compaction_space(); | |
169 assert(cp->space != NULL, "generation must have a first compaction space"); | |
170 } | |
171 compact_top = cp->space->bottom(); | |
172 cp->space->set_compaction_top(compact_top); | |
173 // The correct adjusted_size may not be the same as that for this method | |
174 // (i.e., cp->space may no longer be "this") so adjust the size again. | |
175 // Use the virtual method which is not used above to save the virtual | |
176 // dispatch. | |
177 adjusted_size = cp->space->adjust_object_size_v(size); | |
178 compaction_max_size = pointer_delta(cp->space->end(), compact_top); | |
179 assert(cp->space->minimum_free_block_size() == 0, "just checking"); | |
180 } while (adjusted_size > compaction_max_size); | |
181 } | |
182 | |
183 // store the forwarding pointer into the mark word | |
184 if ((HeapWord*)q != compact_top) { | |
185 q->forward_to(oop(compact_top)); | |
186 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); | |
187 } else { | |
188 // if the object isn't moving we can just set the mark to the default | |
189 // mark and handle it specially later on. | |
190 q->init_mark(); | |
191 assert(q->forwardee() == NULL, "should be forwarded to NULL"); | |
192 } | |
193 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
194 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size)); |
0 | 195 compact_top += adjusted_size; |
196 | |
197 // we need to update the offset table so that the beginnings of objects can be | |
198 // found during scavenge. Note that we are updating the offset table based on | |
199 // where the object will be once the compaction phase finishes. | |
200 | |
201 // Always call cross_threshold(). A contiguous space can only call it when | |
202 // the compaction_top exceeds the current threshold but not for an | |
203 // non-contiguous space. | |
204 cp->threshold = | |
205 cp->space->cross_threshold(compact_top - adjusted_size, compact_top); | |
206 return compact_top; | |
207 } | |
208 | |
209 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt | |
210 // and use of single_block instead of alloc_block. The name here is not really | |
211 // appropriate - maybe a more general name could be invented for both the | |
212 // contiguous and noncontiguous spaces. | |
213 | |
214 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) { | |
215 _bt.single_block(start, the_end); | |
216 return end(); | |
217 } | |
218 | |
219 // Initialize them to NULL. | |
220 void CompactibleFreeListSpace::initializeIndexedFreeListArray() { | |
221 for (size_t i = 0; i < IndexSetSize; i++) { | |
222 // Note that on platforms where objects are double word aligned, | |
223 // the odd array elements are not used. It is convenient, however, | |
224 // to map directly from the object size to the array element. | |
225 _indexedFreeList[i].reset(IndexSetSize); | |
226 _indexedFreeList[i].set_size(i); | |
227 assert(_indexedFreeList[i].count() == 0, "reset check failed"); | |
228 assert(_indexedFreeList[i].head() == NULL, "reset check failed"); | |
229 assert(_indexedFreeList[i].tail() == NULL, "reset check failed"); | |
230 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed"); | |
231 } | |
232 } | |
233 | |
234 void CompactibleFreeListSpace::resetIndexedFreeListArray() { | |
235 for (int i = 1; i < IndexSetSize; i++) { | |
236 assert(_indexedFreeList[i].size() == (size_t) i, | |
237 "Indexed free list sizes are incorrect"); | |
238 _indexedFreeList[i].reset(IndexSetSize); | |
239 assert(_indexedFreeList[i].count() == 0, "reset check failed"); | |
240 assert(_indexedFreeList[i].head() == NULL, "reset check failed"); | |
241 assert(_indexedFreeList[i].tail() == NULL, "reset check failed"); | |
242 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed"); | |
243 } | |
244 } | |
245 | |
246 void CompactibleFreeListSpace::reset(MemRegion mr) { | |
247 resetIndexedFreeListArray(); | |
248 dictionary()->reset(); | |
249 if (BlockOffsetArrayUseUnallocatedBlock) { | |
250 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen"); | |
251 // Everything's allocated until proven otherwise. | |
252 _bt.set_unallocated_block(end()); | |
253 } | |
254 if (!mr.is_empty()) { | |
255 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small"); | |
256 _bt.single_block(mr.start(), mr.word_size()); | |
257 FreeChunk* fc = (FreeChunk*) mr.start(); | |
258 fc->setSize(mr.word_size()); | |
259 if (mr.word_size() >= IndexSetSize ) { | |
260 returnChunkToDictionary(fc); | |
261 } else { | |
262 _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); | |
263 _indexedFreeList[mr.word_size()].returnChunkAtHead(fc); | |
264 } | |
265 } | |
266 _promoInfo.reset(); | |
267 _smallLinearAllocBlock._ptr = NULL; | |
268 _smallLinearAllocBlock._word_size = 0; | |
269 } | |
270 | |
271 void CompactibleFreeListSpace::reset_after_compaction() { | |
272 // Reset the space to the new reality - one free chunk. | |
273 MemRegion mr(compaction_top(), end()); | |
274 reset(mr); | |
275 // Now refill the linear allocation block(s) if possible. | |
276 if (_adaptive_freelists) { | |
277 refillLinearAllocBlocksIfNeeded(); | |
278 } else { | |
279 // Place as much of mr in the linAB as we can get, | |
280 // provided it was big enough to go into the dictionary. | |
281 FreeChunk* fc = dictionary()->findLargestDict(); | |
282 if (fc != NULL) { | |
283 assert(fc->size() == mr.word_size(), | |
284 "Why was the chunk broken up?"); | |
285 removeChunkFromDictionary(fc); | |
286 HeapWord* addr = (HeapWord*) fc; | |
287 _smallLinearAllocBlock.set(addr, fc->size() , | |
288 1024*SmallForLinearAlloc, fc->size()); | |
289 // Note that _unallocated_block is not updated here. | |
290 } | |
291 } | |
292 } | |
293 | |
294 // Walks the entire dictionary, returning a coterminal | |
295 // chunk, if it exists. Use with caution since it involves | |
296 // a potentially complete walk of a potentially large tree. | |
297 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() { | |
298 | |
299 assert_lock_strong(&_freelistLock); | |
300 | |
301 return dictionary()->find_chunk_ends_at(end()); | |
302 } | |
303 | |
304 | |
305 #ifndef PRODUCT | |
306 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() { | |
307 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
308 _indexedFreeList[i].allocation_stats()->set_returnedBytes(0); | |
309 } | |
310 } | |
311 | |
312 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() { | |
313 size_t sum = 0; | |
314 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
315 sum += _indexedFreeList[i].allocation_stats()->returnedBytes(); | |
316 } | |
317 return sum; | |
318 } | |
319 | |
320 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const { | |
321 size_t count = 0; | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
322 for (int i = (int)MinChunkSize; i < IndexSetSize; i++) { |
0 | 323 debug_only( |
324 ssize_t total_list_count = 0; | |
325 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; | |
326 fc = fc->next()) { | |
327 total_list_count++; | |
328 } | |
329 assert(total_list_count == _indexedFreeList[i].count(), | |
330 "Count in list is incorrect"); | |
331 ) | |
332 count += _indexedFreeList[i].count(); | |
333 } | |
334 return count; | |
335 } | |
336 | |
337 size_t CompactibleFreeListSpace::totalCount() { | |
338 size_t num = totalCountInIndexedFreeLists(); | |
339 num += dictionary()->totalCount(); | |
340 if (_smallLinearAllocBlock._word_size != 0) { | |
341 num++; | |
342 } | |
343 return num; | |
344 } | |
345 #endif | |
346 | |
347 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const { | |
348 FreeChunk* fc = (FreeChunk*) p; | |
349 return fc->isFree(); | |
350 } | |
351 | |
352 size_t CompactibleFreeListSpace::used() const { | |
353 return capacity() - free(); | |
354 } | |
355 | |
356 size_t CompactibleFreeListSpace::free() const { | |
357 // "MT-safe, but not MT-precise"(TM), if you will: i.e. | |
358 // if you do this while the structures are in flux you | |
359 // may get an approximate answer only; for instance | |
360 // because there is concurrent allocation either | |
361 // directly by mutators or for promotion during a GC. | |
362 // It's "MT-safe", however, in the sense that you are guaranteed | |
363 // not to crash and burn, for instance, because of walking | |
364 // pointers that could disappear as you were walking them. | |
365 // The approximation is because the various components | |
366 // that are read below are not read atomically (and | |
367 // further the computation of totalSizeInIndexedFreeLists() | |
368 // is itself a non-atomic computation. The normal use of | |
369 // this is during a resize operation at the end of GC | |
370 // and at that time you are guaranteed to get the | |
371 // correct actual value. However, for instance, this is | |
372 // also read completely asynchronously by the "perf-sampler" | |
373 // that supports jvmstat, and you are apt to see the values | |
374 // flicker in such cases. | |
375 assert(_dictionary != NULL, "No _dictionary?"); | |
376 return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) + | |
377 totalSizeInIndexedFreeLists() + | |
378 _smallLinearAllocBlock._word_size) * HeapWordSize; | |
379 } | |
380 | |
381 size_t CompactibleFreeListSpace::max_alloc_in_words() const { | |
382 assert(_dictionary != NULL, "No _dictionary?"); | |
383 assert_locked(); | |
384 size_t res = _dictionary->maxChunkSize(); | |
385 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size, | |
386 (size_t) SmallForLinearAlloc - 1)); | |
387 // XXX the following could potentially be pretty slow; | |
388 // should one, pessimally for the rare cases when res | |
389 // calculated above is less than IndexSetSize, | |
390 // just return res calculated above? My reasoning was that | |
391 // those cases will be so rare that the extra time spent doesn't | |
392 // really matter.... | |
393 // Note: do not change the loop test i >= res + IndexSetStride | |
394 // to i > res below, because i is unsigned and res may be zero. | |
395 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride; | |
396 i -= IndexSetStride) { | |
397 if (_indexedFreeList[i].head() != NULL) { | |
398 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList"); | |
399 return i; | |
400 } | |
401 } | |
402 return res; | |
403 } | |
404 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
405 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st) |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
406 const { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
407 reportIndexedFreeListStatistics(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
408 gclog_or_tty->print_cr("Layout of Indexed Freelists"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
409 gclog_or_tty->print_cr("---------------------------"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
410 FreeList::print_labels_on(st, "size"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
411 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
412 _indexedFreeList[i].print_on(gclog_or_tty); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
413 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
414 fc = fc->next()) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
415 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s", |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
416 fc, (HeapWord*)fc + i, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
417 fc->cantCoalesce() ? "\t CC" : ""); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
418 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
419 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
420 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
421 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
422 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st) |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
423 const { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
424 _promoInfo.print_on(st); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
425 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
426 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
427 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st) |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
428 const { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
429 _dictionary->reportStatistics(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
430 st->print_cr("Layout of Freelists in Tree"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
431 st->print_cr("---------------------------"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
432 _dictionary->print_free_lists(st); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
433 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
434 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
435 class BlkPrintingClosure: public BlkClosure { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
436 const CMSCollector* _collector; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
437 const CompactibleFreeListSpace* _sp; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
438 const CMSBitMap* _live_bit_map; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
439 const bool _post_remark; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
440 outputStream* _st; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
441 public: |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
442 BlkPrintingClosure(const CMSCollector* collector, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
443 const CompactibleFreeListSpace* sp, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
444 const CMSBitMap* live_bit_map, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
445 outputStream* st): |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
446 _collector(collector), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
447 _sp(sp), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
448 _live_bit_map(live_bit_map), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
449 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
450 _st(st) { } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
451 size_t do_blk(HeapWord* addr); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
452 }; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
453 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
454 size_t BlkPrintingClosure::do_blk(HeapWord* addr) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
455 size_t sz = _sp->block_size_no_stall(addr, _collector); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
456 assert(sz != 0, "Should always be able to compute a size"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
457 if (_sp->block_is_obj(addr)) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
458 const bool dead = _post_remark && !_live_bit_map->isMarked(addr); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
459 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s", |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
460 addr, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
461 dead ? "dead" : "live", |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
462 sz, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
463 (!dead && CMSPrintObjectsInDump) ? ":" : "."); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
464 if (CMSPrintObjectsInDump && !dead) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
465 oop(addr)->print_on(_st); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
466 _st->print_cr("--------------------------------------"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
467 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
468 } else { // free block |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
469 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s", |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
470 addr, sz, CMSPrintChunksInDump ? ":" : "."); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
471 if (CMSPrintChunksInDump) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
472 ((FreeChunk*)addr)->print_on(_st); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
473 _st->print_cr("--------------------------------------"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
474 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
475 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
476 return sz; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
477 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
478 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
479 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
480 outputStream* st) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
481 st->print_cr("\n========================="); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
482 st->print_cr("Block layout in CMS Heap:"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
483 st->print_cr("========================="); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
484 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
485 blk_iterate(&bpcl); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
486 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
487 st->print_cr("\n======================================="); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
488 st->print_cr("Order & Layout of Promotion Info Blocks"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
489 st->print_cr("======================================="); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
490 print_promo_info_blocks(st); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
491 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
492 st->print_cr("\n==========================="); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
493 st->print_cr("Order of Indexed Free Lists"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
494 st->print_cr("========================="); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
495 print_indexed_free_lists(st); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
496 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
497 st->print_cr("\n================================="); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
498 st->print_cr("Order of Free Lists in Dictionary"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
499 st->print_cr("================================="); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
500 print_dictionary_free_lists(st); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
501 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
502 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
503 |
0 | 504 void CompactibleFreeListSpace::reportFreeListStatistics() const { |
505 assert_lock_strong(&_freelistLock); | |
506 assert(PrintFLSStatistics != 0, "Reporting error"); | |
507 _dictionary->reportStatistics(); | |
508 if (PrintFLSStatistics > 1) { | |
509 reportIndexedFreeListStatistics(); | |
510 size_t totalSize = totalSizeInIndexedFreeLists() + | |
511 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())); | |
512 gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag()); | |
513 } | |
514 } | |
515 | |
516 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const { | |
517 assert_lock_strong(&_freelistLock); | |
518 gclog_or_tty->print("Statistics for IndexedFreeLists:\n" | |
519 "--------------------------------\n"); | |
520 size_t totalSize = totalSizeInIndexedFreeLists(); | |
521 size_t freeBlocks = numFreeBlocksInIndexedFreeLists(); | |
522 gclog_or_tty->print("Total Free Space: %d\n", totalSize); | |
523 gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists()); | |
524 gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks); | |
525 if (freeBlocks != 0) { | |
526 gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks); | |
527 } | |
528 } | |
529 | |
530 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const { | |
531 size_t res = 0; | |
532 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
533 debug_only( | |
534 ssize_t recount = 0; | |
535 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; | |
536 fc = fc->next()) { | |
537 recount += 1; | |
538 } | |
539 assert(recount == _indexedFreeList[i].count(), | |
540 "Incorrect count in list"); | |
541 ) | |
542 res += _indexedFreeList[i].count(); | |
543 } | |
544 return res; | |
545 } | |
546 | |
547 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const { | |
548 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { | |
549 if (_indexedFreeList[i].head() != NULL) { | |
550 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList"); | |
551 return (size_t)i; | |
552 } | |
553 } | |
554 return 0; | |
555 } | |
556 | |
557 void CompactibleFreeListSpace::set_end(HeapWord* value) { | |
558 HeapWord* prevEnd = end(); | |
559 assert(prevEnd != value, "unnecessary set_end call"); | |
560 assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block"); | |
561 _end = value; | |
562 if (prevEnd != NULL) { | |
563 // Resize the underlying block offset table. | |
564 _bt.resize(pointer_delta(value, bottom())); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
565 if (value <= prevEnd) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
566 assert(value >= unallocated_block(), "New end is below unallocated block"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
567 } else { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
568 // Now, take this new chunk and add it to the free blocks. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
569 // Note that the BOT has not yet been updated for this block. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
570 size_t newFcSize = pointer_delta(value, prevEnd); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
571 // XXX This is REALLY UGLY and should be fixed up. XXX |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
572 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
573 // Mark the boundary of the new block in BOT |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
574 _bt.mark_block(prevEnd, value); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
575 // put it all in the linAB |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
576 if (ParallelGCThreads == 0) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
577 _smallLinearAllocBlock._ptr = prevEnd; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
578 _smallLinearAllocBlock._word_size = newFcSize; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
579 repairLinearAllocBlock(&_smallLinearAllocBlock); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
580 } else { // ParallelGCThreads > 0 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
581 MutexLockerEx x(parDictionaryAllocLock(), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
582 Mutex::_no_safepoint_check_flag); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
583 _smallLinearAllocBlock._ptr = prevEnd; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
584 _smallLinearAllocBlock._word_size = newFcSize; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
585 repairLinearAllocBlock(&_smallLinearAllocBlock); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
586 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
587 // Births of chunks put into a LinAB are not recorded. Births |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
588 // of chunks as they are allocated out of a LinAB are. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
589 } else { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
590 // Add the block to the free lists, if possible coalescing it |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
591 // with the last free block, and update the BOT and census data. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
592 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize); |
0 | 593 } |
594 } | |
595 } | |
596 } | |
597 | |
// A DirtyCardToOopClosure specialized for a CompactibleFreeListSpace.
// It walks the objects under dirty cards, dispatching either to a
// parallel or a sequential walker (see the *_DEFN macro below), and
// skips blocks that are not live objects.
class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;       // the space whose cards are scanned
  CMSCollector* _collector;              // queried for object liveness
protected:
  // Override.
  // Declares, for a given closure type, the virtual entry point plus the
  // parallel and non-parallel worker variants it delegates to.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_nopar(MemRegion mr,                  \
                                         HeapWord* bottom, HeapWord* top, \
                                         ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      OopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};
625 | |
// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
//
// This macro defines, for a given closure type, three methods of
// FreeListSpace_DCTOC:
//  - walk_mem_region_with_cl: dispatches to the parallel variant when
//    GC worker threads are active, else to the sequential one;
//  - walk_mem_region_with_cl_par: uses the lock-free block_size() /
//    block_is_obj() queries, safe under concurrent allocation;
//  - walk_mem_region_with_cl_nopar: uses the cheaper *_nopar queries.
// Both walkers first skip forward past blocks that lie entirely before
// "mr", then apply the closure to each live object not allocated since
// the last save-marks (those are scanned by other means).
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                 HeapWord* bottom,              \
                                                 HeapWord* top,                 \
                                                 ClosureType* cl) {             \
  if (SharedHeap::heap()->n_par_threads() > 0) {                                \
    walk_mem_region_with_cl_par(mr, bottom, top, cl);                           \
  } else {                                                                      \
    walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                         \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
                                                      HeapWord* bottom,         \
                                                      HeapWord* top,            \
                                                      ClosureType* cl) {        \
  /* Skip parts that are before "mr", in case "block_start" sent us            \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
                                                        HeapWord* bottom,       \
                                                        HeapWord* top,          \
                                                        ClosureType* cl) {      \
  /* Skip parts that are before "mr", in case "block_start" sent us            \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}
693 | |
// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

// Factory override: produce a dirty-card-to-oop closure specialized
// for this space (see FreeListSpace_DCTOC above). Caller owns the
// returned closure.
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}
706 | |
707 | |
708 // Note on locking for the space iteration functions: | |
709 // since the collector's iteration activities are concurrent with | |
710 // allocation activities by mutators, absent a suitable mutual exclusion | |
// mechanism the iterators may go awry. For instance a block being iterated
712 // may suddenly be allocated or divided up and part of it allocated and | |
713 // so on. | |
714 | |
715 // Apply the given closure to each block in the space. | |
716 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) { | |
717 assert_lock_strong(freelistLock()); | |
718 HeapWord *cur, *limit; | |
719 for (cur = bottom(), limit = end(); cur < limit; | |
720 cur += cl->do_blk_careful(cur)); | |
721 } | |
722 | |
723 // Apply the given closure to each block in the space. | |
724 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) { | |
725 assert_lock_strong(freelistLock()); | |
726 HeapWord *cur, *limit; | |
727 for (cur = bottom(), limit = end(); cur < limit; | |
728 cur += cl->do_blk(cur)); | |
729 } | |
730 | |
731 // Apply the given closure to each oop in the space. | |
732 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) { | |
733 assert_lock_strong(freelistLock()); | |
734 HeapWord *cur, *limit; | |
735 size_t curSize; | |
736 for (cur = bottom(), limit = end(); cur < limit; | |
737 cur += curSize) { | |
738 curSize = block_size(cur); | |
739 if (block_is_obj(cur)) { | |
740 oop(cur)->oop_iterate(cl); | |
741 } | |
742 } | |
743 } | |
744 | |
// Apply the given closure to each oop in the space \intersect memory region.
// The first object (which may start before mr) and the last object (which
// may extend beyond mr) are handled with a region-restricted closure so
// that only oops inside mr are visited. Requires the free list lock.
void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
  assert_lock_strong(freelistLock());
  if (is_empty()) {
    return;
  }
  // Clip mr to the used part of the space.
  MemRegion cur = MemRegion(bottom(), end());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    // Whole space requested: use the unrestricted iterator.
    oop_iterate(cl);
    return;
  }
  assert(mr.end() <= end(), "just took an intersection above");
  // Back up to the start of the block containing mr.start().
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Closure that only applies cl to oops lying within mr.
  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
  if (block_is_obj(obj_addr)) {
    // Handle first object specially.
    oop obj = oop(obj_addr);
    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
  } else {
    // First block is a free chunk; skip over it.
    FreeChunk* fc = (FreeChunk*)obj_addr;
    obj_addr += fc->size();
  }
  while (obj_addr < t) {
    HeapWord* obj = obj_addr;
    obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(cl);
      }
    } else {
      // "obj" extends beyond end of region
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(&smr_blk);
      }
      break;
    }
  }
}
791 | |
792 // NOTE: In the following methods, in order to safely be able to | |
793 // apply the closure to an object, we need to be sure that the | |
794 // object has been initialized. We are guaranteed that an object | |
795 // is initialized if we are holding the Heap_lock with the | |
796 // world stopped. | |
797 void CompactibleFreeListSpace::verify_objects_initialized() const { | |
798 if (is_init_completed()) { | |
799 assert_locked_or_safepoint(Heap_lock); | |
800 if (Universe::is_fully_initialized()) { | |
801 guarantee(SafepointSynchronize::is_at_safepoint(), | |
802 "Required for objects to be initialized"); | |
803 } | |
804 } // else make a concession at vm start-up | |
805 } | |
806 | |
807 // Apply the given closure to each object in the space | |
808 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) { | |
809 assert_lock_strong(freelistLock()); | |
810 NOT_PRODUCT(verify_objects_initialized()); | |
811 HeapWord *cur, *limit; | |
812 size_t curSize; | |
813 for (cur = bottom(), limit = end(); cur < limit; | |
814 cur += curSize) { | |
815 curSize = block_size(cur); | |
816 if (block_is_obj(cur)) { | |
817 blk->do_object(oop(cur)); | |
818 } | |
819 } | |
820 } | |
821 | |
517
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
822 // Apply the given closure to each live object in the space |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
823 // The usage of CompactibleFreeListSpace |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
824 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
825 // objects in the space with references to objects that are no longer |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
826 // valid. For example, an object may reference another object |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
// that has already been swept up (collected). This method uses
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
828 // obj_is_alive() to determine whether it is safe to apply the closure to |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
829 // an object. See obj_is_alive() for details on how liveness of an |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
830 // object is decided. |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
831 |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
832 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
833 assert_lock_strong(freelistLock()); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
834 NOT_PRODUCT(verify_objects_initialized()); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
835 HeapWord *cur, *limit; |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
836 size_t curSize; |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
837 for (cur = bottom(), limit = end(); cur < limit; |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
838 cur += curSize) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
839 curSize = block_size(cur); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
840 if (block_is_obj(cur) && obj_is_alive(cur)) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
841 blk->do_object(oop(cur)); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
842 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
843 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
844 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
845 |
0 | 846 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, |
847 UpwardsObjectClosure* cl) { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
848 assert_locked(freelistLock()); |
0 | 849 NOT_PRODUCT(verify_objects_initialized()); |
850 Space::object_iterate_mem(mr, cl); | |
851 } | |
852 | |
853 // Callers of this iterator beware: The closure application should | |
854 // be robust in the face of uninitialized objects and should (always) | |
855 // return a correct size so that the next addr + size below gives us a | |
856 // valid block boundary. [See for instance, | |
857 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
858 // in ConcurrentMarkSweepGeneration.cpp.] | |
859 HeapWord* | |
860 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) { | |
861 assert_lock_strong(freelistLock()); | |
862 HeapWord *addr, *last; | |
863 size_t size; | |
864 for (addr = bottom(), last = end(); | |
865 addr < last; addr += size) { | |
866 FreeChunk* fc = (FreeChunk*)addr; | |
867 if (fc->isFree()) { | |
868 // Since we hold the free list lock, which protects direct | |
869 // allocation in this generation by mutators, a free object | |
870 // will remain free throughout this iteration code. | |
871 size = fc->size(); | |
872 } else { | |
873 // Note that the object need not necessarily be initialized, | |
874 // because (for instance) the free list lock does NOT protect | |
875 // object initialization. The closure application below must | |
876 // therefore be correct in the face of uninitialized objects. | |
877 size = cl->do_object_careful(oop(addr)); | |
878 if (size == 0) { | |
879 // An unparsable object found. Signal early termination. | |
880 return addr; | |
881 } | |
882 } | |
883 } | |
884 return NULL; | |
885 } | |
886 | |
887 // Callers of this iterator beware: The closure application should | |
888 // be robust in the face of uninitialized objects and should (always) | |
889 // return a correct size so that the next addr + size below gives us a | |
890 // valid block boundary. [See for instance, | |
891 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
892 // in ConcurrentMarkSweepGeneration.cpp.] | |
// As object_iterate_careful() above, but restricted to the blocks
// overlapping the given memory region mr. Returns the address of the
// first unparsable object encountered (closure returned size 0), or
// NULL if the walk completed.
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
  ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it may not necessarily
  // be the same as [bottom(),end()); although we could
  // use [used_region().start(),round_to(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
         "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  // Start at the block containing mr.start() (may lie before mr).
  for (addr = block_start_careful(mr.start()), end = mr.end();
       addr < end; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->isFree()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful_m(oop(addr), mr);
      if (size == 0) {
        // An unparsable object found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}
928 | |
929 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
// Return the start of the block containing p, as located via the
// block offset table.
HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  return _bt.block_start(p);
}
934 | |
// As block_start_const(), but uses the block offset table's "careful"
// lookup (no initialization check here).
HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
  return _bt.block_start_careful(p);
}
938 | |
// Return the size (in HeapWords) of the block starting at p, which may
// concurrently change state between free chunk and object under
// parallel promotion/allocation: the state is re-read until a
// consistent view is obtained.
size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();
      // If the object is still a free chunk, return the size, else it
      // has been allocated so try again.
      // (The second indicatesFreeChunk() check validates that the size
      // read above came from a block that was free the whole time.)
      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    } else {
      // must read from what 'p' points to in each loop.
      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
      if (k != NULL) {
        // A non-NULL klass means the object header is installed and
        // its size can be computed from the klass.
        assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
        oop o = (oop)p;
        assert(o->is_parsable(), "Should be parsable");
        assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
        size_t res = o->size_given_klass(k->klass_part());
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      }
      // else: klass not yet installed -- retry until the state settles.
    }
  }
}
972 | |
// A variant of the above that uses the Printezis bits for
// unparsable but allocated objects. This avoids any possible
// stalls waiting for mutators to initialize objects, and is
// thus potentially faster than the variant above. However,
// this variant may return a zero size for a block that is
// under mutation and for which a consistent size cannot be
// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
                                                     const CMSCollector* c)
const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  DEBUG_ONLY(uint loops = 0;)
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      // Looks like a free chunk. Read the size through a volatile
      // pointer, then re-check the free-chunk indication: if it still
      // holds after the size read, the two reads were consistent and
      // the size can be trusted.
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();
      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        assert(loops == 0, "Should be 0");
        return res;
      }
      // Indication changed between the two reads: the chunk is being
      // mutated concurrently; fall through and retry once.
    } else {
      // must read from what 'p' points to in each loop.
      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
      if (k != NULL &&
          ((oopDesc*)p)->is_parsable() &&
          ((oopDesc*)p)->is_conc_safe()) {
        // The object has a valid klass and is safely parsable: compute
        // its (adjusted) size from the klass.
        assert(k->is_oop(), "Should really be klass oop.");
        oop o = (oop)p;
        assert(o->is_oop(), "Should be an oop");
        size_t res = o->size_given_klass(k->klass_part());
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      } else {
        // Object under initialization: rather than stalling, infer the
        // size from the Printezis bits; this may legitimately return 0
        // (see the function-level comment above).
        return c->block_size_if_printezis_bits(p);
      }
    }
    // Only the "looked free, then changed" path can reach here, and it
    // can happen at most once before one of the returns above fires.
    assert(loops == 0, "Can loop at most once");
    DEBUG_ONLY(loops++;)
  }
}
1019 | |
1020 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const { | |
1021 NOT_PRODUCT(verify_objects_initialized()); | |
1022 assert(MemRegion(bottom(), end()).contains(p), "p not in space"); | |
1023 FreeChunk* fc = (FreeChunk*)p; | |
1024 if (fc->isFree()) { | |
1025 return fc->size(); | |
1026 } else { | |
1027 // Ignore mark word because this may be a recently promoted | |
1028 // object whose mark word is used to chain together grey | |
1029 // objects (the last one would have a null value). | |
1030 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1031 return adjustObjectSize(oop(p)->size()); | |
1032 } | |
1033 } | |
1034 | |
1035 // This implementation assumes that the property of "being an object" is | |
1036 // stable. But being a free chunk may not be (because of parallel | |
1037 // promotion.) | |
1038 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const { | |
1039 FreeChunk* fc = (FreeChunk*)p; | |
1040 assert(is_in_reserved(p), "Should be in space"); | |
1041 // When doing a mark-sweep-compact of the CMS generation, this | |
1042 // assertion may fail because prepare_for_compaction() uses | |
1043 // space that is garbage to maintain information on ranges of | |
1044 // live objects so that these live ranges can be moved as a whole. | |
1045 // Comment out this assertion until that problem can be solved | |
1046 // (i.e., that the block start calculation may look at objects | |
1047 // at address below "p" in finding the object that contains "p" | |
1048 // and those objects (if garbage) may have been modified to hold | |
1049 // live range information. | |
1050 // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary"); | |
187 | 1051 if (FreeChunk::indicatesFreeChunk(p)) return false; |
1052 klassOop k = oop(p)->klass_or_null(); | |
0 | 1053 if (k != NULL) { |
1054 // Ignore mark word because it may have been used to | |
1055 // chain together promoted objects (the last one | |
1056 // would have a null value). | |
1057 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1058 return true; | |
1059 } else { | |
1060 return false; // Was not an object at the start of collection. | |
1061 } | |
1062 } | |
1063 | |
1064 // Check if the object is alive. This fact is checked either by consulting | |
1065 // the main marking bitmap in the sweeping phase or, if it's a permanent | |
1066 // generation and we're not in the sweeping phase, by checking the | |
1067 // perm_gen_verify_bit_map where we store the "deadness" information if | |
1068 // we did not sweep the perm gen in the most recent previous GC cycle. | |
1069 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const { | |
1070 assert (block_is_obj(p), "The address should point to an object"); | |
1071 | |
1072 // If we're sweeping, we use object liveness information from the main bit map | |
1073 // for both perm gen and old gen. | |
1074 // We don't need to lock the bitmap (live_map or dead_map below), because | |
1075 // EITHER we are in the middle of the sweeping phase, and the | |
1076 // main marking bit map (live_map below) is locked, | |
1077 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below) | |
1078 // is stable, because it's mutated only in the sweeping phase. | |
1079 if (_collector->abstract_state() == CMSCollector::Sweeping) { | |
1080 CMSBitMap* live_map = _collector->markBitMap(); | |
1081 return live_map->isMarked((HeapWord*) p); | |
1082 } else { | |
1083 // If we're not currently sweeping and we haven't swept the perm gen in | |
1084 // the previous concurrent cycle then we may have dead but unswept objects | |
1085 // in the perm gen. In this case, we use the "deadness" information | |
1086 // that we had saved in perm_gen_verify_bit_map at the last sweep. | |
1087 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) { | |
1088 if (_collector->verifying()) { | |
1089 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map(); | |
1090 // Object is marked in the dead_map bitmap at the previous sweep | |
1091 // when we know that it's dead; if the bitmap is not allocated then | |
1092 // the object is alive. | |
1093 return (dead_map->sizeInBits() == 0) // bit_map has been allocated | |
1094 || !dead_map->par_isMarked((HeapWord*) p); | |
1095 } else { | |
1096 return false; // We can't say for sure if it's live, so we say that it's dead. | |
1097 } | |
1098 } | |
1099 } | |
1100 return true; | |
1101 } | |
1102 | |
1103 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const { | |
1104 FreeChunk* fc = (FreeChunk*)p; | |
1105 assert(is_in_reserved(p), "Should be in space"); | |
1106 assert(_bt.block_start(p) == p, "Should be a block boundary"); | |
1107 if (!fc->isFree()) { | |
1108 // Ignore mark word because it may have been used to | |
1109 // chain together promoted objects (the last one | |
1110 // would have a null value). | |
1111 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1112 return true; | |
1113 } | |
1114 return false; | |
1115 } | |
1116 | |
1117 // "MT-safe but not guaranteed MT-precise" (TM); you may get an | |
1118 // approximate answer if you don't hold the freelistlock when you call this. | |
1119 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const { | |
1120 size_t size = 0; | |
1121 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
1122 debug_only( | |
1123 // We may be calling here without the lock in which case we | |
1124 // won't do this modest sanity check. | |
1125 if (freelistLock()->owned_by_self()) { | |
1126 size_t total_list_size = 0; | |
1127 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; | |
1128 fc = fc->next()) { | |
1129 total_list_size += i; | |
1130 } | |
1131 assert(total_list_size == i * _indexedFreeList[i].count(), | |
1132 "Count in list is incorrect"); | |
1133 } | |
1134 ) | |
1135 size += i * _indexedFreeList[i].count(); | |
1136 } | |
1137 return size; | |
1138 } | |
1139 | |
// MT-safe allocation entry point: take the free list lock (skipping the
// safepoint check, since this may run inside a GC pause) and delegate
// to the locked allocate().
HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return allocate(size);
}
1144 | |
// Convenience wrapper: carve "size" words out of the remainder of the
// small linear allocation block, without refilling it.
HeapWord*
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
}
1149 | |
// Allocate "size" HeapWords from the free lists. "size" must already be
// adjusted via adjustObjectSize(), and the caller must hold the free
// list lock. Returns NULL on failure.
HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  assert_lock_strong(freelistLock());
  HeapWord* res = NULL;
  assert(size == adjustObjectSize(size),
         "use adjustObjectSize() before calling into allocate()");

  // Dispatch on the configured free-list policy.
  if (_adaptive_freelists) {
    res = allocate_adaptive_freelists(size);
  } else {  // non-adaptive free lists
    res = allocate_non_adaptive_freelists(size);
  }

  if (res != NULL) {
    // check that res does lie in this space!
    assert(is_in_reserved(res), "Not in this space!");
    assert(is_aligned((void*)res), "alignment check");

    // Clear the free-chunk indication so the block no longer looks
    // free; until the caller installs a klass word it will look like
    // an uninitialized object.
    FreeChunk* fc = (FreeChunk*)res;
    fc->markNotFree();
    assert(!fc->isFree(), "shouldn't be marked free");
    assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
    // Verify that the block offset table shows this to
    // be a single block, but not one which is unallocated.
    _bt.verify_single_block(res, size);
    _bt.verify_not_unallocated(res, size);
    // mangle a just allocated object with a distinct pattern.
    debug_only(fc->mangleAllocated(size));
  }

  return res;
}
1181 | |
1182 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) { | |
1183 HeapWord* res = NULL; | |
1184 // try and use linear allocation for smaller blocks | |
1185 if (size < _smallLinearAllocBlock._allocation_size_limit) { | |
1186 // if successful, the following also adjusts block offset table | |
1187 res = getChunkFromSmallLinearAllocBlock(size); | |
1188 } | |
1189 // Else triage to indexed lists for smaller sizes | |
1190 if (res == NULL) { | |
1191 if (size < SmallForDictionary) { | |
1192 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1193 } else { | |
1194 // else get it from the big dictionary; if even this doesn't | |
1195 // work we are out of luck. | |
1196 res = (HeapWord*)getChunkFromDictionaryExact(size); | |
1197 } | |
1198 } | |
1199 | |
1200 return res; | |
1201 } | |
1202 | |
1203 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) { | |
1204 assert_lock_strong(freelistLock()); | |
1205 HeapWord* res = NULL; | |
1206 assert(size == adjustObjectSize(size), | |
1207 "use adjustObjectSize() before calling into allocate()"); | |
1208 | |
1209 // Strategy | |
1210 // if small | |
1211 // exact size from small object indexed list if small | |
1212 // small or large linear allocation block (linAB) as appropriate | |
1213 // take from lists of greater sized chunks | |
1214 // else | |
1215 // dictionary | |
1216 // small or large linear allocation block if it has the space | |
1217 // Try allocating exact size from indexTable first | |
1218 if (size < IndexSetSize) { | |
1219 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1220 if(res != NULL) { | |
1221 assert(res != (HeapWord*)_indexedFreeList[size].head(), | |
1222 "Not removed from free list"); | |
1223 // no block offset table adjustment is necessary on blocks in | |
1224 // the indexed lists. | |
1225 | |
1226 // Try allocating from the small LinAB | |
1227 } else if (size < _smallLinearAllocBlock._allocation_size_limit && | |
1228 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) { | |
1229 // if successful, the above also adjusts block offset table | |
1230 // Note that this call will refill the LinAB to | |
1231 // satisfy the request. This is different that | |
1232 // evm. | |
1233 // Don't record chunk off a LinAB? smallSplitBirth(size); | |
1234 | |
1235 } else { | |
1236 // Raid the exact free lists larger than size, even if they are not | |
1237 // overpopulated. | |
1238 res = (HeapWord*) getChunkFromGreater(size); | |
1239 } | |
1240 } else { | |
1241 // Big objects get allocated directly from the dictionary. | |
1242 res = (HeapWord*) getChunkFromDictionaryExact(size); | |
1243 if (res == NULL) { | |
1244 // Try hard not to fail since an allocation failure will likely | |
1245 // trigger a synchronous GC. Try to get the space from the | |
1246 // allocation blocks. | |
1247 res = getChunkFromSmallLinearAllocBlockRemainder(size); | |
1248 } | |
1249 } | |
1250 | |
1251 return res; | |
1252 } | |
1253 | |
1254 // A worst-case estimate of the space required (in HeapWords) to expand the heap | |
1255 // when promoting obj. | |
1256 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const { | |
1257 // Depending on the object size, expansion may require refilling either a | |
1258 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize | |
1259 // is added because the dictionary may over-allocate to avoid fragmentation. | |
1260 size_t space = obj_size; | |
1261 if (!_adaptive_freelists) { | |
1262 space = MAX2(space, _smallLinearAllocBlock._refillSize); | |
1263 } | |
1264 space += _promoInfo.refillSize() + 2 * MinChunkSize; | |
1265 return space; | |
1266 } | |
1267 | |
// Satisfy a request of "numWords" by carving it out of a strictly larger
// chunk: first scan the indexed lists at or above numWords + MinChunkSize
// (so the leftover can stand alone as a free chunk), then fall back to
// the dictionary. Returns NULL if nothing suitable is found.
FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  FreeChunk* ret;

  assert(numWords >= MinChunkSize, "Size is less than minimum");
  assert(linearAllocationWouldFail() || bestFitFirst(),
    "Should not be here");

  size_t i;
  size_t currSize = numWords + MinChunkSize;
  assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
    FreeList* fl = &_indexedFreeList[i];
    if (fl->head()) {
      // Found a non-empty exact list large enough; split a chunk off it.
      ret = getFromListGreater(fl, numWords);
      assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
      return ret;
    }
  }

  currSize = MAX2((size_t)SmallForDictionary,
                  (size_t)(numWords + MinChunkSize));

  /* Try to get a chunk that satisfies request, while avoiding
     fragmentation that can't be handled. */
  {
    ret =  dictionary()->getChunk(currSize);
    if (ret != NULL) {
      assert(ret->size() - numWords >= MinChunkSize,
             "Chunk is too small");
      _bt.allocated((HeapWord*)ret, ret->size());
      /* Carve returned chunk. */
      (void) splitChunkAndReturnRemainder(ret, numWords);
      /* Label this as no longer a free chunk. */
      assert(ret->isFree(), "This chunk should be free");
      ret->linkPrev(NULL);
    }
    assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
    return ret;
  }
  // Unreachable: the block above always returns.
  ShouldNotReachHere();
}
1309 | |
// Debug support: check that "fc" (which must be small enough for the
// indexed lists) actually appears on the indexed free list for its size.
bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  const {
  assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
}
1315 | |
1316 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const { | |
1317 if (fc->size() >= IndexSetSize) { | |
1318 return dictionary()->verifyChunkInFreeLists(fc); | |
1319 } else { | |
1320 return verifyChunkInIndexedFreeLists(fc); | |
1321 } | |
1322 } | |
1323 | |
#ifndef PRODUCT
// Debug-only: verify that both the free list lock and the parallel
// dictionary allocation lock are held as required.
void CompactibleFreeListSpace::assert_locked() const {
  CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
}

// Debug-only: verify that the given lock is held as required.
void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  CMSLockVerifier::assert_locked(lock);
}
#endif
1333 | |
1334 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) { | |
1335 // In the parallel case, the main thread holds the free list lock | |
1336 // on behalf the parallel threads. | |
1337 FreeChunk* fc; | |
1338 { | |
1339 // If GC is parallel, this might be called by several threads. | |
1340 // This should be rare enough that the locking overhead won't affect | |
1341 // the sequential code. | |
1342 MutexLockerEx x(parDictionaryAllocLock(), | |
1343 Mutex::_no_safepoint_check_flag); | |
1344 fc = getChunkFromDictionary(size); | |
1345 } | |
1346 if (fc != NULL) { | |
1347 fc->dontCoalesce(); | |
1348 assert(fc->isFree(), "Should be free, but not coalescable"); | |
1349 // Verify that the block offset table shows this to | |
1350 // be a single block, but not one which is unallocated. | |
1351 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1352 _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); | |
1353 } | |
1354 return fc; | |
1355 } | |
1356 | |
// Copying promotion of "obj" (of "obj_size" HeapWords) into this space.
// Returns the promoted copy, or NULL if allocation -- or, when
// promotion tracking is on, spooling space for the displaced header --
// cannot be obtained.
oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  assert_locked();

  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
    return NULL;
  }
  // Call the allocate(size_t, bool) form directly to avoid the
  // additional call through the allocate(size_t) form. Having
  // the compile inline the call is problematic because allocate(size_t)
  // is a virtual method.
  HeapWord* res = allocate(adjustObjectSize(obj_size));
  if (res != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
    // if we should be tracking promotions, do so.
    if (_promoInfo.tracking()) {
        _promoInfo.track((PromotedObject*)res);
    }
  }
  return oop(res);
}
1384 | |
// Allocate "size" words from the small linear allocation block; "size"
// must lie in [MinChunkSize, _allocation_size_limit).
HeapWord*
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  assert_locked();
  assert(size >= MinChunkSize, "minimum chunk size");
  assert(size < _smallLinearAllocBlock._allocation_size_limit,
    "maximum from smallLinearAllocBlock");
  return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
}
1393 | |
// Allocate "size" words from the given linear allocation block,
// refilling the block from the free lists if its remainder cannot
// satisfy the request. Returns NULL when neither the current block nor
// a refill can serve the request.
HeapWord*
CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
                                                       size_t size) {
  assert_locked();
  assert(size >= MinChunkSize, "too small");
  HeapWord* res = NULL;
  // Try to do linear allocation from blk, making sure that
  if (blk->_word_size == 0) {
    // We have probably been unable to fill this either in the prologue or
    // when it was exhausted at the last linear allocation. Bail out until
    // next time.
    assert(blk->_ptr == NULL, "consistency check");
    return NULL;
  }
  assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  // Common case: serve the request out of the block's remainder.
  res = getChunkFromLinearAllocBlockRemainder(blk, size);
  if (res != NULL) return res;

  // about to exhaust this linear allocation block
  if (blk->_word_size == size) { // exactly satisfied
    res = blk->_ptr;
    _bt.allocated(res, blk->_word_size);
  } else if (size + MinChunkSize <= blk->_refillSize) {
    size_t sz = blk->_word_size;
    // Update _unallocated_block if the size is such that chunk would be
    // returned to the indexed free list. All other chunks in the indexed
    // free lists are allocated from the dictionary so that _unallocated_block
    // has already been adjusted for them. Do it here so that the cost
    // for all chunks added back to the indexed free lists.
    if (sz < SmallForDictionary) {
      _bt.allocated(blk->_ptr, sz);
    }
    // Return the chunk that isn't big enough, and then refill below.
    addChunkToFreeLists(blk->_ptr, sz);
    splitBirth(sz);
    // Don't keep statistics on adding back chunk from a LinAB.
  } else {
    // A refilled block would not satisfy the request.
    return NULL;
  }

  // The old block is spent; refill it from the free lists.
  blk->_ptr = NULL; blk->_word_size = 0;
  refillLinearAllocBlock(blk);
  assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
         "block was replenished");
  if (res != NULL) {
    // The request was satisfied exactly above; just record the split
    // birth and repair the (refilled) block.
    splitBirth(size);
    repairLinearAllocBlock(blk);
  } else if (blk->_ptr != NULL) {
    // Serve the request from the front of the freshly refilled block.
    res = blk->_ptr;
    size_t blk_size = blk->_word_size;
    blk->_word_size -= size;
    blk->_ptr  += size;
    splitBirth(size);
    repairLinearAllocBlock(blk);
    // Update BOT last so that other (parallel) GC threads see a consistent
    // view of the BOT and free blocks.
    // Above must occur before BOT is updated below.
    _bt.split_block(res, blk_size, size);  // adjust block offset table
  }
  return res;
}
1456 | |
// Allocate "size" words from the remainder of the given linear
// allocation block, without refilling it. Returns NULL if the
// remainder is too small (it must retain at least MinChunkSize words).
HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
                                        LinearAllocBlock* blk,
                                        size_t size) {
  assert_locked();
  assert(size >= MinChunkSize, "too small");

  HeapWord* res = NULL;
  // This is the common case. Keep it simple.
  if (blk->_word_size >= size + MinChunkSize) {
    assert(blk->_ptr != NULL, "consistency check");
    res = blk->_ptr;
    // Note that the BOT is up-to-date for the linAB before allocation. It
    // indicates the start of the linAB. The split_block() updates the
    // BOT for the linAB after the allocation (indicates the start of the
    // next chunk to be allocated).
    size_t blk_size = blk->_word_size;
    blk->_word_size -= size;
    blk->_ptr  += size;
    splitBirth(size);
    repairLinearAllocBlock(blk);
    // Update BOT last so that other (parallel) GC threads see a consistent
    // view of the BOT and free blocks.
    // Above must occur before BOT is updated below.
    _bt.split_block(res, blk_size, size);  // adjust block offset table
    _bt.allocated(res, size);
  }
  return res;
}
1485 | |
// Return an exact-size chunk from the indexed free list for "size",
// replenishing that list (from larger lists or the dictionary) via the
// helper when it is empty. Returns NULL on failure.
FreeChunk*
CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  assert_locked();
  assert(size < SmallForDictionary, "just checking");
  FreeChunk* res;
  res = _indexedFreeList[size].getChunkAtHead();
  if (res == NULL) {
    res = getChunkFromIndexedFreeListHelper(size);
  }
  _bt.verify_not_unallocated((HeapWord*) res, size);
  assert(res == NULL || res->size() == size, "Incorrect block size");
  return res;
}
1499 | |
// Replenishment helper for getChunkFromIndexedFreeList(): obtain a chunk
// of exactly "size" words when the exact list is empty. "replenish"
// distinguishes the top-level call (carve the obtained larger block into
// "size"-sized pieces, stocking the list and returning one piece) from
// the recursive intermediate call which, unless CMSReplenishIntermediate
// is set, hands the whole block back uncarved. Returns NULL on failure.
FreeChunk*
CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  bool replenish) {
  assert_locked();
  FreeChunk* fc = NULL;
  if (size < SmallForDictionary) {
    assert(_indexedFreeList[size].head() == NULL ||
      _indexedFreeList[size].surplus() <= 0,
      "List for this size should be empty or under populated");
    // Try best fit in exact lists before replenishing the list
    if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
      // Replenish list.
      //
      // Things tried that failed.
      //   Tried allocating out of the two LinAB's first before
      // replenishing lists.
      //   Tried small linAB of size 256 (size in indexed list)
      // and replenishing indexed lists from the small linAB.
      //
      FreeChunk* newFc = NULL;
      const size_t replenish_size = CMSIndexedFreeListReplenish * size;
      if (replenish_size < SmallForDictionary) {
        // Try the exact list for replenish_size first.
        // Do not replenish from an underpopulated size.
        if (_indexedFreeList[replenish_size].surplus() > 0 &&
            _indexedFreeList[replenish_size].head() != NULL) {
          newFc = _indexedFreeList[replenish_size].getChunkAtHead();
        } else if (bestFitFirst()) {
          newFc = bestFitSmall(replenish_size);
        }
      }
      if (newFc == NULL && replenish_size > size) {
        assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
        // Recurse as an intermediate (replenish == false) request.
        newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
      }
      // Note: The stats update re split-death of block obtained above
      // will be recorded below precisely when we know we are going to
      // be actually splitting it into more than one pieces below.
      if (newFc != NULL) {
        if (replenish || CMSReplenishIntermediate) {
          // Replenish this list and return one block to caller.
          size_t i;
          FreeChunk *curFc, *nextFc;
          size_t num_blk = newFc->size() / size;
          assert(num_blk >= 1, "Smaller than requested?");
          assert(newFc->size() % size == 0, "Should be integral multiple of request");
          if (num_blk > 1) {
            // we are sure we will be splitting the block just obtained
            // into multiple pieces; record the split-death of the original
            splitDeath(replenish_size);
          }
          // carve up and link blocks 0, ..., num_blk - 2
          // The last chunk is not added to the lists but is returned as the
          // free chunk.
          for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
               i = 0;
               i < (num_blk - 1);
               curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
               i++) {
            curFc->setSize(size);
            // Don't record this as a return in order to try and
            // determine the "returns" from a GC.
            // NOTE(review): 'fc' is still NULL on this path -- was
            // 'curFc' intended as the argument below? Verify against
            // upstream before changing.
            _bt.verify_not_unallocated((HeapWord*) fc, size);
            _indexedFreeList[size].returnChunkAtTail(curFc, false);
            _bt.mark_block((HeapWord*)curFc, size);
            splitBirth(size);
            // Don't record the initial population of the indexed list
            // as a split birth.
          }

          // check that the arithmetic was OK above
          assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
            "inconsistency in carving newFc");
          // The last carved piece is handed to the caller.
          curFc->setSize(size);
          _bt.mark_block((HeapWord*)curFc, size);
          splitBirth(size);
          fc = curFc;
        } else {
          // Return entire block to caller
          fc = newFc;
        }
      }
    }
  } else {
    // Get a free chunk from the free chunk dictionary to be returned to
    // replenish the indexed free list.
    fc = getChunkFromDictionaryExact(size);
  }
  // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
  return fc;
}
1590 | |
1591 FreeChunk* | |
1592 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) { | |
1593 assert_locked(); | |
1594 FreeChunk* fc = _dictionary->getChunk(size); | |
1595 if (fc == NULL) { | |
1596 return NULL; | |
1597 } | |
1598 _bt.allocated((HeapWord*)fc, fc->size()); | |
1599 if (fc->size() >= size + MinChunkSize) { | |
1600 fc = splitChunkAndReturnRemainder(fc, size); | |
1601 } | |
1602 assert(fc->size() >= size, "chunk too small"); | |
1603 assert(fc->size() < size + MinChunkSize, "chunk too big"); | |
1604 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1605 return fc; | |
1606 } | |
1607 | |
1608 FreeChunk* | |
1609 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) { | |
1610 assert_locked(); | |
1611 FreeChunk* fc = _dictionary->getChunk(size); | |
1612 if (fc == NULL) { | |
1613 return fc; | |
1614 } | |
1615 _bt.allocated((HeapWord*)fc, fc->size()); | |
1616 if (fc->size() == size) { | |
1617 _bt.verify_single_block((HeapWord*)fc, size); | |
1618 return fc; | |
1619 } | |
1620 assert(fc->size() > size, "getChunk() guarantee"); | |
1621 if (fc->size() < size + MinChunkSize) { | |
1622 // Return the chunk to the dictionary and go get a bigger one. | |
1623 returnChunkToDictionary(fc); | |
1624 fc = _dictionary->getChunk(size + MinChunkSize); | |
1625 if (fc == NULL) { | |
1626 return NULL; | |
1627 } | |
1628 _bt.allocated((HeapWord*)fc, fc->size()); | |
1629 } | |
1630 assert(fc->size() >= size + MinChunkSize, "tautology"); | |
1631 fc = splitChunkAndReturnRemainder(fc, size); | |
1632 assert(fc->size() == size, "chunk is wrong size"); | |
1633 _bt.verify_single_block((HeapWord*)fc, size); | |
1634 return fc; | |
1635 } | |
1636 | |
1637 void | |
1638 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) { | |
1639 assert_locked(); | |
1640 | |
1641 size_t size = chunk->size(); | |
1642 _bt.verify_single_block((HeapWord*)chunk, size); | |
1643 // adjust _unallocated_block downward, as necessary | |
1644 _bt.freed((HeapWord*)chunk, size); | |
1645 _dictionary->returnChunk(chunk); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1646 #ifndef PRODUCT |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1647 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1648 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1649 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1650 #endif // PRODUCT |
0 | 1651 } |
1652 | |
1653 void | |
1654 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) { | |
1655 assert_locked(); | |
1656 size_t size = fc->size(); | |
1657 _bt.verify_single_block((HeapWord*) fc, size); | |
1658 _bt.verify_not_unallocated((HeapWord*) fc, size); | |
1659 if (_adaptive_freelists) { | |
1660 _indexedFreeList[size].returnChunkAtTail(fc); | |
1661 } else { | |
1662 _indexedFreeList[size].returnChunkAtHead(fc); | |
1663 } | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1664 #ifndef PRODUCT |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1665 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1666 _indexedFreeList[size].verify_stats(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1667 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1668 #endif // PRODUCT |
0 | 1669 } |
1670 | |
1671 // Add chunk to end of last block -- if it's the largest | |
1672 // block -- and update BOT and census data. We would | |
1673 // of course have preferred to coalesce it with the | |
1674 // last block, but it's currently less expensive to find the | |
1675 // largest block than it is to find the last. | |
1676 void | |
1677 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats( | |
1678 HeapWord* chunk, size_t size) { | |
1679 // check that the chunk does lie in this space! | |
1680 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1681 // One of the parallel gc task threads may be here | |
1682 // whilst others are allocating. | |
1683 Mutex* lock = NULL; | |
1684 if (ParallelGCThreads != 0) { | |
1685 lock = &_parDictionaryAllocLock; | |
1686 } | |
1687 FreeChunk* ec; | |
1688 { | |
1689 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1690 ec = dictionary()->findLargestDict(); // get largest block | |
1691 if (ec != NULL && ec->end() == chunk) { | |
1692 // It's a coterminal block - we can coalesce. | |
1693 size_t old_size = ec->size(); | |
1694 coalDeath(old_size); | |
1695 removeChunkFromDictionary(ec); | |
1696 size += old_size; | |
1697 } else { | |
1698 ec = (FreeChunk*)chunk; | |
1699 } | |
1700 } | |
1701 ec->setSize(size); | |
1702 debug_only(ec->mangleFreed(size)); | |
1703 if (size < SmallForDictionary) { | |
1704 lock = _indexedFreeListParLocks[size]; | |
1705 } | |
1706 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1707 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true); | |
1708 // record the birth under the lock since the recording involves | |
1709 // manipulation of the list on which the chunk lives and | |
1710 // if the chunk is allocated and is the last on the list, | |
1711 // the list can go away. | |
1712 coalBirth(size); | |
1713 } | |
1714 | |
1715 void | |
1716 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk, | |
1717 size_t size) { | |
1718 // check that the chunk does lie in this space! | |
1719 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1720 assert_locked(); | |
1721 _bt.verify_single_block(chunk, size); | |
1722 | |
1723 FreeChunk* fc = (FreeChunk*) chunk; | |
1724 fc->setSize(size); | |
1725 debug_only(fc->mangleFreed(size)); | |
1726 if (size < SmallForDictionary) { | |
1727 returnChunkToFreeList(fc); | |
1728 } else { | |
1729 returnChunkToDictionary(fc); | |
1730 } | |
1731 } | |
1732 | |
1733 void | |
1734 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk, | |
1735 size_t size, bool coalesced) { | |
1736 assert_locked(); | |
1737 assert(chunk != NULL, "null chunk"); | |
1738 if (coalesced) { | |
1739 // repair BOT | |
1740 _bt.single_block(chunk, size); | |
1741 } | |
1742 addChunkToFreeLists(chunk, size); | |
1743 } | |
1744 | |
1745 // We _must_ find the purported chunk on our free lists; | |
1746 // we assert if we don't. | |
1747 void | |
1748 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) { | |
1749 size_t size = fc->size(); | |
1750 assert_locked(); | |
1751 debug_only(verifyFreeLists()); | |
1752 if (size < SmallForDictionary) { | |
1753 removeChunkFromIndexedFreeList(fc); | |
1754 } else { | |
1755 removeChunkFromDictionary(fc); | |
1756 } | |
1757 _bt.verify_single_block((HeapWord*)fc, size); | |
1758 debug_only(verifyFreeLists()); | |
1759 } | |
1760 | |
1761 void | |
1762 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) { | |
1763 size_t size = fc->size(); | |
1764 assert_locked(); | |
1765 assert(fc != NULL, "null chunk"); | |
1766 _bt.verify_single_block((HeapWord*)fc, size); | |
1767 _dictionary->removeChunk(fc); | |
1768 // adjust _unallocated_block upward, as necessary | |
1769 _bt.allocated((HeapWord*)fc, size); | |
1770 } | |
1771 | |
1772 void | |
1773 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) { | |
1774 assert_locked(); | |
1775 size_t size = fc->size(); | |
1776 _bt.verify_single_block((HeapWord*)fc, size); | |
1777 NOT_PRODUCT( | |
1778 if (FLSVerifyIndexTable) { | |
1779 verifyIndexedFreeList(size); | |
1780 } | |
1781 ) | |
1782 _indexedFreeList[size].removeChunk(fc); | |
1783 debug_only(fc->clearNext()); | |
1784 debug_only(fc->clearPrev()); | |
1785 NOT_PRODUCT( | |
1786 if (FLSVerifyIndexTable) { | |
1787 verifyIndexedFreeList(size); | |
1788 } | |
1789 ) | |
1790 } | |
1791 | |
1792 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) { | |
1793 /* A hint is the next larger size that has a surplus. | |
1794 Start search at a size large enough to guarantee that | |
1795 the excess is >= MIN_CHUNK. */ | |
1796 size_t start = align_object_size(numWords + MinChunkSize); | |
1797 if (start < IndexSetSize) { | |
1798 FreeList* it = _indexedFreeList; | |
1799 size_t hint = _indexedFreeList[start].hint(); | |
1800 while (hint < IndexSetSize) { | |
1801 assert(hint % MinObjAlignment == 0, "hint should be aligned"); | |
1802 FreeList *fl = &_indexedFreeList[hint]; | |
1803 if (fl->surplus() > 0 && fl->head() != NULL) { | |
1804 // Found a list with surplus, reset original hint | |
1805 // and split out a free chunk which is returned. | |
1806 _indexedFreeList[start].set_hint(hint); | |
1807 FreeChunk* res = getFromListGreater(fl, numWords); | |
1808 assert(res == NULL || res->isFree(), | |
1809 "Should be returning a free chunk"); | |
1810 return res; | |
1811 } | |
1812 hint = fl->hint(); /* keep looking */ | |
1813 } | |
1814 /* None found. */ | |
1815 it[start].set_hint(IndexSetSize); | |
1816 } | |
1817 return NULL; | |
1818 } | |
1819 | |
1820 /* Requires fl->size >= numWords + MinChunkSize */ | |
1821 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl, | |
1822 size_t numWords) { | |
1823 FreeChunk *curr = fl->head(); | |
1824 size_t oldNumWords = curr->size(); | |
1825 assert(numWords >= MinChunkSize, "Word size is too small"); | |
1826 assert(curr != NULL, "List is empty"); | |
1827 assert(oldNumWords >= numWords + MinChunkSize, | |
1828 "Size of chunks in the list is too small"); | |
1829 | |
1830 fl->removeChunk(curr); | |
1831 // recorded indirectly by splitChunkAndReturnRemainder - | |
1832 // smallSplit(oldNumWords, numWords); | |
1833 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords); | |
1834 // Does anything have to be done for the remainder in terms of | |
1835 // fixing the card table? | |
1836 assert(new_chunk == NULL || new_chunk->isFree(), | |
1837 "Should be returning a free chunk"); | |
1838 return new_chunk; | |
1839 } | |
1840 | |
1841 FreeChunk* | |
1842 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk, | |
1843 size_t new_size) { | |
1844 assert_locked(); | |
1845 size_t size = chunk->size(); | |
1846 assert(size > new_size, "Split from a smaller block?"); | |
1847 assert(is_aligned(chunk), "alignment problem"); | |
1848 assert(size == adjustObjectSize(size), "alignment problem"); | |
1849 size_t rem_size = size - new_size; | |
1850 assert(rem_size == adjustObjectSize(rem_size), "alignment problem"); | |
1851 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum"); | |
1852 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size); | |
1853 assert(is_aligned(ffc), "alignment problem"); | |
1854 ffc->setSize(rem_size); | |
1855 ffc->linkNext(NULL); | |
1856 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads. | |
1857 // Above must occur before BOT is updated below. | |
1858 // adjust block offset table | |
1859 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size); | |
1860 if (rem_size < SmallForDictionary) { | |
1861 bool is_par = (SharedHeap::heap()->n_par_threads() > 0); | |
1862 if (is_par) _indexedFreeListParLocks[rem_size]->lock(); | |
1863 returnChunkToFreeList(ffc); | |
1864 split(size, rem_size); | |
1865 if (is_par) _indexedFreeListParLocks[rem_size]->unlock(); | |
1866 } else { | |
1867 returnChunkToDictionary(ffc); | |
1868 split(size ,rem_size); | |
1869 } | |
1870 chunk->setSize(new_size); | |
1871 return chunk; | |
1872 } | |
1873 | |
1874 void | |
1875 CompactibleFreeListSpace::sweep_completed() { | |
1876 // Now that space is probably plentiful, refill linear | |
1877 // allocation blocks as needed. | |
1878 refillLinearAllocBlocksIfNeeded(); | |
1879 } | |
1880 | |
1881 void | |
1882 CompactibleFreeListSpace::gc_prologue() { | |
1883 assert_locked(); | |
1884 if (PrintFLSStatistics != 0) { | |
1885 gclog_or_tty->print("Before GC:\n"); | |
1886 reportFreeListStatistics(); | |
1887 } | |
1888 refillLinearAllocBlocksIfNeeded(); | |
1889 } | |
1890 | |
1891 void | |
1892 CompactibleFreeListSpace::gc_epilogue() { | |
1893 assert_locked(); | |
1894 if (PrintGCDetails && Verbose && !_adaptive_freelists) { | |
1895 if (_smallLinearAllocBlock._word_size == 0) | |
1896 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure"); | |
1897 } | |
1898 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); | |
1899 _promoInfo.stopTrackingPromotions(); | |
1900 repairLinearAllocationBlocks(); | |
1901 // Print Space's stats | |
1902 if (PrintFLSStatistics != 0) { | |
1903 gclog_or_tty->print("After GC:\n"); | |
1904 reportFreeListStatistics(); | |
1905 } | |
1906 } | |
1907 | |
1908 // Iteration support, mostly delegated from a CMS generation | |
1909 | |
1910 void CompactibleFreeListSpace::save_marks() { | |
1911 // mark the "end" of the used space at the time of this call; | |
1912 // note, however, that promoted objects from this point | |
1913 // on are tracked in the _promoInfo below. | |
1914 set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ? | |
1915 unallocated_block() : end()); | |
1916 // inform allocator that promotions should be tracked. | |
1917 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); | |
1918 _promoInfo.startTrackingPromotions(); | |
1919 } | |
1920 | |
1921 bool CompactibleFreeListSpace::no_allocs_since_save_marks() { | |
1922 assert(_promoInfo.tracking(), "No preceding save_marks?"); | |
1923 guarantee(SharedHeap::heap()->n_par_threads() == 0, | |
1924 "Shouldn't be called (yet) during parallel part of gc."); | |
1925 return _promoInfo.noPromotions(); | |
1926 } | |
1927 | |
1928 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
1929 \ | |
1930 void CompactibleFreeListSpace:: \ | |
1931 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ | |
1932 assert(SharedHeap::heap()->n_par_threads() == 0, \ | |
1933 "Shouldn't be called (yet) during parallel part of gc."); \ | |
1934 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \ | |
1935 /* \ | |
1936 * This also restores any displaced headers and removes the elements from \ | |
1937 * the iteration set as they are processed, so that we have a clean slate \ | |
1938 * at the end of the iteration. Note, thus, that if new objects are \ | |
1939 * promoted as a result of the iteration they are iterated over as well. \ | |
1940 */ \ | |
1941 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \ | |
1942 } | |
1943 | |
1944 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN) | |
1945 | |
1946 | |
1947 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) { | |
1948 // ugghh... how would one do this efficiently for a non-contiguous space? | |
1949 guarantee(false, "NYI"); | |
1950 } | |
1951 | |
12
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
1952 bool CompactibleFreeListSpace::linearAllocationWouldFail() const { |
0 | 1953 return _smallLinearAllocBlock._word_size == 0; |
1954 } | |
1955 | |
1956 void CompactibleFreeListSpace::repairLinearAllocationBlocks() { | |
1957 // Fix up linear allocation blocks to look like free blocks | |
1958 repairLinearAllocBlock(&_smallLinearAllocBlock); | |
1959 } | |
1960 | |
1961 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) { | |
1962 assert_locked(); | |
1963 if (blk->_ptr != NULL) { | |
1964 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize, | |
1965 "Minimum block size requirement"); | |
1966 FreeChunk* fc = (FreeChunk*)(blk->_ptr); | |
1967 fc->setSize(blk->_word_size); | |
1968 fc->linkPrev(NULL); // mark as free | |
1969 fc->dontCoalesce(); | |
1970 assert(fc->isFree(), "just marked it free"); | |
1971 assert(fc->cantCoalesce(), "just marked it uncoalescable"); | |
1972 } | |
1973 } | |
1974 | |
1975 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() { | |
1976 assert_locked(); | |
1977 if (_smallLinearAllocBlock._ptr == NULL) { | |
1978 assert(_smallLinearAllocBlock._word_size == 0, | |
1979 "Size of linAB should be zero if the ptr is NULL"); | |
1980 // Reset the linAB refill and allocation size limit. | |
1981 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc); | |
1982 } | |
1983 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock); | |
1984 } | |
1985 | |
1986 void | |
1987 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) { | |
1988 assert_locked(); | |
1989 assert((blk->_ptr == NULL && blk->_word_size == 0) || | |
1990 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize), | |
1991 "blk invariant"); | |
1992 if (blk->_ptr == NULL) { | |
1993 refillLinearAllocBlock(blk); | |
1994 } | |
1995 if (PrintMiscellaneous && Verbose) { | |
1996 if (blk->_word_size == 0) { | |
1997 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure"); | |
1998 } | |
1999 } | |
2000 } | |
2001 | |
2002 void | |
2003 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) { | |
2004 assert_locked(); | |
2005 assert(blk->_word_size == 0 && blk->_ptr == NULL, | |
2006 "linear allocation block should be empty"); | |
2007 FreeChunk* fc; | |
2008 if (blk->_refillSize < SmallForDictionary && | |
2009 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) { | |
2010 // A linAB's strategy might be to use small sizes to reduce | |
2011 // fragmentation but still get the benefits of allocation from a | |
2012 // linAB. | |
2013 } else { | |
2014 fc = getChunkFromDictionary(blk->_refillSize); | |
2015 } | |
2016 if (fc != NULL) { | |
2017 blk->_ptr = (HeapWord*)fc; | |
2018 blk->_word_size = fc->size(); | |
2019 fc->dontCoalesce(); // to prevent sweeper from sweeping us up | |
2020 } | |
2021 } | |
2022 | |
12
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
2023 // Support for concurrent collection policy decisions. |
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
2024 bool CompactibleFreeListSpace::should_concurrent_collect() const { |
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
2025 // In the future we might want to add in frgamentation stats -- |
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
2026 // including erosion of the "mountain" into this decision as well. |
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
2027 return !adaptive_freelists() && linearAllocationWouldFail(); |
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
2028 } |
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
2029 |
0 | 2030 // Support for compaction |
2031 | |
2032 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { | |
2033 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); | |
2034 // prepare_for_compaction() uses the space between live objects | |
2035 // so that later phase can skip dead space quickly. So verification | |
2036 // of the free lists doesn't work after. | |
2037 } | |
2038 | |
2039 #define obj_size(q) adjustObjectSize(oop(q)->size()) | |
2040 #define adjust_obj_size(s) adjustObjectSize(s) | |
2041 | |
2042 void CompactibleFreeListSpace::adjust_pointers() { | |
2043 // In other versions of adjust_pointers(), a bail out | |
2044 // based on the amount of live data in the generation | |
2045 // (i.e., if 0, bail out) may be used. | |
2046 // Cannot test used() == 0 here because the free lists have already | |
2047 // been mangled by the compaction. | |
2048 | |
2049 SCAN_AND_ADJUST_POINTERS(adjust_obj_size); | |
2050 // See note about verification in prepare_for_compaction(). | |
2051 } | |
2052 | |
2053 void CompactibleFreeListSpace::compact() { | |
2054 SCAN_AND_COMPACT(obj_size); | |
2055 } | |
2056 | |
2057 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] | |
2058 // where fbs is free block sizes | |
2059 double CompactibleFreeListSpace::flsFrag() const { | |
2060 size_t itabFree = totalSizeInIndexedFreeLists(); | |
2061 double frag = 0.0; | |
2062 size_t i; | |
2063 | |
2064 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2065 double sz = i; | |
2066 frag += _indexedFreeList[i].count() * (sz * sz); | |
2067 } | |
2068 | |
2069 double totFree = itabFree + | |
2070 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())); | |
2071 if (totFree > 0) { | |
2072 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / | |
2073 (totFree * totFree)); | |
2074 frag = (double)1.0 - frag; | |
2075 } else { | |
2076 assert(frag == 0.0, "Follows from totFree == 0"); | |
2077 } | |
2078 return frag; | |
2079 } | |
2080 | |
2081 void CompactibleFreeListSpace::beginSweepFLCensus( | |
2082 float inter_sweep_current, | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2083 float inter_sweep_estimate, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2084 float intra_sweep_estimate) { |
0 | 2085 assert_locked(); |
2086 size_t i; | |
2087 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2088 FreeList* fl = &_indexedFreeList[i]; | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2089 if (PrintFLSStatistics > 1) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2090 gclog_or_tty->print("size[%d] : ", i); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2091 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2092 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2093 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent)); |
0 | 2094 fl->set_beforeSweep(fl->count()); |
2095 fl->set_bfrSurp(fl->surplus()); | |
2096 } | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2097 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent, |
0 | 2098 inter_sweep_current, |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2099 inter_sweep_estimate, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2100 intra_sweep_estimate); |
0 | 2101 } |
2102 | |
2103 void CompactibleFreeListSpace::setFLSurplus() { | |
2104 assert_locked(); | |
2105 size_t i; | |
2106 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2107 FreeList *fl = &_indexedFreeList[i]; | |
2108 fl->set_surplus(fl->count() - | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2109 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent)); |
0 | 2110 } |
2111 } | |
2112 | |
2113 void CompactibleFreeListSpace::setFLHints() { | |
2114 assert_locked(); | |
2115 size_t i; | |
2116 size_t h = IndexSetSize; | |
2117 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { | |
2118 FreeList *fl = &_indexedFreeList[i]; | |
2119 fl->set_hint(h); | |
2120 if (fl->surplus() > 0) { | |
2121 h = i; | |
2122 } | |
2123 } | |
2124 } | |
2125 | |
2126 void CompactibleFreeListSpace::clearFLCensus() { | |
2127 assert_locked(); | |
2128 int i; | |
2129 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2130 FreeList *fl = &_indexedFreeList[i]; | |
2131 fl->set_prevSweep(fl->count()); | |
2132 fl->set_coalBirths(0); | |
2133 fl->set_coalDeaths(0); | |
2134 fl->set_splitBirths(0); | |
2135 fl->set_splitDeaths(0); | |
2136 } | |
2137 } | |
2138 | |
12
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
2139 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) { |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2140 if (PrintFLSStatistics > 0) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2141 HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2142 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2143 largestAddr); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2144 } |
0 | 2145 setFLSurplus(); |
2146 setFLHints(); | |
2147 if (PrintGC && PrintFLSCensus > 0) { | |
12
6432c3bb6240
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
0
diff
changeset
|
2148 printFLCensus(sweep_count); |
0 | 2149 } |
2150 clearFLCensus(); | |
2151 assert_locked(); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2152 _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent); |
0 | 2153 } |
2154 | |
2155 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) { | |
2156 if (size < SmallForDictionary) { | |
2157 FreeList *fl = &_indexedFreeList[size]; | |
2158 return (fl->coalDesired() < 0) || | |
2159 ((int)fl->count() > fl->coalDesired()); | |
2160 } else { | |
2161 return dictionary()->coalDictOverPopulated(size); | |
2162 } | |
2163 } | |
2164 | |
2165 void CompactibleFreeListSpace::smallCoalBirth(size_t size) { | |
2166 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2167 FreeList *fl = &_indexedFreeList[size]; | |
2168 fl->increment_coalBirths(); | |
2169 fl->increment_surplus(); | |
2170 } | |
2171 | |
2172 void CompactibleFreeListSpace::smallCoalDeath(size_t size) { | |
2173 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2174 FreeList *fl = &_indexedFreeList[size]; | |
2175 fl->increment_coalDeaths(); | |
2176 fl->decrement_surplus(); | |
2177 } | |
2178 | |
2179 void CompactibleFreeListSpace::coalBirth(size_t size) { | |
2180 if (size < SmallForDictionary) { | |
2181 smallCoalBirth(size); | |
2182 } else { | |
2183 dictionary()->dictCensusUpdate(size, | |
2184 false /* split */, | |
2185 true /* birth */); | |
2186 } | |
2187 } | |
2188 | |
2189 void CompactibleFreeListSpace::coalDeath(size_t size) { | |
2190 if(size < SmallForDictionary) { | |
2191 smallCoalDeath(size); | |
2192 } else { | |
2193 dictionary()->dictCensusUpdate(size, | |
2194 false /* split */, | |
2195 false /* birth */); | |
2196 } | |
2197 } | |
2198 | |
2199 void CompactibleFreeListSpace::smallSplitBirth(size_t size) { | |
2200 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2201 FreeList *fl = &_indexedFreeList[size]; | |
2202 fl->increment_splitBirths(); | |
2203 fl->increment_surplus(); | |
2204 } | |
2205 | |
2206 void CompactibleFreeListSpace::smallSplitDeath(size_t size) { | |
2207 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2208 FreeList *fl = &_indexedFreeList[size]; | |
2209 fl->increment_splitDeaths(); | |
2210 fl->decrement_surplus(); | |
2211 } | |
2212 | |
2213 void CompactibleFreeListSpace::splitBirth(size_t size) { | |
2214 if (size < SmallForDictionary) { | |
2215 smallSplitBirth(size); | |
2216 } else { | |
2217 dictionary()->dictCensusUpdate(size, | |
2218 true /* split */, | |
2219 true /* birth */); | |
2220 } | |
2221 } | |
2222 | |
2223 void CompactibleFreeListSpace::splitDeath(size_t size) { | |
2224 if (size < SmallForDictionary) { | |
2225 smallSplitDeath(size); | |
2226 } else { | |
2227 dictionary()->dictCensusUpdate(size, | |
2228 true /* split */, | |
2229 false /* birth */); | |
2230 } | |
2231 } | |
2232 | |
2233 void CompactibleFreeListSpace::split(size_t from, size_t to1) { | |
2234 size_t to2 = from - to1; | |
2235 splitDeath(from); | |
2236 splitBirth(to1); | |
2237 splitBirth(to2); | |
2238 } | |
2239 | |
2240 void CompactibleFreeListSpace::print() const { | |
2241 tty->print(" CompactibleFreeListSpace"); | |
2242 Space::print(); | |
2243 } | |
2244 | |
2245 void CompactibleFreeListSpace::prepare_for_verify() { | |
2246 assert_locked(); | |
2247 repairLinearAllocationBlocks(); | |
2248 // Verify that the SpoolBlocks look like free blocks of | |
2249 // appropriate sizes... To be done ... | |
2250 } | |
2251 | |
2252 class VerifyAllBlksClosure: public BlkClosure { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2253 private: |
0 | 2254 const CompactibleFreeListSpace* _sp; |
2255 const MemRegion _span; | |
2256 | |
2257 public: | |
2258 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp, | |
2259 MemRegion span) : _sp(sp), _span(span) { } | |
2260 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2261 virtual size_t do_blk(HeapWord* addr) { |
0 | 2262 size_t res; |
2263 if (_sp->block_is_obj(addr)) { | |
2264 oop p = oop(addr); | |
2265 guarantee(p->is_oop(), "Should be an oop"); | |
2266 res = _sp->adjustObjectSize(p->size()); | |
2267 if (_sp->obj_is_alive(addr)) { | |
2268 p->verify(); | |
2269 } | |
2270 } else { | |
2271 FreeChunk* fc = (FreeChunk*)addr; | |
2272 res = fc->size(); | |
2273 if (FLSVerifyLists && !fc->cantCoalesce()) { | |
2274 guarantee(_sp->verifyChunkInFreeLists(fc), | |
2275 "Chunk should be on a free list"); | |
2276 } | |
2277 } | |
2278 guarantee(res != 0, "Livelock: no rank reduction!"); | |
2279 return res; | |
2280 } | |
2281 }; | |
2282 | |
2283 class VerifyAllOopsClosure: public OopClosure { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2284 private: |
0 | 2285 const CMSCollector* _collector; |
2286 const CompactibleFreeListSpace* _sp; | |
2287 const MemRegion _span; | |
2288 const bool _past_remark; | |
2289 const CMSBitMap* _bit_map; | |
2290 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2291 protected: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2292 void do_oop(void* p, oop obj) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2293 if (_span.contains(obj)) { // the interior oop points into CMS heap |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2294 if (!_span.contains(p)) { // reference from outside CMS heap |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2295 // Should be a valid object; the first disjunct below allows |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2296 // us to sidestep an assertion in block_is_obj() that insists |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2297 // that p be in _sp. Note that several generations (and spaces) |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2298 // are spanned by _span (CMS heap) above. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2299 guarantee(!_sp->is_in_reserved(obj) || |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2300 _sp->block_is_obj((HeapWord*)obj), |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2301 "Should be an object"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2302 guarantee(obj->is_oop(), "Should be an oop"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2303 obj->verify(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2304 if (_past_remark) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2305 // Remark has been completed, the object should be marked |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2306 _bit_map->isMarked((HeapWord*)obj); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2307 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2308 } else { // reference within CMS heap |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2309 if (_past_remark) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2310 // Remark has been completed -- so the referent should have |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2311 // been marked, if referring object is. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2312 if (_bit_map->isMarked(_collector->block_start(p))) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2313 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2314 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2315 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2316 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2317 } else if (_sp->is_in_reserved(p)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2318 // the reference is from FLS, and points out of FLS |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2319 guarantee(obj->is_oop(), "Should be an oop"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2320 obj->verify(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2321 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2322 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2323 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2324 template <class T> void do_oop_work(T* p) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2325 T heap_oop = oopDesc::load_heap_oop(p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2326 if (!oopDesc::is_null(heap_oop)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2327 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2328 do_oop(p, obj); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2329 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2330 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2331 |
0 | 2332 public: |
2333 VerifyAllOopsClosure(const CMSCollector* collector, | |
2334 const CompactibleFreeListSpace* sp, MemRegion span, | |
2335 bool past_remark, CMSBitMap* bit_map) : | |
2336 OopClosure(), _collector(collector), _sp(sp), _span(span), | |
2337 _past_remark(past_remark), _bit_map(bit_map) { } | |
2338 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2339 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
12
diff
changeset
|
2340 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); } |
0 | 2341 }; |
2342 | |
// Verify the integrity of this free-list space: its bookkeeping data
// structures, every block it contains, and (optionally) every heap
// reference into or out of it.  Caller must hold the free list lock.
// The 'ignored' parameter exists only to match the Space interface.
void CompactibleFreeListSpace::verify(bool ignored) const {
  assert_lock_strong(&_freelistLock);
  verify_objects_initialized();
  MemRegion span = _collector->_span;
  // Past the start of sweeping, remark has completed, so stronger
  // marking invariants can be checked by the oops closure below.
  bool past_remark = (_collector->abstract_state() ==
                      CMSCollector::Sweeping);

  ResourceMark rm;
  HandleMark hm;

  // Check integrity of CFL data structures
  _promoInfo.verify();
  _dictionary->verify();
  if (FLSVerifyIndexTable) {
    verifyIndexedFreeLists();
  }
  // Check integrity of all objects and free blocks in space
  {
    VerifyAllBlksClosure cl(this, span);
    ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  }
  // Check that all references in the heap to FLS
  // are to valid objects in FLS or that references in
  // FLS are to valid objects elsewhere in the heap
  if (FLSVerifyAllHeapReferences)
  {
    VerifyAllOopsClosure cl(_collector, this, span, past_remark,
      _collector->markBitMap());
    CollectedHeap* ch = Universe::heap();
    ch->oop_iterate(&cl);              // all oops in generations
    ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  }

  if (VerifyObjectStartArray) {
    // Verify the block offset table
    _bt.verify();
  }
}
2381 | |
2382 #ifndef PRODUCT | |
2383 void CompactibleFreeListSpace::verifyFreeLists() const { | |
2384 if (FLSVerifyLists) { | |
2385 _dictionary->verify(); | |
2386 verifyIndexedFreeLists(); | |
2387 } else { | |
2388 if (FLSVerifyDictionary) { | |
2389 _dictionary->verify(); | |
2390 } | |
2391 if (FLSVerifyIndexTable) { | |
2392 verifyIndexedFreeLists(); | |
2393 } | |
2394 } | |
2395 } | |
2396 #endif | |
2397 | |
2398 void CompactibleFreeListSpace::verifyIndexedFreeLists() const { | |
2399 size_t i = 0; | |
2400 for (; i < MinChunkSize; i++) { | |
2401 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL"); | |
2402 } | |
2403 for (; i < IndexSetSize; i++) { | |
2404 verifyIndexedFreeList(i); | |
2405 } | |
2406 } | |
2407 | |
2408 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2409 FreeChunk* fc = _indexedFreeList[size].head(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2410 FreeChunk* tail = _indexedFreeList[size].tail(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2411 size_t num = _indexedFreeList[size].count(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2412 size_t n = 0; |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
2413 guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty"); |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2414 for (; fc != NULL; fc = fc->next(), n++) { |
0 | 2415 guarantee(fc->size() == size, "Size inconsistency"); |
2416 guarantee(fc->isFree(), "!free?"); | |
2417 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list"); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2418 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail"); |
0 | 2419 } |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2420 guarantee(n == num, "Incorrect count"); |
0 | 2421 } |
2422 | |
#ifndef PRODUCT
// Debug-only sanity checks on the static relationships between the
// index-set parameters (IndexSetSize/Start/Stride, MinChunkSize) and
// the dictionary's tree-chunk size that the free list code relies on.
void CompactibleFreeListSpace::checkFreeListConsistency() const {
  // Any size the dictionary refuses must be coverable by the indexed lists.
  assert(_dictionary->minSize() <= IndexSetSize,
         "Some sizes can't be allocated without recourse to"
         " linear allocation buffers");
  assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
         "else MIN_TREE_CHUNK_SIZE is wrong");
  // Stride and starting slot must agree (both 1, or both 2).
  assert((IndexSetStride == 2 && IndexSetStart == 2) ||
         (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  // Stride-2 loops assume even loop bounds / odd table size.
  assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
         "Some for-loops may be incorrectly initialized");
  assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
         "For-loops that iterate over IndexSet with stride 2 may be wrong");
}
#endif
2438 | |
// Print a census of the indexed free lists (one row per list size plus
// an accumulated TOTAL row), the total free space held in those lists,
// derived growth/deficit ratios, and finally the dictionary's own
// census.  Caller must hold the free list lock.
void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  assert_lock_strong(&_freelistLock);
  FreeList total;  // accumulates the TOTAL row below
  gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  FreeList::print_labels_on(gclog_or_tty, "size");
  size_t totalFree = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    const FreeList *fl = &_indexedFreeList[i];
    totalFree += fl->count() * fl->size();
    // Re-emit the column labels every 40 rows for readability.
    if (i % (40*IndexSetStride) == 0) {
      FreeList::print_labels_on(gclog_or_tty, "size");
    }
    fl->print_on(gclog_or_tty);
    // Fold this list's statistics into the running totals.
    total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
    total.set_surplus(    total.surplus()     + fl->surplus()    );
    total.set_desired(    total.desired()     + fl->desired()    );
    total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
    total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
    total.set_count(      total.count()       + fl->count()      );
    total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
    total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
    total.set_splitBirths(total.splitBirths() + fl->splitBirths());
    total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  }
  total.print_on(gclog_or_tty, "TOTAL");
  gclog_or_tty->print_cr("Total free in indexed lists "
                         SIZE_FORMAT " words", totalFree);
  // growth  = (births - deaths) / population at previous sweep;
  // deficit = (desired - current count) / desired.
  // Each denominator falls back to 1.0 to avoid division by zero.
  gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
    (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
    (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
    (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  _dictionary->printDictCensus();
}
2472 | |
///////////////////////////////////////////////////////////////////////////
// CFLS_LAB
///////////////////////////////////////////////////////////////////////////

// Expands to an aggregate initializer containing 257 copies of x.
// 257 must equal CompactibleFreeListSpace::IndexSetSize; this is
// asserted in the CFLS_LAB constructor below.
#define VECTOR_257(x) \
  /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
     x }

// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
// OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
// Per-size global tallies of claimed blocks and of workers that
// claimed them; sampled and reset by compute_desired_plab_size().
size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
0 | 2497 |
2498 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) : | |
2499 _cfls(cfls) | |
2500 { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2501 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above"); |
0 | 2502 for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
2503 i < CompactibleFreeListSpace::IndexSetSize; | |
2504 i += CompactibleFreeListSpace::IndexSetStride) { | |
2505 _indexedFreeList[i].set_size(i); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2506 _num_blocks[i] = 0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2507 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2508 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2509 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2510 static bool _CFLS_LAB_modified = false; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2511 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2512 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2513 assert(!_CFLS_LAB_modified, "Call only once"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2514 _CFLS_LAB_modified = true; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2515 for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2516 i < CompactibleFreeListSpace::IndexSetSize; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2517 i += CompactibleFreeListSpace::IndexSetStride) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2518 _blocks_to_claim[i].modify(n, wt, true /* force */); |
0 | 2519 } |
2520 } | |
2521 | |
// Allocate a block of word_sz words (after size adjustment) for this
// thread.  Small requests (< IndexSetSize words) are served from the
// thread-local indexed free lists, bulk-refilled from the shared pool
// when empty; larger requests go to the shared dictionary under a
// lock.  Returns NULL if no suitable block is available.
HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  FreeChunk* res;
  word_sz = _cfls->adjustObjectSize(word_sz);
  if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
    // This locking manages sync with other large object allocations.
    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    res = _cfls->getChunkFromDictionaryExact(word_sz);
    if (res == NULL) return NULL;
  } else {
    FreeList* fl = &_indexedFreeList[word_sz];
    if (fl->count() == 0) {
      // Attempt to refill this local free list.
      get_from_global_pool(word_sz, fl);
      // If it didn't work, give up.
      if (fl->count() == 0) return NULL;
    }
    res = fl->getChunkAtHead();
    assert(res != NULL, "Why was count non-zero?");
  }
  // The chunk now becomes an (uninitialized) object.
  res->markNotFree();
  assert(!res->isFree(), "shouldn't be marked free");
  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  // mangle a just allocated object with a distinct pattern.
  debug_only(res->mangleAllocated(word_sz));
  return (HeapWord*)res;
}
2549 | |
// Get a chunk of blocks of the right size and update related
// book-keeping stats.
//   word_sz - the block size whose local list needs refilling
//   fl      - this thread's local free list for that size; claimed
//             blocks are appended to it
void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  // Get the #blocks we want to claim, from the smoothed per-size average.
  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  assert(n_blks > 0, "Error");
  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  // In some cases, when the application has a phase change,
  // there may be a sudden and sharp shift in the object survival
  // profile, and updating the counts at the end of a scavenge
  // may not be quick enough, giving rise to large scavenge pauses
  // during these phase changes. It is beneficial to detect such
  // changes on-the-fly during a scavenge and avoid such a phase-change
  // pothole. The following code is a heuristic attempt to do that.
  // It is protected by a product flag until we have gained
  // enough experience with this heuristic and fine-tuned its behaviour.
  // WARNING: This might increase fragmentation if we overreact to
  // small spikes, so some kind of historical smoothing based on
  // previous experience with the greater reactivity might be useful.
  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  // default.
  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
    // Scale the claim up when this thread's demand for this size has
    // already outstripped the tolerance, but never past CMSOldPLABMax.
    size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
    n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
    n_blks = MIN2(n_blks, CMSOldPLABMax);
  }
  assert(n_blks > 0, "Error");
  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  // Update stats table entry for this block size
  _num_blocks[word_sz] += fl->count();
}
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2581 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2582 void CFLS_LAB::compute_desired_plab_size() { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2583 for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
0 | 2584 i < CompactibleFreeListSpace::IndexSetSize; |
2585 i += CompactibleFreeListSpace::IndexSetStride) { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2586 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2587 "Counter inconsistency"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2588 if (_global_num_workers[i] > 0) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2589 // Need to smooth wrt historical average |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2590 if (ResizeOldPLAB) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2591 _blocks_to_claim[i].sample( |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2592 MAX2((size_t)CMSOldPLABMin, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2593 MIN2((size_t)CMSOldPLABMax, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2594 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills)))); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2595 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2596 // Reset counters for next round |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2597 _global_num_workers[i] = 0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2598 _global_num_blocks[i] = 0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2599 if (PrintOldPLAB) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2600 gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average()); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2601 } |
0 | 2602 } |
2603 } | |
2604 } | |
2605 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2606 void CFLS_LAB::retire(int tid) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2607 // We run this single threaded with the world stopped; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2608 // so no need for locks and such. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2609 #define CFLS_LAB_PARALLEL_ACCESS 0 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2610 NOT_PRODUCT(Thread* t = Thread::current();) |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2611 assert(Thread::current()->is_VM_thread(), "Error"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2612 assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2613 "Will access to uninitialized slot below"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2614 #if CFLS_LAB_PARALLEL_ACCESS |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2615 for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2616 i > 0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2617 i -= CompactibleFreeListSpace::IndexSetStride) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2618 #else // CFLS_LAB_PARALLEL_ACCESS |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2619 for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2620 i < CompactibleFreeListSpace::IndexSetSize; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2621 i += CompactibleFreeListSpace::IndexSetStride) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2622 #endif // !CFLS_LAB_PARALLEL_ACCESS |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2623 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2624 "Can't retire more than what we obtained"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2625 if (_num_blocks[i] > 0) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2626 size_t num_retire = _indexedFreeList[i].count(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2627 assert(_num_blocks[i] > num_retire, "Should have used at least one"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2628 { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2629 #if CFLS_LAB_PARALLEL_ACCESS |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2630 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i], |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2631 Mutex::_no_safepoint_check_flag); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2632 #endif // CFLS_LAB_PARALLEL_ACCESS |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2633 // Update globals stats for num_blocks used |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2634 _global_num_blocks[i] += (_num_blocks[i] - num_retire); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2635 _global_num_workers[i]++; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2636 assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2637 if (num_retire > 0) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2638 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2639 // Reset this list. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2640 _indexedFreeList[i] = FreeList(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2641 _indexedFreeList[i].set_size(i); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2642 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2643 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2644 if (PrintOldPLAB) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2645 gclog_or_tty->print_cr("%d[%d]: %d/%d/%d", |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2646 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average()); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2647 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2648 // Reset stats for next round |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2649 _num_blocks[i] = 0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2650 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2651 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2652 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2653 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
// Obtain (up to) "n" free blocks of "word_sz" words each and push them on
// the caller's (thread-local) free list "fl".
// Strategy, in order:
//   1. Take blocks directly from the indexed free list of size word_sz.
//   2. If CMSSplitIndexedFreeListBlocks, take blocks of size k*word_sz
//      (k = 2, 3, ...) from the indexed lists and split each k ways.
//   3. Otherwise carve n blocks out of one larger chunk from the dictionary.
// May return with fewer than n blocks (possibly none) on "fl".
// Thread-safe: the per-size indexed-list locks and the dictionary lock are
// taken as needed; chunk headers are written before the block-offset table
// so that concurrent observers never see a half-split chunk.
void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  // then try getting a big chunk and splitting it.
  {
    bool found;
    int k;
    size_t cur_sz;
    for (k = 1, cur_sz = k * word_sz, found = false;
         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
         (CMSSplitIndexedFreeListBlocks || k <= 1);
         k++, cur_sz = k * word_sz) {
      FreeList* gfl = &_indexedFreeList[cur_sz];   // global list for this size
      FreeList fl_for_cur_sz;  // Empty.
      fl_for_cur_sz.set_size(cur_sz);
      {
        // Take the per-size lock only for the transfer off the global list.
        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                        Mutex::_no_safepoint_check_flag);
        if (gfl->count() != 0) {
          // nn is the number of chunks of size cur_sz that
          // we'd need to split k-ways each, in order to create
          // "n" chunks of size word_sz each.
          const size_t nn = MAX2(n/k, (size_t)1);
          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
          found = true;
          if (k > 1) {
            // Update split death stats for the cur_sz-size blocks list:
            // we increment the split death count by the number of blocks
            // we just took from the cur_sz-size blocks list and which
            // we will be splitting below.
            ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
                             fl_for_cur_sz.count();
            _indexedFreeList[cur_sz].set_splitDeaths(deaths);
          }
        }
      }
      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
      if (found) {
        if (k == 1) {
          // Exact size: hand the whole sublist over without splitting.
          fl->prepend(&fl_for_cur_sz);
        } else {
          // Divide each block on fl_for_cur_sz up k ways.
          FreeChunk* fc;
          while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
            // Must do this in reverse order, so that anybody attempting to
            // access the main chunk sees it as a single free block until we
            // change it.
            size_t fc_size = fc->size();
            for (int i = k-1; i >= 0; i--) {
              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              ffc->setSize(word_sz);
              ffc->linkNext(NULL);
              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
              // Above must occur before BOT is updated below.
              // splitting from the right, fc_size == (k - i + 1) * wordsize
              _bt.mark_block((HeapWord*)ffc, word_sz);
              fc_size -= word_sz;
              _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
              _bt.verify_single_block((HeapWord*)fc, fc_size);
              _bt.verify_single_block((HeapWord*)ffc, ffc->size());
              // Push this on "fl".
              fl->returnChunkAtHead(ffc);
            }
            // TRAP
            assert(fl->tail()->next() == NULL, "List invariant.");
          }
        }
        // Update birth stats for this block size.
        size_t num = fl->count();
        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                        Mutex::_no_safepoint_check_flag);
        ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
        _indexedFreeList[word_sz].set_splitBirths(births);
        return;
      }
    }
  }
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;   // remainder chunk, if it stays in the indexed lists
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    // Ask for progressively less (n, n-1, ...) until the dictionary can
    // supply a chunk of at least that many word_sz-blocks.
    while (n > 0) {
      fc = dictionary()->getChunk(MAX2(n * word_sz,
                                  _dictionary->minSize()),
                                  FreeBlockDictionary::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;   // dictionary exhausted; "fl" stays empty
    assert((ssize_t)n >= 1, "Control point invariant");
    // Otherwise, split up that block.
    const size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    assert((ssize_t)n >= 1, "Control point invariant");
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
    // Note that at this point we may have n == 0.
    assert((ssize_t)n >= 0, "Control point invariant");

    // If n is 0, the chunk fc that was found is not large
    // enough to leave a viable remainder.  We are unable to
    // allocate even one block.  Return fc to the
    // dictionary and return, leaving "fl" empty.
    if (n == 0) {
      returnChunkToDictionary(fc);
      return;
    }

    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a concurrent allocation
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->setSize(rem);
      rem_fc->linkNext(NULL);
      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
      // Above must occur before BOT is updated below.
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      if (rem >= IndexSetSize) {
        // Remainder is dictionary-sized; give it straight back.
        returnChunkToDictionary(rem_fc);
        dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  if (rem_fc != NULL) {
    // Small remainder goes on the indexed free list of its size.
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
    smallSplitBirth(rem);
  }
  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->setSize(word_sz);
    ffc->linkNext(NULL);
    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
    // Above must occur before BOT is updated below.
    // splitting from the right, fc_size == (n - i + 1) * wordsize
    _bt.mark_block((HeapWord*)ffc, word_sz);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->returnChunkAtHead(ffc);
  }
  // First chunk
  fc->setSize(word_sz);
  fc->linkNext(NULL);
  fc->linkPrev(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->returnChunkAtHead(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
    _indexedFreeList[word_sz].set_splitBirths(births);
    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    // _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}
2852 | |
2853 // Set up the space's par_seq_tasks structure for work claiming | |
2854 // for parallel rescan. See CMSParRemarkTask where this is currently used. | |
2855 // XXX Need to suitably abstract and generalize this and the next | |
2856 // method into one. | |
2857 void | |
2858 CompactibleFreeListSpace:: | |
2859 initialize_sequential_subtasks_for_rescan(int n_threads) { | |
2860 // The "size" of each task is fixed according to rescan_task_size. | |
2861 assert(n_threads > 0, "Unexpected n_threads argument"); | |
2862 const size_t task_size = rescan_task_size(); | |
2863 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size; | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
269
diff
changeset
|
2864 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect"); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
269
diff
changeset
|
2865 assert(n_tasks == 0 || |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
269
diff
changeset
|
2866 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) && |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
269
diff
changeset
|
2867 (used_region().start() + n_tasks*task_size >= used_region().end())), |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
269
diff
changeset
|
2868 "n_tasks calculation incorrect"); |
0 | 2869 SequentialSubTasksDone* pst = conc_par_seq_tasks(); |
2870 assert(!pst->valid(), "Clobbering existing data?"); | |
2871 pst->set_par_threads(n_threads); | |
2872 pst->set_n_tasks((int)n_tasks); | |
2873 } | |
2874 | |
// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
//
// n_threads: number of parallel marking threads that will claim tasks.
// low:       if non-NULL, only addresses at or above "low" need covering;
//            the task span is clipped accordingly (after aligning "low"
//            down to a card boundary).
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to  a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                 CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      // "low" is past the generation: nothing to mark here.
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
  assert(span.is_empty() ||
         ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
         "span should start at a card boundary");
  // Ceiling division: number of task_size-word strips covering the span.
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  pst->set_par_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}