annotate src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @ 1959:9eecf81a02fb
7000578: CMS: assert(SafepointSynchronize::is_at_safepoint()) failed: Else races are possible
Summary: Weakened the assert in obj_is_alive() to allow its use at initialization time, when is_at_safepoint() normally reports false; added related asserts checking the ordering of is_init_completed() relative to Universe::is_fully_initialized().
Reviewed-by: jcoomes
author   ysr
date     Tue, 16 Nov 2010 13:58:48 -0800
parents  4df7f8cba524
children f95d63e2154a
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_compactibleFreeListSpace.cpp.incl"

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Defaults are 0 so things will break badly if incorrectly initialized.
int CompactibleFreeListSpace::IndexSetStart  = 0;
int CompactibleFreeListSpace::IndexSetStride = 0;

size_t MinChunkSize = 0;

void CompactibleFreeListSpace::set_cms_values() {
  // Set CMS global values
  assert(MinChunkSize == 0, "already set");
  #define numQuanta(x,y) ((x+y-1)/y)
  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
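  // Worked example (illustrative values only, not guaranteed on every
  // platform): with sizeof(FreeChunk) == 16, MinObjAlignmentInBytes == 8
  // and MinObjAlignment == 1 word, numQuanta(16, 8) == 2, so MinChunkSize
  // is 2 words -- just large enough for the smallest free block to hold
  // a FreeChunk header.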

  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  IndexSetStart  = MinObjAlignment;
  IndexSetStride = MinObjAlignment;
}

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
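  // (Illustrative note: because of the post-decrement below, the first
  // CFLspace constructed gets rank _leaf+3 for its free list lock and the
  // next gets _leaf+2, which is how the range above comes about.)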
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                     CMSConcMarkMultiple),
  _collector(NULL)
{
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary::dictionarySplayTree:
    case FreeBlockDictionary::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
    case FreeBlockDictionary::dictionaryBinaryTree:
      _dictionary = new BinaryTreeDictionary(mr);
      break;
  }
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's. Also a cms perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks
  if (!use_adaptive_freelists) {
    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
                               1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  checkFreeListConsistency();

  // Initialize locks for parallel case.

  if (CollectedHeap::use_parallel_gc_threads()) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1,  // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      if (_indexedFreeListParLocks[i] == NULL)
        vm_exit_during_initialization("Could not allocate a par lock");
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table. Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
    "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
    "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method which is not used above to save the virtual
      // dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold(). A contiguous space can only call it when
  // the compaction_top exceeds the current threshold, but not for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block. The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize them to NULL.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used. It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (int i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->setSize(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
    }
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->findLargestDict();
    if (fc != NULL) {
      assert(fc->size() == mr.word_size(),
        "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
        1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}

#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->totalCount();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->isFree();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->maxChunkSize();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
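  // (The stronger test ensures i - IndexSetStride >= res before each
  // decrement, so the unsigned subtraction can never wrap; with "i > res"
  // and res == 0, an iteration could reach i < IndexSetStride, and the
  // decrement would then wrap i around to a huge value.)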
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
            _ptr, _word_size, _refillSize, _allocation_size_limit);
}

void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
               _fitStrategy ? "true" : "false", _adaptive_freelists ? "true" : "false");
}

void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics();
  gclog_or_tty->print_cr("Layout of Indexed Freelists");
  gclog_or_tty->print_cr("---------------------------");
  FreeList::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(gclog_or_tty);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
      gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
                             fc, (HeapWord*)fc + i,
                             fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}

void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}

void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->reportStatistics();
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}

class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  size_t do_blk(HeapWord* addr);
};

size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      addr,
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      addr, sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}

void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
  outputStream* st) {
  st->print_cr("\n=========================");
  st->print_cr("Block layout in CMS Heap:");
  st->print_cr("=========================");
  BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
  blk_iterate(&bpcl);

  st->print_cr("\n=======================================");
  st->print_cr("Order & Layout of Promotion Info Blocks");
  st->print_cr("=======================================");
  print_promo_info_blocks(st);

  st->print_cr("\n===========================");
  st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
  print_indexed_free_lists(st);

  st->print_cr("\n=================================");
  st->print_cr("Order of Free Lists in Dictionary");
  st->print_cr("=================================");
  print_dictionary_free_lists(st);
}

void CompactibleFreeListSpace::reportFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  assert(PrintFLSStatistics != 0, "Reporting error");
  _dictionary->reportStatistics();
  if (PrintFLSStatistics > 1) {
    reportIndexedFreeListStatistics();
    size_t totalSize = totalSizeInIndexedFreeLists() +
                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                      "--------------------------------\n");
  size_t totalSize = totalSizeInIndexedFreeLists();
  size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
  gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
  }
}

size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
    "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
        "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded. Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      OopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
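// (A qualified call such as _cfls->CompactibleFreeListSpace::block_size(bottom)
// binds statically: it bypasses the vtable and can be inlined, which is the
// point of the de-virtualization below.)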
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                  HeapWord* bottom,             \
                                                  HeapWord* top,                \
                                                  ClosureType* cl) {            \
  if (SharedHeap::heap()->n_par_threads() > 0) {                                \
    walk_mem_region_with_cl_par(mr, bottom, top, cl);                           \
  } else {                                                                      \
    walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                         \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
                                                      HeapWord* bottom,         \
                                                      HeapWord* top,            \
                                                      ClosureType* cl) {        \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
                                                        HeapWord* bottom,       \
                                                        HeapWord* top,          \
                                                        ClosureType* cl) {      \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
                                      HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}


// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance, a block being iterated
// may suddenly be allocated, or divided up and part of it allocated, and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// Apply the given closure to each oop in the space \intersect memory region.
void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
  assert_lock_strong(freelistLock());
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), end());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(cl);
    return;
  }
  assert(mr.end() <= end(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
  if (block_is_obj(obj_addr)) {
    // Handle first object specially.
    oop obj = oop(obj_addr);
    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
  } else {
    FreeChunk* fc = (FreeChunk*)obj_addr;
    obj_addr += fc->size();
  }
  while (obj_addr < t) {
    HeapWord* obj = obj_addr;
    obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(cl);
      }
    } else {
      // "obj" extends beyond end of region
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(&smr_blk);
      }
      break;
    }
  }
}

// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized. We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
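// (Per the changeset summary above (7000578), the corresponding assert in
// obj_is_alive() was weakened so that it can be used at initialization
// time, before is_init_completed(); verify_objects_initialized() below
// makes the same concession at VM start-up.)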
823 void CompactibleFreeListSpace::verify_objects_initialized() const { | |
824 if (is_init_completed()) { | |
825 assert_locked_or_safepoint(Heap_lock); | |
826 if (Universe::is_fully_initialized()) { | |
827 guarantee(SafepointSynchronize::is_at_safepoint(), | |
828 "Required for objects to be initialized"); | |
829 } | |
830 } // else make a concession at vm start-up | |
831 } | |
832 | |
833 // Apply the given closure to each object in the space | |
834 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) { | |
835 assert_lock_strong(freelistLock()); | |
836 NOT_PRODUCT(verify_objects_initialized()); | |
837 HeapWord *cur, *limit; | |
838 size_t curSize; | |
839 for (cur = bottom(), limit = end(); cur < limit; | |
840 cur += curSize) { | |
841 curSize = block_size(cur); | |
842 if (block_is_obj(cur)) { | |
843 blk->do_object(oop(cur)); | |
844 } | |
845 } | |
846 } | |
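// A hedged sketch of a caller-side closure for object_iterate() above.
// It assumes HotSpot's internal ObjectClosure interface and oop type
// (from the VM's own includes); the class name is illustrative.

class CountObjectsClosure : public ObjectClosure {
  size_t _count;
 public:
  CountObjectsClosure() : _count(0) {}
  void do_object(oop obj) { _count++; }   // invoked once per object block
  size_t count() const { return _count; }
};

// Usage sketch (freelistLock() must be held, per the assert above):
//   CountObjectsClosure cl;
//   space->object_iterate(&cl);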
847 | |
517 | 848 // Apply the given closure to each live object in the space. |
849 // The use of CompactibleFreeListSpace |
850 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows |
851 // objects in the space to hold references to objects that are no longer |
852 // valid. For example, an object may reference another object |
853 // that has already been swept up (collected). This method uses |
854 // obj_is_alive() to determine whether it is safe to apply the closure to |
855 // an object. See obj_is_alive() for details on how liveness of an |
856 // object is decided. |
857 |
858 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) { |
859 assert_lock_strong(freelistLock()); |
860 NOT_PRODUCT(verify_objects_initialized()); |
861 HeapWord *cur, *limit; |
862 size_t curSize; |
863 for (cur = bottom(), limit = end(); cur < limit; |
864 cur += curSize) { |
865 curSize = block_size(cur); |
866 if (block_is_obj(cur) && obj_is_alive(cur)) { |
867 blk->do_object(oop(cur)); |
868 } |
869 } |
870 } |
871 |
0 | 872 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, |
873 UpwardsObjectClosure* cl) { | |
1145 | 874 assert_locked(freelistLock()); |
0 | 875 NOT_PRODUCT(verify_objects_initialized()); |
876 Space::object_iterate_mem(mr, cl); | |
877 } | |
878 | |
879 // Callers of this iterator beware: The closure application should | |
880 // be robust in the face of uninitialized objects and should (always) | |
881 // return a correct size so that the next addr + size below gives us a | |
882 // valid block boundary. [See for instance, | |
883 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
884 // in ConcurrentMarkSweepGeneration.cpp.] | |
885 HeapWord* | |
886 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) { | |
887 assert_lock_strong(freelistLock()); | |
888 HeapWord *addr, *last; | |
889 size_t size; | |
890 for (addr = bottom(), last = end(); | |
891 addr < last; addr += size) { | |
892 FreeChunk* fc = (FreeChunk*)addr; | |
893 if (fc->isFree()) { | |
894 // Since we hold the free list lock, which protects direct | |
895 // allocation in this generation by mutators, a free object | |
896 // will remain free throughout this iteration code. | |
897 size = fc->size(); | |
898 } else { | |
899 // Note that the object need not necessarily be initialized, | |
900 // because (for instance) the free list lock does NOT protect | |
901 // object initialization. The closure application below must | |
902 // therefore be correct in the face of uninitialized objects. | |
903 size = cl->do_object_careful(oop(addr)); | |
904 if (size == 0) { | |
905 // An unparsable object found. Signal early termination. | |
906 return addr; | |
907 } | |
908 } | |
909 } | |
910 return NULL; | |
911 } | |
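// A hedged sketch of the "careful" closure contract for the iterator
// above, assuming HotSpot's internal ObjectClosureCareful interface:
// return 0 for a block that cannot be parsed, which makes
// object_iterate_careful() stop and return that address. The class
// name is illustrative, and the interface's other pure virtuals
// (do_object_careful_m(), do_object()) are omitted in this sketch.

class StopAtUnparsableClosure : public ObjectClosureCareful {
 public:
  size_t do_object_careful(oop obj) {
    if (!obj->is_parsable()) {
      return 0;  // unparsable: signal early termination
    }
    // ... examine obj here ...
    // Return the adjusted size so the iterator lands on a block boundary.
    return CompactibleFreeListSpace::adjustObjectSize(obj->size());
  }
};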
912 | |
913 // Callers of this iterator beware: The closure application should | |
914 // be robust in the face of uninitialized objects and should (always) | |
915 // return a correct size so that the next addr + size below gives us a | |
916 // valid block boundary. [See for instance, | |
917 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
918 // in ConcurrentMarkSweepGeneration.cpp.] | |
919 HeapWord* | |
920 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr, | |
921 ObjectClosureCareful* cl) { | |
922 assert_lock_strong(freelistLock()); | |
923 // Can't use used_region() below because it may not necessarily | |
924 // be the same as [bottom(),end()); although we could | |
925 // use [used_region().start(),round_to(used_region().end(),CardSize)), | |
926 // that appears too cumbersome, so we just do the simpler check | |
927 // in the assertion below. | |
928 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr), | |
929 "mr should be non-empty and within used space"); | |
930 HeapWord *addr, *end; | |
931 size_t size; | |
932 for (addr = block_start_careful(mr.start()), end = mr.end(); | |
933 addr < end; addr += size) { | |
934 FreeChunk* fc = (FreeChunk*)addr; | |
935 if (fc->isFree()) { | |
936 // Since we hold the free list lock, which protects direct | |
937 // allocation in this generation by mutators, a free object | |
938 // will remain free throughout this iteration code. | |
939 size = fc->size(); | |
940 } else { | |
941 // Note that the object need not necessarily be initialized, | |
942 // because (for instance) the free list lock does NOT protect | |
943 // object initialization. The closure application below must | |
944 // therefore be correct in the face of uninitialized objects. | |
945 size = cl->do_object_careful_m(oop(addr), mr); | |
946 if (size == 0) { | |
947 // An unparsable object found. Signal early termination. | |
948 return addr; | |
949 } | |
950 } | |
951 } | |
952 return NULL; | |
953 } | |
954 | |
955 | |
342 | 956 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const { |
0 | 957 NOT_PRODUCT(verify_objects_initialized()); |
958 return _bt.block_start(p); | |
959 } | |
960 | |
961 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const { | |
962 return _bt.block_start_careful(p); | |
963 } | |
964 | |
965 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const { | |
966 NOT_PRODUCT(verify_objects_initialized()); | |
967 // This must be volatile, or else there is a danger that the compiler | |
968 // will compile the code below into a sometimes-infinite loop, by keeping | |
969 // the value read the first time in a register. | |
970 while (true) { | |
971 // We must do this until we get a consistent view of the object. | |
187 | 972 if (FreeChunk::indicatesFreeChunk(p)) { |
973 volatile FreeChunk* fc = (volatile FreeChunk*)p; | |
974 size_t res = fc->size(); | |
975 // If the object is still a free chunk, return the size, else it | |
976 // has been allocated so try again. | |
977 if (FreeChunk::indicatesFreeChunk(p)) { | |
0 | 978 assert(res != 0, "Block size should not be 0"); |
979 return res; | |
980 } | |
187 | 981 } else { |
982 // must read from what 'p' points to in each loop. | |
983 klassOop k = ((volatile oopDesc*)p)->klass_or_null(); | |
984 if (k != NULL) { | |
1716 | 985 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop"); |
187 | 986 oop o = (oop)p; |
987 assert(o->is_parsable(), "Should be parsable"); | |
988 assert(o->is_oop(true /* ignore mark word */), "Should be an oop."); | |
989 size_t res = o->size_given_klass(k->klass_part()); | |
990 res = adjustObjectSize(res); | |
991 assert(res != 0, "Block size should not be 0"); | |
992 return res; | |
993 } | |
0 | 994 } |
995 } | |
996 } | |
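// A standalone sketch of the read-then-recheck idiom used above,
// assuming nothing from HotSpot. The size field may be concurrently
// overwritten when a free chunk is allocated, so the value read is
// trusted only if the "free" tag still holds on a second look. All
// names are illustrative.

#include <atomic>
#include <cstddef>

struct RacyBlock {
  std::atomic<bool>   is_free;  // stands in for FreeChunk::indicatesFreeChunk(p)
  std::atomic<size_t> size;     // may be mutated while we read it
};

static size_t consistent_size(const RacyBlock& b) {
  for (;;) {                    // retry until a consistent view is seen
    if (b.is_free.load(std::memory_order_acquire)) {
      size_t res = b.size.load(std::memory_order_acquire);
      if (b.is_free.load(std::memory_order_acquire)) {
        return res;             // still free: res is a valid chunk size
      }
      // Tag changed under us: the chunk was just allocated; retry.
    } else {
      // The real code reads the klass word here and, once it is
      // non-NULL, computes the object size; this model just rereads.
      size_t res = b.size.load(std::memory_order_acquire);
      if (res != 0) return res;
    }
  }
}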
997 | |
998 // A variant of the above that uses the Printezis bits for | |
999 // unparsable but allocated objects. This avoids any possible | |
1000 // stalls waiting for mutators to initialize objects, and is | |
1001 // thus potentially faster than the variant above. However, | |
1002 // this variant may return a zero size for a block that is | |
1003 // under mutation and for which a consistent size cannot be | |
1004 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits(). | |
1005 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p, | |
1006 const CMSCollector* c) | |
1007 const { | |
1008 assert(MemRegion(bottom(), end()).contains(p), "p not in space"); | |
1009 // This must be volatile, or else there is a danger that the compiler | |
1010 // will compile the code below into a sometimes-infinite loop, by keeping | |
1011 // the value read the first time in a register. | |
1012 DEBUG_ONLY(uint loops = 0;) | |
1013 while (true) { | |
1014 // We must do this until we get a consistent view of the object. | |
187 | 1015 if (FreeChunk::indicatesFreeChunk(p)) { |
1016 volatile FreeChunk* fc = (volatile FreeChunk*)p; | |
1017 size_t res = fc->size(); | |
1018 if (FreeChunk::indicatesFreeChunk(p)) { | |
0 | 1019 assert(res != 0, "Block size should not be 0"); |
1020 assert(loops == 0, "Should be 0"); | |
1021 return res; | |
1022 } | |
1023 } else { | |
187 | 1024 // must read from what 'p' points to in each loop. |
1025 klassOop k = ((volatile oopDesc*)p)->klass_or_null(); | |
518 | 1026 if (k != NULL && |
1027 ((oopDesc*)p)->is_parsable() && |
1028 ((oopDesc*)p)->is_conc_safe()) { |
187 | 1029 assert(k->is_oop(), "Should really be klass oop."); |
1030 oop o = (oop)p; | |
1031 assert(o->is_oop(), "Should be an oop"); | |
1032 size_t res = o->size_given_klass(k->klass_part()); | |
1033 res = adjustObjectSize(res); | |
1034 assert(res != 0, "Block size should not be 0"); | |
1035 return res; | |
1036 } else { | |
1037 return c->block_size_if_printezis_bits(p); | |
1038 } | |
0 | 1039 } |
1040 assert(loops == 0, "Can loop at most once"); | |
1041 DEBUG_ONLY(loops++;) | |
1042 } | |
1043 } | |
1044 | |
1045 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const { | |
1046 NOT_PRODUCT(verify_objects_initialized()); | |
1047 assert(MemRegion(bottom(), end()).contains(p), "p not in space"); | |
1048 FreeChunk* fc = (FreeChunk*)p; | |
1049 if (fc->isFree()) { | |
1050 return fc->size(); | |
1051 } else { | |
1052 // Ignore mark word because this may be a recently promoted | |
1053 // object whose mark word is used to chain together grey | |
1054 // objects (the last one would have a null value). | |
1055 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1056 return adjustObjectSize(oop(p)->size()); | |
1057 } | |
1058 } | |
1059 | |
1060 // This implementation assumes that the property of "being an object" is | |
1061 // stable. But being a free chunk may not be (because of parallel | |
1062 // promotion.) | |
1063 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const { | |
1064 FreeChunk* fc = (FreeChunk*)p; | |
1065 assert(is_in_reserved(p), "Should be in space"); | |
1066 // When doing a mark-sweep-compact of the CMS generation, this | |
1067 // assertion may fail because prepare_for_compaction() uses | |
1068 // space that is garbage to maintain information on ranges of | |
1069 // live objects so that these live ranges can be moved as a whole. | |
1070 // Comment out this assertion until that problem can be solved | |
1071 // (i.e., that the block start calculation may look at objects | |
1072 // at address below "p" in finding the object that contains "p" | |
1073 // and those objects (if garbage) may have been modified to hold | |
1074 // live range information. | |
1833 | 1075 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p, |
1076 // "Should be a block boundary"); |
187 | 1077 if (FreeChunk::indicatesFreeChunk(p)) return false; |
1078 klassOop k = oop(p)->klass_or_null(); | |
0 | 1079 if (k != NULL) { |
1080 // Ignore mark word because it may have been used to | |
1081 // chain together promoted objects (the last one | |
1082 // would have a null value). | |
1083 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1084 return true; | |
1085 } else { | |
1086 return false; // Was not an object at the start of collection. | |
1087 } | |
1088 } | |
1089 | |
1090 // Check if the object is alive. This fact is checked either by consulting | |
1091 // the main marking bitmap in the sweeping phase or, if it's a permanent | |
1092 // generation and we're not in the sweeping phase, by checking the | |
1093 // perm_gen_verify_bit_map where we store the "deadness" information if | |
1094 // we did not sweep the perm gen in the most recent previous GC cycle. | |
1095 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const { | |
1959 | 1096 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(), |
1097 "Else races are possible"); |
1951 | 1098 assert(block_is_obj(p), "The address should point to an object"); |
0 | 1099 |
1100 // If we're sweeping, we use object liveness information from the main bit map | |
1101 // for both perm gen and old gen. | |
1102 // We don't need to lock the bitmap (live_map or dead_map below), because | |
1103 // EITHER we are in the middle of the sweeping phase, and the | |
1104 // main marking bit map (live_map below) is locked, | |
1105 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below) | |
1106 // is stable, because it's mutated only in the sweeping phase. | |
1951 | 1107 // NOTE: This method is also used by jmap where, if class unloading is |
1108 // off, the results can return "false" for legitimate perm objects, |
1109 // when we are not in the midst of a sweeping phase, which can result |
1110 // in jmap not reporting certain perm gen objects. This will be moot |
1111 // if/when the perm gen goes away in the future. |
0 | 1112 if (_collector->abstract_state() == CMSCollector::Sweeping) { |
1113 CMSBitMap* live_map = _collector->markBitMap(); | |
1951 | 1114 return live_map->par_isMarked((HeapWord*) p); |
0 | 1115 } else { |
1116 // If we're not currently sweeping and we haven't swept the perm gen in | |
1117 // the previous concurrent cycle then we may have dead but unswept objects | |
1118 // in the perm gen. In this case, we use the "deadness" information | |
1119 // that we had saved in perm_gen_verify_bit_map at the last sweep. | |
1120 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) { | |
1121 if (_collector->verifying()) { | |
1122 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map(); | |
1123 // Object is marked in the dead_map bitmap at the previous sweep | |
1124 // when we know that it's dead; if the bitmap is not allocated then | |
1125 // the object is alive. | |
1126 return (dead_map->sizeInBits() == 0) // bit_map has been allocated | |
1127 || !dead_map->par_isMarked((HeapWord*) p); | |
1128 } else { | |
1129 return false; // We can't say for sure if it's live, so we say that it's dead. | |
1130 } | |
1131 } | |
1132 } | |
1133 return true; | |
1134 } | |
1135 | |
1136 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const { | |
1137 FreeChunk* fc = (FreeChunk*)p; | |
1138 assert(is_in_reserved(p), "Should be in space"); | |
1139 assert(_bt.block_start(p) == p, "Should be a block boundary"); | |
1140 if (!fc->isFree()) { | |
1141 // Ignore mark word because it may have been used to | |
1142 // chain together promoted objects (the last one | |
1143 // would have a null value). | |
1144 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1145 return true; | |
1146 } | |
1147 return false; | |
1148 } | |
1149 | |
1150 // "MT-safe but not guaranteed MT-precise" (TM); you may get an | |
1151 // approximate answer if you don't hold the freelistlock when you call this. | |
1152 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const { | |
1153 size_t size = 0; | |
1154 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
1155 debug_only( | |
1156 // We may be calling here without the lock in which case we | |
1157 // won't do this modest sanity check. | |
1158 if (freelistLock()->owned_by_self()) { | |
1159 size_t total_list_size = 0; | |
1160 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; | |
1161 fc = fc->next()) { | |
1162 total_list_size += i; | |
1163 } | |
1164 assert(total_list_size == i * _indexedFreeList[i].count(), | |
1165 "Count in list is incorrect"); | |
1166 } | |
1167 ) | |
1168 size += i * _indexedFreeList[i].count(); | |
1169 } | |
1170 return size; | |
1171 } | |
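// A hedged worked example of the sum above (counts illustrative): if
// the 8-word list holds 3 chunks and the 16-word list holds 2, the
// indexed-lists total is 8*3 + 16*2 = 56 words. Without freelistLock()
// the per-list counts may be racing, hence the "approximate" caveat.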
1172 | |
1173 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) { | |
1174 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
1175 return allocate(size); | |
1176 } | |
1177 | |
1178 HeapWord* | |
1179 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) { | |
1180 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size); | |
1181 } | |
1182 | |
1183 HeapWord* CompactibleFreeListSpace::allocate(size_t size) { | |
1184 assert_lock_strong(freelistLock()); | |
1185 HeapWord* res = NULL; | |
1186 assert(size == adjustObjectSize(size), | |
1187 "use adjustObjectSize() before calling into allocate()"); | |
1188 | |
1189 if (_adaptive_freelists) { | |
1190 res = allocate_adaptive_freelists(size); | |
1191 } else { // non-adaptive free lists | |
1192 res = allocate_non_adaptive_freelists(size); | |
1193 } | |
1194 | |
1195 if (res != NULL) { | |
1196 // check that res does lie in this space! | |
1197 assert(is_in_reserved(res), "Not in this space!"); | |
1198 assert(is_aligned((void*)res), "alignment check"); | |
1199 | |
1200 FreeChunk* fc = (FreeChunk*)res; | |
1201 fc->markNotFree(); | |
1202 assert(!fc->isFree(), "shouldn't be marked free"); | |
187 | 1203 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized"); |
0 | 1204 // Verify that the block offset table shows this to |
1205 // be a single block, but not one which is unallocated. | |
1206 _bt.verify_single_block(res, size); | |
1207 _bt.verify_not_unallocated(res, size); | |
1208 // mangle a just allocated object with a distinct pattern. | |
1209 debug_only(fc->mangleAllocated(size)); | |
1210 } | |
1211 | |
1212 return res; | |
1213 } | |
1214 | |
1215 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) { | |
1216 HeapWord* res = NULL; | |
1217 // try and use linear allocation for smaller blocks | |
1218 if (size < _smallLinearAllocBlock._allocation_size_limit) { | |
1219 // if successful, the following also adjusts block offset table | |
1220 res = getChunkFromSmallLinearAllocBlock(size); | |
1221 } | |
1222 // Else triage to indexed lists for smaller sizes | |
1223 if (res == NULL) { | |
1224 if (size < SmallForDictionary) { | |
1225 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1226 } else { | |
1227 // else get it from the big dictionary; if even this doesn't | |
1228 // work we are out of luck. | |
1229 res = (HeapWord*)getChunkFromDictionaryExact(size); | |
1230 } | |
1231 } | |
1232 | |
1233 return res; | |
1234 } | |
1235 | |
1236 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) { | |
1237 assert_lock_strong(freelistLock()); | |
1238 HeapWord* res = NULL; | |
1239 assert(size == adjustObjectSize(size), | |
1240 "use adjustObjectSize() before calling into allocate()"); | |
1241 | |
1242 // Strategy | |
1243 // if small | |
1244 // exact size from small object indexed list if small | |
1245 // small or large linear allocation block (linAB) as appropriate | |
1246 // take from lists of greater sized chunks | |
1247 // else | |
1248 // dictionary | |
1249 // small or large linear allocation block if it has the space | |
1250 // Try allocating exact size from indexTable first | |
1251 if (size < IndexSetSize) { | |
1252 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1253 if (res != NULL) { |
1254 assert(res != (HeapWord*)_indexedFreeList[size].head(), | |
1255 "Not removed from free list"); | |
1256 // no block offset table adjustment is necessary on blocks in | |
1257 // the indexed lists. | |
1258 | |
1259 // Try allocating from the small LinAB | |
1260 } else if (size < _smallLinearAllocBlock._allocation_size_limit && | |
1261 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) { | |
1262 // if successful, the above also adjusts block offset table | |
1263 // Note that this call will refill the LinAB to | |
1264 // satisfy the request. This is different from |
1265 // evm. |
1266 // Don't record chunk off a LinAB? smallSplitBirth(size); | |
1267 } else { | |
1268 // Raid the exact free lists larger than size, even if they are not | |
1269 // overpopulated. | |
1270 res = (HeapWord*) getChunkFromGreater(size); | |
1271 } | |
1272 } else { | |
1273 // Big objects get allocated directly from the dictionary. | |
1274 res = (HeapWord*) getChunkFromDictionaryExact(size); | |
1275 if (res == NULL) { | |
1276 // Try hard not to fail since an allocation failure will likely | |
1277 // trigger a synchronous GC. Try to get the space from the | |
1278 // allocation blocks. | |
1279 res = getChunkFromSmallLinearAllocBlockRemainder(size); | |
1280 } | |
1281 } | |
1282 | |
1283 return res; | |
1284 } | |
1285 | |
1286 // A worst-case estimate of the space required (in HeapWords) to expand the heap | |
1287 // when promoting obj. | |
1288 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const { | |
1289 // Depending on the object size, expansion may require refilling either a | |
1290 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize | |
1291 // is added because the dictionary may over-allocate to avoid fragmentation. | |
1292 size_t space = obj_size; | |
1293 if (!_adaptive_freelists) { | |
1294 space = MAX2(space, _smallLinearAllocBlock._refillSize); | |
1295 } | |
1296 space += _promoInfo.refillSize() + 2 * MinChunkSize; | |
1297 return space; | |
1298 } | |
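// A hedged worked example of the bound above (all numbers illustrative):
// for obj_size = 128 words with non-adaptive free lists, a linAB
// _refillSize of 1024 words, a _promoInfo.refillSize() of 256 words and
// MinChunkSize = 4 words, the estimate is
//   MAX2(128, 1024) + 256 + 2*4 = 1288 HeapWords.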
1299 | |
1300 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) { | |
1301 FreeChunk* ret; | |
1302 | |
1303 assert(numWords >= MinChunkSize, "Size is less than minimum"); | |
1304 assert(linearAllocationWouldFail() || bestFitFirst(), | |
1305 "Should not be here"); | |
1306 | |
1307 size_t i; | |
1308 size_t currSize = numWords + MinChunkSize; | |
1309 assert(currSize % MinObjAlignment == 0, "currSize should be aligned"); | |
1310 for (i = currSize; i < IndexSetSize; i += IndexSetStride) { | |
1311 FreeList* fl = &_indexedFreeList[i]; | |
1312 if (fl->head()) { | |
1313 ret = getFromListGreater(fl, numWords); | |
1314 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); | |
1315 return ret; | |
1316 } | |
1317 } | |
1318 | |
1319 currSize = MAX2((size_t)SmallForDictionary, | |
1320 (size_t)(numWords + MinChunkSize)); | |
1321 | |
1322 /* Try to get a chunk that satisfies request, while avoiding | |
1323 fragmentation that can't be handled. */ | |
1324 { | |
1325 ret = dictionary()->getChunk(currSize); | |
1326 if (ret != NULL) { | |
1327 assert(ret->size() - numWords >= MinChunkSize, | |
1328 "Chunk is too small"); | |
1329 _bt.allocated((HeapWord*)ret, ret->size()); | |
1330 /* Carve returned chunk. */ | |
1331 (void) splitChunkAndReturnRemainder(ret, numWords); | |
1332 /* Label this as no longer a free chunk. */ | |
1333 assert(ret->isFree(), "This chunk should be free"); | |
1334 ret->linkPrev(NULL); | |
1335 } | |
1336 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); | |
1337 return ret; | |
1338 } | |
1339 ShouldNotReachHere(); | |
1340 } | |
1341 | |
1342 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) | |
1343 const { | |
1344 assert(fc->size() < IndexSetSize, "Size of chunk is too large"); | |
1345 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc); | |
1346 } | |
1347 | |
1348 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const { | |
1349 if (fc->size() >= IndexSetSize) { | |
1350 return dictionary()->verifyChunkInFreeLists(fc); | |
1351 } else { | |
1352 return verifyChunkInIndexedFreeLists(fc); | |
1353 } | |
1354 } | |
1355 | |
1356 #ifndef PRODUCT | |
1357 void CompactibleFreeListSpace::assert_locked() const { | |
1358 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock()); | |
1359 } | |
1145 | 1360 |
1361 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const { |
1362 CMSLockVerifier::assert_locked(lock); |
1363 } |
0 | 1364 #endif |
1365 | |
1366 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) { | |
1367 // In the parallel case, the main thread holds the free list lock | |
1368 // on behalf of the parallel threads. |
1369 FreeChunk* fc; | |
1370 { | |
1371 // If GC is parallel, this might be called by several threads. | |
1372 // This should be rare enough that the locking overhead won't affect | |
1373 // the sequential code. | |
1374 MutexLockerEx x(parDictionaryAllocLock(), | |
1375 Mutex::_no_safepoint_check_flag); | |
1376 fc = getChunkFromDictionary(size); | |
1377 } | |
1378 if (fc != NULL) { | |
1379 fc->dontCoalesce(); | |
1380 assert(fc->isFree(), "Should be free, but not coalescable"); | |
1381 // Verify that the block offset table shows this to | |
1382 // be a single block, but not one which is unallocated. | |
1383 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1384 _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); | |
1385 } | |
1386 return fc; | |
1387 } | |
1388 | |
113 | 1389 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) { |
0 | 1390 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1391 assert_locked(); | |
1392 | |
1393 // if we are tracking promotions, then first ensure space for | |
1394 // promotion (including spooling space for saving header if necessary). | |
1395 // then allocate and copy, then track promoted info if needed. | |
1396 // When tracking (see PromotionInfo::track()), the mark word may | |
1397 // be displaced and in this case restoration of the mark word | |
1398 // occurs in the (oop_since_save_marks_)iterate phase. | |
1399 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) { | |
1400 return NULL; | |
1401 } | |
1402 // Call the allocate(size_t, bool) form directly to avoid the | |
1403 // additional call through the allocate(size_t) form. Having | |
1404 // the compile inline the call is problematic because allocate(size_t) | |
1405 // is a virtual method. | |
1406 HeapWord* res = allocate(adjustObjectSize(obj_size)); | |
1407 if (res != NULL) { | |
1408 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size); | |
1409 // if we should be tracking promotions, do so. | |
1410 if (_promoInfo.tracking()) { | |
1411 _promoInfo.track((PromotedObject*)res); | |
1412 } | |
1413 } | |
1414 return oop(res); | |
1415 } | |
1416 | |
1417 HeapWord* | |
1418 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) { | |
1419 assert_locked(); | |
1420 assert(size >= MinChunkSize, "minimum chunk size"); | |
1421 assert(size < _smallLinearAllocBlock._allocation_size_limit, | |
1422 "maximum from smallLinearAllocBlock"); | |
1423 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size); | |
1424 } | |
1425 | |
1426 HeapWord* | |
1427 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk, | |
1428 size_t size) { | |
1429 assert_locked(); | |
1430 assert(size >= MinChunkSize, "too small"); | |
1431 HeapWord* res = NULL; | |
1432 // Try to do linear allocation from blk, making sure that | |
1433 if (blk->_word_size == 0) { | |
1434 // We have probably been unable to fill this either in the prologue or | |
1435 // when it was exhausted at the last linear allocation. Bail out until | |
1436 // next time. | |
1437 assert(blk->_ptr == NULL, "consistency check"); | |
1438 return NULL; | |
1439 } | |
1440 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check"); | |
1441 res = getChunkFromLinearAllocBlockRemainder(blk, size); | |
1442 if (res != NULL) return res; | |
1443 | |
1444 // about to exhaust this linear allocation block | |
1445 if (blk->_word_size == size) { // exactly satisfied | |
1446 res = blk->_ptr; | |
1447 _bt.allocated(res, blk->_word_size); | |
1448 } else if (size + MinChunkSize <= blk->_refillSize) { | |
1145 | 1449 size_t sz = blk->_word_size; |
0 | 1450 // Update _unallocated_block if the size is such that chunk would be |
1451 // returned to the indexed free list. All other chunks in the indexed | |
1452 // free lists are allocated from the dictionary so that _unallocated_block | |
1453 // has already been adjusted for them. Do it here so that the cost |
1454 // is covered for all chunks added back to the indexed free lists. |
1145 | 1455 if (sz < SmallForDictionary) { |
1456 _bt.allocated(blk->_ptr, sz); |
0 | 1457 } |
1458 // Return the chunk that isn't big enough, and then refill below. | |
1145 | 1459 addChunkToFreeLists(blk->_ptr, sz); |
1460 splitBirth(sz); |
0 | 1461 // Don't keep statistics on adding back chunk from a LinAB. |
1462 } else { | |
1463 // A refilled block would not satisfy the request. | |
1464 return NULL; | |
1465 } | |
1466 | |
1467 blk->_ptr = NULL; blk->_word_size = 0; | |
1468 refillLinearAllocBlock(blk); | |
1469 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize, | |
1470 "block was replenished"); | |
1471 if (res != NULL) { | |
1472 splitBirth(size); | |
1473 repairLinearAllocBlock(blk); | |
1474 } else if (blk->_ptr != NULL) { | |
1475 res = blk->_ptr; | |
1476 size_t blk_size = blk->_word_size; | |
1477 blk->_word_size -= size; | |
1478 blk->_ptr += size; | |
1479 splitBirth(size); | |
1480 repairLinearAllocBlock(blk); | |
1481 // Update BOT last so that other (parallel) GC threads see a consistent | |
1482 // view of the BOT and free blocks. | |
1483 // Above must occur before BOT is updated below. | |
1716 | 1484 OrderAccess::storestore(); |
0 | 1485 _bt.split_block(res, blk_size, size); // adjust block offset table |
1486 } | |
1487 return res; | |
1488 } | |
1489 | |
1490 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder( | |
1491 LinearAllocBlock* blk, | |
1492 size_t size) { | |
1493 assert_locked(); | |
1494 assert(size >= MinChunkSize, "too small"); | |
1495 | |
1496 HeapWord* res = NULL; | |
1497 // This is the common case. Keep it simple. | |
1498 if (blk->_word_size >= size + MinChunkSize) { | |
1499 assert(blk->_ptr != NULL, "consistency check"); | |
1500 res = blk->_ptr; | |
1501 // Note that the BOT is up-to-date for the linAB before allocation. It | |
1502 // indicates the start of the linAB. The split_block() updates the | |
1503 // BOT for the linAB after the allocation (indicates the start of the | |
1504 // next chunk to be allocated). | |
1505 size_t blk_size = blk->_word_size; | |
1506 blk->_word_size -= size; | |
1507 blk->_ptr += size; | |
1508 splitBirth(size); | |
1509 repairLinearAllocBlock(blk); | |
1510 // Update BOT last so that other (parallel) GC threads see a consistent | |
1511 // view of the BOT and free blocks. | |
1512 // Above must occur before BOT is updated below. | |
1716 | 1513 OrderAccess::storestore(); |
0 | 1514 _bt.split_block(res, blk_size, size); // adjust block offset table |
1515 _bt.allocated(res, size); | |
1516 } | |
1517 return res; | |
1518 } | |
1519 | |
1520 FreeChunk* | |
1521 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) { | |
1522 assert_locked(); | |
1523 assert(size < SmallForDictionary, "just checking"); | |
1524 FreeChunk* res; | |
1525 res = _indexedFreeList[size].getChunkAtHead(); | |
1526 if (res == NULL) { | |
1527 res = getChunkFromIndexedFreeListHelper(size); | |
1528 } | |
1529 _bt.verify_not_unallocated((HeapWord*) res, size); | |
1145 | 1530 assert(res == NULL || res->size() == size, "Incorrect block size"); |
0 | 1531 return res; |
1532 } | |
1533 | |
1534 FreeChunk* | |
1145 | 1535 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size, |
1536 bool replenish) { |
0 | 1537 assert_locked(); |
1538 FreeChunk* fc = NULL; | |
1539 if (size < SmallForDictionary) { | |
1540 assert(_indexedFreeList[size].head() == NULL || | |
1541 _indexedFreeList[size].surplus() <= 0, | |
1542 "List for this size should be empty or under populated"); | |
1543 // Try best fit in exact lists before replenishing the list | |
1544 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) { | |
1545 // Replenish list. | |
1546 // | |
1547 // Things tried that failed. | |
1548 // Tried allocating out of the two LinAB's first before | |
1549 // replenishing lists. | |
1550 // Tried small linAB of size 256 (size in indexed list) | |
1551 // and replenishing indexed lists from the small linAB. | |
1552 // | |
1553 FreeChunk* newFc = NULL; | |
1145 | 1554 const size_t replenish_size = CMSIndexedFreeListReplenish * size; |
0 | 1555 if (replenish_size < SmallForDictionary) { |
1556 // Do not replenish from an underpopulated size. | |
1557 if (_indexedFreeList[replenish_size].surplus() > 0 && | |
1558 _indexedFreeList[replenish_size].head() != NULL) { | |
1145 | 1559 newFc = _indexedFreeList[replenish_size].getChunkAtHead(); |
1560 } else if (bestFitFirst()) { |
0 | 1561 newFc = bestFitSmall(replenish_size); |
1562 } | |
1563 } | |
1145 | 1564 if (newFc == NULL && replenish_size > size) { |
0 | 1565 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant"); |
1145 | 1566 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false); |
0 | 1567 } |
1145 | 1568 // Note: the split-death stats update for the block obtained above |
1569 // is recorded below, precisely when we know we are actually going |
1570 // to split it into more than one piece. |
0 | 1571 if (newFc != NULL) { |
1145 | 1572 if (replenish || CMSReplenishIntermediate) { |
1573 // Replenish this list and return one block to caller. |
1574 size_t i; |
1575 FreeChunk *curFc, *nextFc; |
1576 size_t num_blk = newFc->size() / size; |
1577 assert(num_blk >= 1, "Smaller than requested?"); |
1578 assert(newFc->size() % size == 0, "Should be integral multiple of request"); |
1579 if (num_blk > 1) { |
1580 // we are sure we will be splitting the block just obtained |
1581 // into multiple pieces; record the split-death of the original |
1582 splitDeath(replenish_size); |
1583 } |
1584 // carve up and link blocks 0, ..., num_blk - 2 |
1585 // The last chunk is not added to the lists but is returned as the |
1586 // free chunk. |
1587 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), |
1588 i = 0; |
1589 i < (num_blk - 1); |
1590 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), |
1591 i++) { |
1592 curFc->setSize(size); |
1593 // Don't record this as a return in order to try and |
1594 // determine the "returns" from a GC. |
1595 _bt.verify_not_unallocated((HeapWord*) fc, size); |
1596 _indexedFreeList[size].returnChunkAtTail(curFc, false); |
1597 _bt.mark_block((HeapWord*)curFc, size); |
1598 splitBirth(size); |
1599 // Don't record the initial population of the indexed list |
1600 // as a split birth. |
1601 } |
1602 |
1603 // check that the arithmetic was OK above |
1604 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size, |
1605 "inconsistency in carving newFc"); |
0 | 1606 curFc->setSize(size); |
1607 _bt.mark_block((HeapWord*)curFc, size); | |
1608 splitBirth(size); | |
1145 | 1609 fc = curFc; |
1610 } else { |
1611 // Return entire block to caller |
1612 fc = newFc; |
0 | 1613 } |
1614 } | |
1615 } | |
1616 } else { | |
1617 // Get a free chunk from the free chunk dictionary to be returned to | |
1618 // replenish the indexed free list. | |
1619 fc = getChunkFromDictionaryExact(size); | |
1620 } | |
1145 | 1621 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk"); |
0 | 1622 return fc; |
1623 } | |
1624 | |
1625 FreeChunk* | |
1626 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) { | |
1627 assert_locked(); | |
1628 FreeChunk* fc = _dictionary->getChunk(size); | |
1629 if (fc == NULL) { | |
1630 return NULL; | |
1631 } | |
1632 _bt.allocated((HeapWord*)fc, fc->size()); | |
1633 if (fc->size() >= size + MinChunkSize) { | |
1634 fc = splitChunkAndReturnRemainder(fc, size); | |
1635 } | |
1636 assert(fc->size() >= size, "chunk too small"); | |
1637 assert(fc->size() < size + MinChunkSize, "chunk too big"); | |
1638 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1639 return fc; | |
1640 } | |
1641 | |
1642 FreeChunk* | |
1643 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) { | |
1644 assert_locked(); | |
1645 FreeChunk* fc = _dictionary->getChunk(size); | |
1646 if (fc == NULL) { | |
1647 return fc; | |
1648 } | |
1649 _bt.allocated((HeapWord*)fc, fc->size()); | |
1650 if (fc->size() == size) { | |
1651 _bt.verify_single_block((HeapWord*)fc, size); | |
1652 return fc; | |
1653 } | |
1654 assert(fc->size() > size, "getChunk() guarantee"); | |
1655 if (fc->size() < size + MinChunkSize) { | |
1656 // Return the chunk to the dictionary and go get a bigger one. | |
1657 returnChunkToDictionary(fc); | |
1658 fc = _dictionary->getChunk(size + MinChunkSize); | |
1659 if (fc == NULL) { | |
1660 return NULL; | |
1661 } | |
1662 _bt.allocated((HeapWord*)fc, fc->size()); | |
1663 } | |
1664 assert(fc->size() >= size + MinChunkSize, "tautology"); | |
1665 fc = splitChunkAndReturnRemainder(fc, size); | |
1666 assert(fc->size() == size, "chunk is wrong size"); | |
1667 _bt.verify_single_block((HeapWord*)fc, size); | |
1668 return fc; | |
1669 } | |
1670 | |
1671 void | |
1672 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) { | |
1673 assert_locked(); | |
1674 | |
1675 size_t size = chunk->size(); | |
1676 _bt.verify_single_block((HeapWord*)chunk, size); | |
1677 // adjust _unallocated_block downward, as necessary | |
1678 _bt.freed((HeapWord*)chunk, size); | |
1679 _dictionary->returnChunk(chunk); | |
1145 | 1680 #ifndef PRODUCT |
1681 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
1682 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats(); |
1683 } |
1684 #endif // PRODUCT |
0 | 1685 } |
1686 | |
1687 void | |
1688 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) { | |
1689 assert_locked(); | |
1690 size_t size = fc->size(); | |
1691 _bt.verify_single_block((HeapWord*) fc, size); | |
1692 _bt.verify_not_unallocated((HeapWord*) fc, size); | |
1693 if (_adaptive_freelists) { | |
1694 _indexedFreeList[size].returnChunkAtTail(fc); | |
1695 } else { | |
1696 _indexedFreeList[size].returnChunkAtHead(fc); | |
1697 } | |
1145 | 1698 #ifndef PRODUCT |
1699 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
1700 _indexedFreeList[size].verify_stats(); |
1701 } |
1702 #endif // PRODUCT |
0 | 1703 } |
1704 | |
1705 // Add chunk to end of last block -- if it's the largest | |
1706 // block -- and update BOT and census data. We would | |
1707 // of course have preferred to coalesce it with the | |
1708 // last block, but it's currently less expensive to find the | |
1709 // largest block than it is to find the last. | |
1710 void | |
1711 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats( | |
1712 HeapWord* chunk, size_t size) { | |
1713 // check that the chunk does lie in this space! | |
1714 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1715 // One of the parallel gc task threads may be here | |
1716 // whilst others are allocating. | |
1717 Mutex* lock = NULL; | |
1718 if (ParallelGCThreads != 0) { | |
1719 lock = &_parDictionaryAllocLock; | |
1720 } | |
1721 FreeChunk* ec; | |
1722 { | |
1723 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1724 ec = dictionary()->findLargestDict(); // get largest block | |
1725 if (ec != NULL && ec->end() == chunk) { | |
1726 // It's a coterminal block - we can coalesce. | |
1727 size_t old_size = ec->size(); | |
1728 coalDeath(old_size); | |
1729 removeChunkFromDictionary(ec); | |
1730 size += old_size; | |
1731 } else { | |
1732 ec = (FreeChunk*)chunk; | |
1733 } | |
1734 } | |
1735 ec->setSize(size); | |
1736 debug_only(ec->mangleFreed(size)); | |
1737 if (size < SmallForDictionary) { | |
1738 lock = _indexedFreeListParLocks[size]; | |
1739 } | |
1740 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1741 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true); | |
1742 // record the birth under the lock since the recording involves | |
1743 // manipulation of the list on which the chunk lives and | |
1744 // if the chunk is allocated and is the last on the list, | |
1745 // the list can go away. | |
1746 coalBirth(size); | |
1747 } | |
1748 | |
1749 void | |
1750 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk, | |
1751 size_t size) { | |
1752 // check that the chunk does lie in this space! | |
1753 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1754 assert_locked(); | |
1755 _bt.verify_single_block(chunk, size); | |
1756 | |
1757 FreeChunk* fc = (FreeChunk*) chunk; | |
1758 fc->setSize(size); | |
1759 debug_only(fc->mangleFreed(size)); | |
1760 if (size < SmallForDictionary) { | |
1761 returnChunkToFreeList(fc); | |
1762 } else { | |
1763 returnChunkToDictionary(fc); | |
1764 } | |
1765 } | |
1766 | |
1767 void | |
1768 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk, | |
1769 size_t size, bool coalesced) { | |
1770 assert_locked(); | |
1771 assert(chunk != NULL, "null chunk"); | |
1772 if (coalesced) { | |
1773 // repair BOT | |
1774 _bt.single_block(chunk, size); | |
1775 } | |
1776 addChunkToFreeLists(chunk, size); | |
1777 } | |
1778 | |
1779 // We _must_ find the purported chunk on our free lists; | |
1780 // we assert if we don't. | |
1781 void | |
1782 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) { | |
1783 size_t size = fc->size(); | |
1784 assert_locked(); | |
1785 debug_only(verifyFreeLists()); | |
1786 if (size < SmallForDictionary) { | |
1787 removeChunkFromIndexedFreeList(fc); | |
1788 } else { | |
1789 removeChunkFromDictionary(fc); | |
1790 } | |
1791 _bt.verify_single_block((HeapWord*)fc, size); | |
1792 debug_only(verifyFreeLists()); | |
1793 } | |
1794 | |
1795 void | |
1796 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) { | |
1797 size_t size = fc->size(); | |
1798 assert_locked(); | |
1799 assert(fc != NULL, "null chunk"); | |
1800 _bt.verify_single_block((HeapWord*)fc, size); | |
1801 _dictionary->removeChunk(fc); | |
1802 // adjust _unallocated_block upward, as necessary | |
1803 _bt.allocated((HeapWord*)fc, size); | |
1804 } | |

void
CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  assert_locked();
  size_t size = fc->size();
  _bt.verify_single_block((HeapWord*)fc, size);
  NOT_PRODUCT(
    if (FLSVerifyIndexTable) {
      verifyIndexedFreeList(size);
    }
  )
  _indexedFreeList[size].removeChunk(fc);
  debug_only(fc->clearNext());
  debug_only(fc->clearPrev());
  NOT_PRODUCT(
    if (FLSVerifyIndexTable) {
      verifyIndexedFreeList(size);
    }
  )
}

FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  /* A hint is the next larger size that has a surplus.
     Start search at a size large enough to guarantee that
     the excess is >= MIN_CHUNK. */
  size_t start = align_object_size(numWords + MinChunkSize);
  if (start < IndexSetSize) {
    FreeList* it   = _indexedFreeList;
    size_t    hint = _indexedFreeList[start].hint();
    while (hint < IndexSetSize) {
      assert(hint % MinObjAlignment == 0, "hint should be aligned");
      FreeList *fl = &_indexedFreeList[hint];
      if (fl->surplus() > 0 && fl->head() != NULL) {
        // Found a list with surplus, reset original hint
        // and split out a free chunk which is returned.
        _indexedFreeList[start].set_hint(hint);
        FreeChunk* res = getFromListGreater(fl, numWords);
        assert(res == NULL || res->isFree(),
               "Should be returning a free chunk");
        return res;
      }
      hint = fl->hint();  /* keep looking */
    }
    /* None found. */
    it[start].set_hint(IndexSetSize);
  }
  return NULL;
}
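
// Editorial note (illustration, not in the original source): the hint chain
// lets bestFitSmall() skip empty lists. Assuming MinChunkSize == 4, a
// request for 8 words starts the search at size 12, so any list that
// satisfies it leaves a remainder of at least MinChunkSize; if list 12's
// hint points at 16 and list 16 has a surplus, a 16-word chunk is split
// into the 8 words requested plus an 8-word remainder that is returned to
// the free lists.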

/* Requires fl->size >= numWords + MinChunkSize */
FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
  size_t numWords) {
  FreeChunk *curr = fl->head();
  assert(curr != NULL, "List is empty");   // check before dereferencing curr
  size_t oldNumWords = curr->size();
  assert(numWords >= MinChunkSize, "Word size is too small");
  assert(oldNumWords >= numWords + MinChunkSize,
         "Size of chunks in the list is too small");

  fl->removeChunk(curr);
  // recorded indirectly by splitChunkAndReturnRemainder -
  // smallSplit(oldNumWords, numWords);
  FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  // Does anything have to be done for the remainder in terms of
  // fixing the card table?
  assert(new_chunk == NULL || new_chunk->isFree(),
         "Should be returning a free chunk");
  return new_chunk;
}

FreeChunk*
CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  size_t new_size) {
  assert_locked();
  size_t size = chunk->size();
  assert(size > new_size, "Split from a smaller block?");
  assert(is_aligned(chunk), "alignment problem");
  assert(size == adjustObjectSize(size), "alignment problem");
  size_t rem_size = size - new_size;
  assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  assert(is_aligned(ffc), "alignment problem");
  ffc->setSize(rem_size);
  ffc->linkNext(NULL);
  ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  // Above must occur before BOT is updated below.
  // adjust block offset table
  OrderAccess::storestore();
  assert(chunk->isFree() && ffc->isFree(), "Error");
  _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  if (rem_size < SmallForDictionary) {
    bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
    if (is_par) _indexedFreeListParLocks[rem_size]->lock();
    returnChunkToFreeList(ffc);
    split(size, rem_size);
    if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  } else {
    returnChunkToDictionary(ffc);
    split(size, rem_size);
  }
  chunk->setSize(new_size);
  return chunk;
}
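
// Editorial note (not in the original source): the storestore() barrier
// orders the initialization of ffc as a free chunk ahead of the BOT update,
// so a concurrent BOT walker can never find the remainder through the
// offset table before it is recognizably a free block (cf. 6948538,
// "BOT walkers can fall into object allocation and initialization cracks").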

void
CompactibleFreeListSpace::sweep_completed() {
  // Now that space is probably plentiful, refill linear
  // allocation blocks as needed.
  refillLinearAllocBlocksIfNeeded();
}

void
CompactibleFreeListSpace::gc_prologue() {
  assert_locked();
  if (PrintFLSStatistics != 0) {
    gclog_or_tty->print("Before GC:\n");
    reportFreeListStatistics();
  }
  refillLinearAllocBlocksIfNeeded();
}

void
CompactibleFreeListSpace::gc_epilogue() {
  assert_locked();
  if (PrintGCDetails && Verbose && !_adaptive_freelists) {
    if (_smallLinearAllocBlock._word_size == 0)
      warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  }
  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  _promoInfo.stopTrackingPromotions();
  repairLinearAllocationBlocks();
  // Print Space's stats
  if (PrintFLSStatistics != 0) {
    gclog_or_tty->print("After GC:\n");
    reportFreeListStatistics();
  }
}

// Iteration support, mostly delegated from a CMS generation

void CompactibleFreeListSpace::save_marks() {
  // mark the "end" of the used space at the time of this call;
  // note, however, that promoted objects from this point
  // on are tracked in the _promoInfo below.
  set_saved_mark_word(unallocated_block());
  // inform allocator that promotions should be tracked.
  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  _promoInfo.startTrackingPromotions();
}

bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  assert(_promoInfo.tracking(), "No preceding save_marks?");
  assert(SharedHeap::heap()->n_par_threads() == 0,
         "Shouldn't be called if using parallel gc.");
  return _promoInfo.noPromotions();
}

#define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
                                                                             \
void CompactibleFreeListSpace::                                              \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {               \
  assert(SharedHeap::heap()->n_par_threads() == 0,                           \
         "Shouldn't be called (yet) during parallel part of gc.");           \
  _promoInfo.promoted_oops_iterate##nv_suffix(blk);                          \
  /*                                                                         \
   * This also restores any displaced headers and removes the elements from \
   * the iteration set as they are processed, so that we have a clean slate \
   * at the end of the iteration. Note, thus, that if new objects are       \
   * promoted as a result of the iteration they are iterated over as well.  \
   */                                                                        \
  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");             \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)


void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  // ugghh... how would one do this efficiently for a non-contiguous space?
  guarantee(false, "NYI");
}

bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  return _smallLinearAllocBlock._word_size == 0;
}

void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  // Fix up linear allocation blocks to look like free blocks
  repairLinearAllocBlock(&_smallLinearAllocBlock);
}

void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  assert_locked();
  if (blk->_ptr != NULL) {
    assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
           "Minimum block size requirement");
    FreeChunk* fc = (FreeChunk*)(blk->_ptr);
    fc->setSize(blk->_word_size);
    fc->linkPrev(NULL);   // mark as free
    fc->dontCoalesce();
    assert(fc->isFree(), "just marked it free");
    assert(fc->cantCoalesce(), "just marked it uncoalescable");
  }
}

void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  assert_locked();
  if (_smallLinearAllocBlock._ptr == NULL) {
    assert(_smallLinearAllocBlock._word_size == 0,
           "Size of linAB should be zero if the ptr is NULL");
    // Reset the linAB refill and allocation size limit.
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  }
  refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
}

void
CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  assert_locked();
  assert((blk->_ptr == NULL && blk->_word_size == 0) ||
         (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
         "blk invariant");
  if (blk->_ptr == NULL) {
    refillLinearAllocBlock(blk);
  }
  if (PrintMiscellaneous && Verbose) {
    if (blk->_word_size == 0) {
      warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
    }
  }
}

void
CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  assert_locked();
  assert(blk->_word_size == 0 && blk->_ptr == NULL,
         "linear allocation block should be empty");
  FreeChunk* fc;
  if (blk->_refillSize < SmallForDictionary &&
      (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
    // A linAB's strategy might be to use small sizes to reduce
    // fragmentation but still get the benefits of allocation from a
    // linAB.
  } else {
    fc = getChunkFromDictionary(blk->_refillSize);
  }
  if (fc != NULL) {
    blk->_ptr  = (HeapWord*)fc;
    blk->_word_size = fc->size();
    fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  }
}

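// Editorial note (not in the original source): a linAB amortizes free-list
// traffic by carving many small allocations out of one contiguous block.
// Refill prefers the indexed free lists when _refillSize is below
// SmallForDictionary and falls back to the dictionary; a _word_size of 0
// afterwards means the refill failed (see linearAllocationWouldFail()).
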
// Support for concurrent collection policy decisions.
bool CompactibleFreeListSpace::should_concurrent_collect() const {
  // In the future we might want to add in fragmentation stats --
  // including erosion of the "mountain" into this decision as well.
  return !adaptive_freelists() && linearAllocationWouldFail();
}

// Support for compaction

void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  // prepare_for_compaction() uses the space between live objects
  // so that later phase can skip dead space quickly. So verification
  // of the free lists doesn't work after.
}

#define obj_size(q) adjustObjectSize(oop(q)->size())
#define adjust_obj_size(s) adjustObjectSize(s)

void CompactibleFreeListSpace::adjust_pointers() {
  // In other versions of adjust_pointers(), a bail out
  // based on the amount of live data in the generation
  // (i.e., if 0, bail out) may be used.
  // Cannot test used() == 0 here because the free lists have already
  // been mangled by the compaction.

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  // See note about verification in prepare_for_compaction().
}

void CompactibleFreeListSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
// where fbs is free block sizes
double CompactibleFreeListSpace::flsFrag() const {
  size_t itabFree = totalSizeInIndexedFreeLists();
  double frag = 0.0;
  size_t i;

  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    double sz  = i;
    frag      += _indexedFreeList[i].count() * (sz * sz);
  }

  double totFree = itabFree +
                   _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  if (totFree > 0) {
    frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
            (totFree * totFree));
    frag = (double)1.0  - frag;
  } else {
    assert(frag == 0.0, "Follows from totFree == 0");
  }
  return frag;
}
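
// Editorial note (illustration, not in the original source): with free
// blocks of sizes {2, 2, 4}, sum(fbs) = 8 and sum(fbs^2) = 24, so the
// metric is 1 - 24/64 = 0.625; a single free block of size 8 gives
// 1 - 64/64 = 0.0. The metric tends toward 0 when free space sits in a
// few large blocks and toward 1 as it shatters into many small ones.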

void CompactibleFreeListSpace::beginSweepFLCensus(
  float inter_sweep_current,
  float inter_sweep_estimate,
  float intra_sweep_estimate) {
  assert_locked();
  size_t i;
  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    FreeList* fl    = &_indexedFreeList[i];
    if (PrintFLSStatistics > 1) {
      gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
    }
    fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
    fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
    fl->set_beforeSweep(fl->count());
    fl->set_bfrSurp(fl->surplus());
  }
  _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
                                    inter_sweep_current,
                                    inter_sweep_estimate,
                                    intra_sweep_estimate);
}

void CompactibleFreeListSpace::setFLSurplus() {
  assert_locked();
  size_t i;
  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    FreeList *fl = &_indexedFreeList[i];
    fl->set_surplus(fl->count() -
                    (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  }
}

void CompactibleFreeListSpace::setFLHints() {
  assert_locked();
  size_t i;
  size_t h = IndexSetSize;
  for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    FreeList *fl = &_indexedFreeList[i];
    fl->set_hint(h);
    if (fl->surplus() > 0) {
      h = i;
    }
  }
}

void CompactibleFreeListSpace::clearFLCensus() {
  assert_locked();
  int i;
  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    FreeList *fl = &_indexedFreeList[i];
    fl->set_prevSweep(fl->count());
    fl->set_coalBirths(0);
    fl->set_coalDeaths(0);
    fl->set_splitBirths(0);
    fl->set_splitDeaths(0);
  }
}

void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  if (PrintFLSStatistics > 0) {
    HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
    gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
                           largestAddr);
  }
  setFLSurplus();
  setFLHints();
  if (PrintGC && PrintFLSCensus > 0) {
    printFLCensus(sweep_count);
  }
  clearFLCensus();
  assert_locked();
  _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
}

bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  if (size < SmallForDictionary) {
    FreeList *fl = &_indexedFreeList[size];
    return (fl->coalDesired() < 0) ||
           ((int)fl->count() > fl->coalDesired());
  } else {
    return dictionary()->coalDictOverPopulated(size);
  }
}

void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  assert(size < SmallForDictionary, "Size too large for indexed list");
  FreeList *fl = &_indexedFreeList[size];
  fl->increment_coalBirths();
  fl->increment_surplus();
}

void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  assert(size < SmallForDictionary, "Size too large for indexed list");
  FreeList *fl = &_indexedFreeList[size];
  fl->increment_coalDeaths();
  fl->decrement_surplus();
}

void CompactibleFreeListSpace::coalBirth(size_t size) {
  if (size < SmallForDictionary) {
    smallCoalBirth(size);
  } else {
    dictionary()->dictCensusUpdate(size,
                                   false /* split */,
                                   true /* birth */);
  }
}

void CompactibleFreeListSpace::coalDeath(size_t size) {
  if (size < SmallForDictionary) {
    smallCoalDeath(size);
  } else {
    dictionary()->dictCensusUpdate(size,
                                   false /* split */,
                                   false /* birth */);
  }
}

void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  assert(size < SmallForDictionary, "Size too large for indexed list");
  FreeList *fl = &_indexedFreeList[size];
  fl->increment_splitBirths();
  fl->increment_surplus();
}

void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  assert(size < SmallForDictionary, "Size too large for indexed list");
  FreeList *fl = &_indexedFreeList[size];
  fl->increment_splitDeaths();
  fl->decrement_surplus();
}

void CompactibleFreeListSpace::splitBirth(size_t size) {
  if (size < SmallForDictionary) {
    smallSplitBirth(size);
  } else {
    dictionary()->dictCensusUpdate(size,
                                   true /* split */,
                                   true /* birth */);
  }
}

void CompactibleFreeListSpace::splitDeath(size_t size) {
  if (size < SmallForDictionary) {
    smallSplitDeath(size);
  } else {
    dictionary()->dictCensusUpdate(size,
                                   true /* split */,
                                   false /* birth */);
  }
}

void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  size_t to2 = from - to1;
  splitDeath(from);
  splitBirth(to1);
  splitBirth(to2);
}
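
// Editorial note (illustration, not in the original source): splitting a
// 100-word chunk to satisfy a 40-word request records one split death at
// size 100 and split births at sizes 40 and 60, so the census tracks how
// each size class gains and loses chunks between sweeps.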

void CompactibleFreeListSpace::print() const {
  print_on(tty);
}

void CompactibleFreeListSpace::prepare_for_verify() {
  assert_locked();
  repairLinearAllocationBlocks();
  // Verify that the SpoolBlocks look like free blocks of
  // appropriate sizes... To be done ...
}

class VerifyAllBlksClosure: public BlkClosure {
 private:
  const CompactibleFreeListSpace* _sp;
  const MemRegion                 _span;
  HeapWord*                       _last_addr;
  size_t                          _last_size;
  bool                            _last_was_obj;
  bool                            _last_was_live;

 public:
  VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
    MemRegion span) :  _sp(sp), _span(span),
                       _last_addr(NULL), _last_size(0),
                       _last_was_obj(false), _last_was_live(false) { }

  virtual size_t do_blk(HeapWord* addr) {
    size_t res;
    bool   was_obj  = false;
    bool   was_live = false;
    if (_sp->block_is_obj(addr)) {
      was_obj = true;
      oop p = oop(addr);
      guarantee(p->is_oop(), "Should be an oop");
      res = _sp->adjustObjectSize(p->size());
      if (_sp->obj_is_alive(addr)) {
        was_live = true;
        p->verify();
      }
    } else {
      FreeChunk* fc = (FreeChunk*)addr;
      res = fc->size();
      if (FLSVerifyLists && !fc->cantCoalesce()) {
        guarantee(_sp->verifyChunkInFreeLists(fc),
                  "Chunk should be on a free list");
      }
    }
    if (res == 0) {
      gclog_or_tty->print_cr("Livelock: no rank reduction!");
      gclog_or_tty->print_cr(
        " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
        " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
        addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
        _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
      _sp->print_on(gclog_or_tty);
      guarantee(false, "Seppuku!");
    }
    _last_addr = addr;
    _last_size = res;
    _last_was_obj  = was_obj;
    _last_was_live = was_live;
    return res;
  }
};
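
// Editorial note (not in the original source): a do_blk() result of 0 would
// leave blk_iterate() stuck at the same address, so the closure treats it
// as a livelock, prints the current and previous block, and aborts.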

class VerifyAllOopsClosure: public OopClosure {
 private:
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const MemRegion                 _span;
  const bool                      _past_remark;
  const CMSBitMap*                _bit_map;

 protected:
  void do_oop(void* p, oop obj) {
    if (_span.contains(obj)) { // the interior oop points into CMS heap
      if (!_span.contains(p)) { // reference from outside CMS heap
        // Should be a valid object; the first disjunct below allows
        // us to sidestep an assertion in block_is_obj() that insists
        // that p be in _sp.  Note that several generations (and spaces)
        // are spanned by _span (CMS heap) above.
        guarantee(!_sp->is_in_reserved(obj) ||
                  _sp->block_is_obj((HeapWord*)obj),
                  "Should be an object");
        guarantee(obj->is_oop(), "Should be an oop");
        obj->verify();
        if (_past_remark) {
          // Remark has been completed, the object should be marked;
          // check this explicitly rather than discarding the result.
          guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
        }
      } else { // reference within CMS heap
        if (_past_remark) {
          // Remark has been completed -- so the referent should have
          // been marked, if referring object is.
          if (_bit_map->isMarked(_collector->block_start(p))) {
            guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
          }
        }
      }
    } else if (_sp->is_in_reserved(p)) {
      // the reference is from FLS, and points out of FLS
      guarantee(obj->is_oop(), "Should be an oop");
      obj->verify();
    }
  }

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      do_oop(p, obj);
    }
  }

 public:
  VerifyAllOopsClosure(const CMSCollector* collector,
    const CompactibleFreeListSpace* sp, MemRegion span,
    bool past_remark, CMSBitMap* bit_map) :
    OopClosure(), _collector(collector), _sp(sp), _span(span),
    _past_remark(past_remark), _bit_map(bit_map) { }

  virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
};

void CompactibleFreeListSpace::verify(bool ignored) const {
  assert_lock_strong(&_freelistLock);
  verify_objects_initialized();
  MemRegion span = _collector->_span;
  bool past_remark = (_collector->abstract_state() ==
                      CMSCollector::Sweeping);

  ResourceMark rm;
  HandleMark  hm;

  // Check integrity of CFL data structures
  _promoInfo.verify();
  _dictionary->verify();
  if (FLSVerifyIndexTable) {
    verifyIndexedFreeLists();
  }
  // Check integrity of all objects and free blocks in space
  {
    VerifyAllBlksClosure cl(this, span);
    ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  }
  // Check that all references in the heap to FLS
  // are to valid objects in FLS or that references in
  // FLS are to valid objects elsewhere in the heap
  if (FLSVerifyAllHeapReferences)
  {
    VerifyAllOopsClosure cl(_collector, this, span, past_remark,
      _collector->markBitMap());
    CollectedHeap* ch = Universe::heap();
    ch->oop_iterate(&cl);              // all oops in generations
    ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  }

  if (VerifyObjectStartArray) {
    // Verify the block offset table
    _bt.verify();
  }
}

#ifndef PRODUCT
void CompactibleFreeListSpace::verifyFreeLists() const {
  if (FLSVerifyLists) {
    _dictionary->verify();
    verifyIndexedFreeLists();
  } else {
    if (FLSVerifyDictionary) {
      _dictionary->verify();
    }
    if (FLSVerifyIndexTable) {
      verifyIndexedFreeLists();
    }
  }
}
#endif

void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  size_t i = 0;
  for (; i < MinChunkSize; i++) {
    guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  }
  for (; i < IndexSetSize; i++) {
    verifyIndexedFreeList(i);
  }
}

void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  FreeChunk* fc   =  _indexedFreeList[size].head();
  FreeChunk* tail =  _indexedFreeList[size].tail();
  size_t    num = _indexedFreeList[size].count();
  size_t      n = 0;
  guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
  for (; fc != NULL; fc = fc->next(), n++) {
    guarantee(fc->size() == size, "Size inconsistency");
    guarantee(fc->isFree(), "!free?");
    guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
    guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  }
  guarantee(n == num, "Incorrect count");
}

#ifndef PRODUCT
void CompactibleFreeListSpace::checkFreeListConsistency() const {
  assert(_dictionary->minSize() <= IndexSetSize,
         "Some sizes can't be allocated without recourse to"
         " linear allocation buffers");
  assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
         "else MIN_TREE_CHUNK_SIZE is wrong");
  assert((IndexSetStride == 2 && IndexSetStart == 2) ||
         (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
         "Some for-loops may be incorrectly initialized");
  assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
         "For-loops that iterate over IndexSet with stride 2 may be wrong");
}
#endif

void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  assert_lock_strong(&_freelistLock);
  FreeList total;
  gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  FreeList::print_labels_on(gclog_or_tty, "size");
  size_t totalFree = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    const FreeList *fl = &_indexedFreeList[i];
    totalFree += fl->count() * fl->size();
    if (i % (40*IndexSetStride) == 0) {
      FreeList::print_labels_on(gclog_or_tty, "size");
    }
    fl->print_on(gclog_or_tty);
    total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
    total.set_surplus(    total.surplus()     + fl->surplus()    );
    total.set_desired(    total.desired()     + fl->desired()    );
    total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
    total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
    total.set_count(      total.count()       + fl->count()      );
    total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
    total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
    total.set_splitBirths(total.splitBirths() + fl->splitBirths());
    total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  }
  total.print_on(gclog_or_tty, "TOTAL");
  gclog_or_tty->print_cr("Total free in indexed lists "
                         SIZE_FORMAT " words", totalFree);
  gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
    (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
            (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
    (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  _dictionary->printDictCensus();
}
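
// Editorial note (not in the original source): "growth" above is net births
// minus deaths since the previous sweep, normalized by the population at the
// previous sweep, and "deficit" is the shortfall of the current count from
// the desired count, normalized by the desired count.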

///////////////////////////////////////////////////////////////////////////
// CFLS_LAB
///////////////////////////////////////////////////////////////////////////

#define VECTOR_257(x)                                                                                    \
  /* 1  2  3  4  5  6  7  8  9  1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,    \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,    \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,    \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,    \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,    \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,    \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,    \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,    \
     x }

// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
// OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);

CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  _cfls(cfls)
{
  assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _indexedFreeList[i].set_size(i);
    _num_blocks[i] = 0;
  }
}

static bool _CFLS_LAB_modified = false;

void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  assert(!_CFLS_LAB_modified, "Call only once");
  _CFLS_LAB_modified = true;
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _blocks_to_claim[i].modify(n, wt, true /* force */);
  }
}

HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  FreeChunk* res;
  assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
    // This locking manages sync with other large object allocations.
    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    res = _cfls->getChunkFromDictionaryExact(word_sz);
    if (res == NULL) return NULL;
  } else {
    FreeList* fl = &_indexedFreeList[word_sz];
    if (fl->count() == 0) {
      // Attempt to refill this local free list.
      get_from_global_pool(word_sz, fl);
      // If it didn't work, give up.
      if (fl->count() == 0) return NULL;
    }
    res = fl->getChunkAtHead();
    assert(res != NULL, "Why was count non-zero?");
  }
  res->markNotFree();
  assert(!res->isFree(), "shouldn't be marked free");
  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  // mangle a just allocated object with a distinct pattern.
  debug_only(res->mangleAllocated(word_sz));
  return (HeapWord*)res;
}
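
// Editorial note (not in the original source): small requests (below
// IndexSetSize words) are served from this worker's local indexed free
// lists, which are refilled in bulk from the shared pool, while large
// requests synchronize on the shared dictionary lock; this keeps the
// common parallel-promotion path cheap.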

// Get a chunk of blocks of the right size and update related
// book-keeping stats
void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  // Get the #blocks we want to claim
  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  assert(n_blks > 0, "Error");
  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  // In some cases, when the application has a phase change,
  // there may be a sudden and sharp shift in the object survival
  // profile, and updating the counts at the end of a scavenge
  // may not be quick enough, giving rise to large scavenge pauses
  // during these phase changes. It is beneficial to detect such
  // changes on-the-fly during a scavenge and avoid such a phase-change
  // pothole. The following code is a heuristic attempt to do that.
  // It is protected by a product flag until we have gained
  // enough experience with this heuristic and fine-tuned its behaviour.
  // WARNING: This might increase fragmentation if we overreact to
  // small spikes, so some kind of historical smoothing based on
  // previous experience with the greater reactivity might be useful.
  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  // default.
  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
    size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
    n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
    n_blks = MIN2(n_blks, CMSOldPLABMax);
  }
  assert(n_blks > 0, "Error");
  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  // Update stats table entry for this block size
  _num_blocks[word_sz] += fl->count();
}
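
// Editorial illustration of the quick-resize heuristic above (numbers are
// hypothetical, not from the source): if this worker has already consumed
// _num_blocks[word_sz] = 1200 blocks, n_blks = 20, CMSOldPLABNumRefills = 4
// and CMSOldPLABToleranceFactor = 5, then multiple = 1200/(5*4*20) = 3;
// with CMSOldPLABReactivityFactor = 2 the claim grows to
// 20 + 2*3*20 = 140 blocks, capped at CMSOldPLABMax.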

void CFLS_LAB::compute_desired_plab_size() {
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
           "Counter inconsistency");
    if (_global_num_workers[i] > 0) {
      // Need to smooth wrt historical average
      if (ResizeOldPLAB) {
        _blocks_to_claim[i].sample(
          MAX2((size_t)CMSOldPLABMin,
          MIN2((size_t)CMSOldPLABMax,
               _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
      }
      // Reset counters for next round
      _global_num_workers[i] = 0;
      _global_num_blocks[i] = 0;
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
                               i, (size_t)_blocks_to_claim[i].average());
      }
    }
  }
}
2662 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
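
// Worked example (editorial sketch, hypothetical counts): if the last
// scavenge saw _global_num_blocks[i] == 1024 blocks of size i consumed
// across _global_num_workers[i] == 8 workers, the sampled per-refill target
// is 1024/(8*CMSOldPLABNumRefills) -- e.g. 32 blocks when
// CMSOldPLABNumRefills is 4 -- clamped to [CMSOldPLABMin, CMSOldPLABMax]
// before being folded into the historical average used to size future
// refills.
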
void CFLS_LAB::retire(int tid) {
  // We run this single threaded with the world stopped;
  // so no need for locks and such.
#define CFLS_LAB_PARALLEL_ACCESS 0
  NOT_PRODUCT(Thread* t = Thread::current();)
  assert(Thread::current()->is_VM_thread(), "Error");
  assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
         "Will access uninitialized slot below");
#if CFLS_LAB_PARALLEL_ACCESS
  for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
       i > 0;
       i -= CompactibleFreeListSpace::IndexSetStride) {
#else // CFLS_LAB_PARALLEL_ACCESS
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
#endif // !CFLS_LAB_PARALLEL_ACCESS
    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
           "Can't retire more than what we obtained");
    if (_num_blocks[i] > 0) {
      size_t num_retire = _indexedFreeList[i].count();
      assert(_num_blocks[i] > num_retire, "Should have used at least one");
      {
#if CFLS_LAB_PARALLEL_ACCESS
        MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
                        Mutex::_no_safepoint_check_flag);
#endif // CFLS_LAB_PARALLEL_ACCESS
        // Update global stats for num_blocks used
        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
        _global_num_workers[i]++;
        assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
        if (num_retire > 0) {
          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
          // Reset this list.
          _indexedFreeList[i] = FreeList();
          _indexedFreeList[i].set_size(i);
        }
      }
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
                               tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
      }
      // Reset stats for next round
      _num_blocks[i] = 0;
    }
  }
}
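
// Illustrative trace (editorial sketch, hypothetical numbers): a worker that
// obtained _num_blocks[8] == 40 eight-word blocks during the scavenge and
// still has num_retire == 5 of them unused returns those 5 to the shared
// _indexedFreeList[8] and credits the 35 actually consumed to
// _global_num_blocks[8], which compute_desired_plab_size() later averages
// over _global_num_workers[8].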

void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  // then try getting a big chunk and splitting it.
  {
    bool found;
    int k;
    size_t cur_sz;
    for (k = 1, cur_sz = k * word_sz, found = false;
         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
         (CMSSplitIndexedFreeListBlocks || k <= 1);
         k++, cur_sz = k * word_sz) {
      FreeList fl_for_cur_sz;  // Empty.
      fl_for_cur_sz.set_size(cur_sz);
      {
        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                        Mutex::_no_safepoint_check_flag);
        FreeList* gfl = &_indexedFreeList[cur_sz];
        if (gfl->count() != 0) {
          // nn is the number of chunks of size cur_sz that
          // we'd need to split k-ways each, in order to create
          // "n" chunks of size word_sz each.
          const size_t nn = MAX2(n/k, (size_t)1);
          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
          found = true;
          if (k > 1) {
            // Update split death stats for the cur_sz-size blocks list:
            // we increment the split death count by the number of blocks
            // we just took from the cur_sz-size blocks list and which
            // we will be splitting below.
            ssize_t deaths = gfl->splitDeaths() +
                             fl_for_cur_sz.count();
            gfl->set_splitDeaths(deaths);
          }
        }
      }
      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
      if (found) {
        if (k == 1) {
          fl->prepend(&fl_for_cur_sz);
        } else {
          // Divide each block on fl_for_cur_sz up k ways.
          FreeChunk* fc;
          while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
            // Must do this in reverse order, so that anybody attempting to
            // access the main chunk sees it as a single free block until we
            // change it.
            size_t fc_size = fc->size();
            assert(fc->isFree(), "Error");
            for (int i = k-1; i >= 0; i--) {
              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              assert((i != 0) ||
                     ((fc == ffc) && ffc->isFree() &&
                      (ffc->size() == k*word_sz) && (fc_size == word_sz)),
                     "Counting error");
              ffc->setSize(word_sz);
              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
              ffc->linkNext(NULL);
              // Above must occur before BOT is updated below.
              OrderAccess::storestore();
              // splitting from the right, fc_size == i * word_sz
              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
              fc_size -= word_sz;
              assert(fc_size == i*word_sz, "Error");
              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
              _bt.verify_single_block((HeapWord*)fc, fc_size);
              _bt.verify_single_block((HeapWord*)ffc, word_sz);
              // Push this on "fl".
              fl->returnChunkAtHead(ffc);
            }
            // TRAP
            assert(fl->tail()->next() == NULL, "List invariant.");
          }
        }
        // Update birth stats for this block size.
        size_t num = fl->count();
        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                        Mutex::_no_safepoint_check_flag);
        ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
        _indexedFreeList[word_sz].set_splitBirths(births);
        return;
      }
    }
  }
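  // Illustrative trace (editorial sketch): with CMSSplitIndexedFreeListBlocks
  // enabled, a request for n == 64 blocks of word_sz == 4 when the 4-word
  // list is empty but the 8-word list is populated proceeds to k == 2,
  // cur_sz == 8, takes
  //   nn = MAX2(64/2, 1) = 32
  // eight-word chunks, and splits each in two (right to left, publishing each
  // half via the storestore barrier before its BOT entry is updated),
  // yielding the 64 requested blocks.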
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    while (n > 0) {
      fc = dictionary()->getChunk(MAX2(n * word_sz,
                                       _dictionary->minSize()),
                                  FreeBlockDictionary::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;
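    // Illustrative (editorial sketch, hypothetical sizes): the loop above
    // relaxes the request rather than failing outright. Asked for n == 64
    // blocks of 10 words when the dictionary's largest chunk holds 350
    // words, getChunk() fails for requests of 640, 630, ... words until
    // n == 35, whose 350-word request that chunk satisfies.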
    // Otherwise, split up that block.
    assert((ssize_t)n >= 1, "Control point invariant");
    assert(fc->isFree(), "Error: should be a free block");
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    const size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    assert((ssize_t)n >= 1, "Control point invariant");
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
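    // Worked example (editorial sketch, hypothetical sizes): fc->size() == 103
    // words with word_sz == 10 gives n == 10 and rem == 3; if MinChunkSize
    // were 4, the 3-word tail could not stand alone as a free chunk, so we
    // give back one block: n becomes 9 and rem becomes 13.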
    // Note that at this point we may have n == 0.
    assert((ssize_t)n >= 0, "Control point invariant");

    // If n is 0, the chunk fc that was found is not large
    // enough to leave a viable remainder.  We are unable to
    // allocate even one block.  Return fc to the
    // dictionary and return, leaving "fl" empty.
    if (n == 0) {
      returnChunkToDictionary(fc);
      assert(fl->count() == 0, "We never allocated any blocks");
      return;
    }

    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a concurrent allocation
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->setSize(rem);
      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
      rem_fc->linkNext(NULL);
      // Above must occur before BOT is updated below.
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      OrderAccess::storestore();
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      assert(fc->isFree(), "Error");
      fc->setSize(prefix_size);
      if (rem >= IndexSetSize) {
        returnChunkToDictionary(rem_fc);
        dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
    smallSplitBirth(rem);
  }
  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->setSize(word_sz);
    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
    ffc->linkNext(NULL);
    // Above must occur before BOT is updated below.
    OrderAccess::storestore();
    // splitting from the right, fc_size == (n - i + 1) * wordsize
    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->returnChunkAtHead(ffc);
  }
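  // Why reverse order (editorial note): carving blocks from the right end
  // means a concurrent BOT walker racing with the split always sees a
  // consistent prefix [fc, fc + i*word_sz) still described as one free
  // block; each carved block's header is made walkable (setSize/linkPrev)
  // and ordered by storestore() before its BOT entry changes.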
  // First chunk
  assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
  // The blocks above should show their new sizes before the first block below
  fc->setSize(word_sz);
  fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
  fc->linkNext(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->returnChunkAtHead(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
    _indexedFreeList[word_sz].set_splitBirths(births);
    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    // _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  assert(n_tasks == 0 ||
         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
          (used_region().start() + n_tasks*task_size >= used_region().end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
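
// Worked example (editorial sketch, hypothetical sizes): with
// used_region().word_size() == 10000 and task_size == 4096, the ceiling
// division (10000 + 4095)/4096 yields n_tasks == 3; the asserts above hold
// since 2*4096 < 10000 <= 3*4096, i.e. the last task is the only one that
// may extend past the end of the used region.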

// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                                         CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
  assert(span.is_empty() ||
         ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
         "span should start at a card boundary");
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
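
// Illustrative note on the clipping above (editorial sketch, hypothetical
// geometry): with card_size == 512 bytes (64 words on a 64-bit VM) and low
// at word offset 1000 inside the reserved span, align_size_down() moves the
// span start back to word offset 960 (15*64), so every task boundary, and
// hence every block_offset_careful() query at a boundary, lands on a card
// boundary.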