annotate src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @ 3285:49a67202bc67

7011855: G1: non-product flag to artificially grow the heap
Summary: Introduces the non-product command-line parameter G1DummyRegionsPerGC, which indicates how many "dummy" regions to allocate at the end of each GC. This lets the G1 heap grow artificially and makes concurrent marking cycles more frequent, regardless of what the running application is doing. The dummy regions are found completely empty during cleanup, so the parameter can also be used to stress the concurrent cleanup operation.
Reviewed-by: brutisso, johnc
author: tonyp
date:   Tue, 19 Apr 2011 15:46:59 -0400

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Defaults are 0 so things will break badly if incorrectly initialized.
int CompactibleFreeListSpace::IndexSetStart  = 0;
int CompactibleFreeListSpace::IndexSetStride = 0;

size_t MinChunkSize = 0;

void CompactibleFreeListSpace::set_cms_values() {
  // Set CMS global values
  assert(MinChunkSize == 0, "already set");
#define numQuanta(x,y) ((x+y-1)/y)
  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;

  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  IndexSetStart  = MinObjAlignment;
  IndexSetStride = MinObjAlignment;
}
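
// Editor's illustration (not part of the original source): numQuanta(x, y)
// computes ceil(x/y). For example, if sizeof(FreeChunk) were 24 bytes on a
// 64-bit VM with the default 8-byte object alignment (MinObjAlignment == 1
// heap word), then numQuanta(24, 8) == 3 and MinChunkSize == 3 * 1 == 3 heap
// words. The concrete value of sizeof(FreeChunk) is platform-dependent, so
// these numbers are illustrative only.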

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSConcMarkMultiple),
  _collector(NULL)
{
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary::dictionarySplayTree:
    case FreeBlockDictionary::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
    case FreeBlockDictionary::dictionaryBinaryTree:
      _dictionary = new BinaryTreeDictionary(mr);
      break;
  }
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's. Also a cms perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks
  if (!use_adaptive_freelists) {
    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  checkFreeListConsistency();

  // Initialize locks for parallel case.

  if (CollectedHeap::use_parallel_gc_threads()) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      if (_indexedFreeListParLocks[i] == NULL)
        vm_exit_during_initialization("Could not allocate a par lock");
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}
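
// Editor's note (an illustration, not in the original source): _lockRank
// starts at Mutex::leaf + 3 and is post-decremented once per constructed
// CompactibleFreeListSpace, so with the two CMS spaces of this era (the
// concurrent generation and the CMS perm gen) the free list locks would
// receive ranks leaf+3 and leaf+2, matching the range [_leaf+2, _leaf+3]
// cited in the constructor comment above.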

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table. Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
    "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
    "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this", so adjust the size again).
      // Use the virtual method which is not used above to save the virtual
      // dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold(). A contiguous space can only call it when
  // the compaction_top exceeds the current threshold but not for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block. The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize them to NULL.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used. It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}
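
// Editor's illustration (not in the original source): the indexed free
// lists map a chunk's word size directly to an array slot, so on a 64-bit
// VM with 8-byte object alignment (IndexSetStride == 1) a free chunk of
// 17 heap words lives on _indexedFreeList[17]. On platforms where objects
// are double-word aligned (IndexSetStride == 2), the odd slots simply
// remain empty, as the comment above notes.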

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (int i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->setSize(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
    }
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->findLargestDict();
    if (fc != NULL) {
      assert(fc->size() == mr.word_size(),
        "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
        1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}

#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->totalCount();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->isFree();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->maxChunkSize();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}
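
// Editor's illustration of the loop-test caveat above (not in the original
// source): i is an unsigned size_t. If res == 0 and the test were "i > res",
// an iteration entered with 0 < i < IndexSetStride would wrap around on
// "i -= IndexSetStride" to a huge unsigned value and the loop would never
// terminate. With "i >= res + IndexSetStride", the decrement can never take
// i below zero.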

void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
            _ptr, _word_size, _refillSize, _allocation_size_limit);
}

void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
               _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
}

void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics();
  gclog_or_tty->print_cr("Layout of Indexed Freelists");
  gclog_or_tty->print_cr("---------------------------");
  FreeList::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(gclog_or_tty);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
      gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
                             fc, (HeapWord*)fc + i,
                             fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}

void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}

void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->reportStatistics();
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}

class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  size_t do_blk(HeapWord* addr);
};

size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      addr,
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      addr, sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}

void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
  outputStream* st) {
  st->print_cr("\n=========================");
  st->print_cr("Block layout in CMS Heap:");
  st->print_cr("=========================");
  BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
  blk_iterate(&bpcl);

  st->print_cr("\n=======================================");
  st->print_cr("Order & Layout of Promotion Info Blocks");
  st->print_cr("=======================================");
  print_promo_info_blocks(st);

  st->print_cr("\n===========================");
  st->print_cr("Order of Indexed Free Lists");
  st->print_cr("=========================");
  print_indexed_free_lists(st);

  st->print_cr("\n=================================");
  st->print_cr("Order of Free Lists in Dictionary");
  st->print_cr("=================================");
  print_dictionary_free_lists(st);
}

void CompactibleFreeListSpace::reportFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  assert(PrintFLSStatistics != 0, "Reporting error");
  _dictionary->reportStatistics();
  if (PrintFLSStatistics > 1) {
    reportIndexedFreeListStatistics();
    size_t totalSize = totalSizeInIndexedFreeLists() +
                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                      "--------------------------------\n");
  size_t totalSize = totalSizeInIndexedFreeLists();
  size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: %d\n", totalSize);
  gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
  }
}

size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
        "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded. Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      OopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                  HeapWord* bottom,             \
                                                  HeapWord* top,                \
                                                  ClosureType* cl) {            \
  if (SharedHeap::heap()->n_par_threads() > 0) {                                \
    walk_mem_region_with_cl_par(mr, bottom, top, cl);                           \
  } else {                                                                      \
    walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                         \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
                                                      HeapWord* bottom,         \
                                                      HeapWord* top,            \
                                                      ClosureType* cl) {        \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
                                                        HeapWord* bottom,       \
                                                        HeapWord* top,          \
                                                        ClosureType* cl) {      \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
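
// Editor's note (not in the original source): each use of the DEFN macro
// above expands to the three member definitions (walk_mem_region_with_cl,
// _par, and _nopar) specialized for one closure type, so the OopClosure and
// FilteringClosure instantiations together supply bodies for all six
// methods declared through walk_mem_region_with_cl_DECL.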

DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
                                      HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}


// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}
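
// Editor's sketch (not part of the original source): a minimal BlkClosure,
// assuming the BlkClosure interface declared in memory/space.hpp, that
// counts the blocks visited by blk_iterate() above. do_blk() must return
// the word size of the block it was handed so iteration can advance, and
// the caller must hold the freelistLock, per the assert in blk_iterate():
//
//   class CountBlocksClosure: public BlkClosure {
//     CompactibleFreeListSpace* _sp;  // space whose blocks we measure
//    public:
//     size_t _blocks;                 // number of blocks visited
//     CountBlocksClosure(CompactibleFreeListSpace* sp) :
//       _sp(sp), _blocks(0) { }
//     size_t do_blk(HeapWord* addr) {
//       _blocks++;
//       return _sp->block_size(addr); // advance past this block
//     }
//   };
//
// Usage: CountBlocksClosure cbc(this); blk_iterate(&cbc);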
773 | |
774 // Apply the given closure to each oop in the space. | |
775 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) { | |
776 assert_lock_strong(freelistLock()); | |
777 HeapWord *cur, *limit; | |
778 size_t curSize; | |
779 for (cur = bottom(), limit = end(); cur < limit; | |
780 cur += curSize) { | |
781 curSize = block_size(cur); | |
782 if (block_is_obj(cur)) { | |
783 oop(cur)->oop_iterate(cl); | |
784 } | |
785 } | |
786 } | |
787 | |
788 // Apply the given closure to each oop in the space \intersect memory region. | |
789 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) { | |
790 assert_lock_strong(freelistLock()); | |
791 if (is_empty()) { | |
792 return; | |
793 } | |
794 MemRegion cur = MemRegion(bottom(), end()); | |
795 mr = mr.intersection(cur); | |
796 if (mr.is_empty()) { | |
797 return; | |
798 } | |
799 if (mr.equals(cur)) { | |
800 oop_iterate(cl); | |
801 return; | |
802 } | |
803 assert(mr.end() <= end(), "just took an intersection above"); | |
804 HeapWord* obj_addr = block_start(mr.start()); | |
805 HeapWord* t = mr.end(); | |
806 | |
807 SpaceMemRegionOopsIterClosure smr_blk(cl, mr); | |
808 if (block_is_obj(obj_addr)) { | |
809 // Handle first object specially. | |
810 oop obj = oop(obj_addr); | |
811 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk)); | |
812 } else { | |
813 FreeChunk* fc = (FreeChunk*)obj_addr; | |
814 obj_addr += fc->size(); | |
815 } | |
816 while (obj_addr < t) { | |
817 HeapWord* obj = obj_addr; | |
818 obj_addr += block_size(obj_addr); | |
819 // If "obj_addr" is not greater than t, the region's end, then the |
820 // entire object "obj" is within the region. |
821 if (obj_addr <= t) { | |
822 if (block_is_obj(obj)) { | |
823 oop(obj)->oop_iterate(cl); | |
824 } | |
825 } else { | |
826 // "obj" extends beyond end of region | |
827 if (block_is_obj(obj)) { | |
828 oop(obj)->oop_iterate(&smr_blk); | |
829 } | |
830 break; | |
831 } | |
832 } | |
833 } | |
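A concrete (hypothetical) picture of the loop above: with mr = [100, 200) and block_start(mr.start()) returning 96, the first block straddles the region start and is visited through the filtering closure smr_blk, which applies cl only to the oops inside mr. Each interior block whose end lands at or before t = 200 is visited with the plain closure, and a final object straddling 200 is again given smr_blk before the loop breaks out.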
834 | |
835 // NOTE: In the following methods, in order to safely be able to | |
836 // apply the closure to an object, we need to be sure that the | |
837 // object has been initialized. We are guaranteed that an object | |
838 // is initialized if we are holding the Heap_lock with the | |
839 // world stopped. | |
840 void CompactibleFreeListSpace::verify_objects_initialized() const { | |
841 if (is_init_completed()) { | |
842 assert_locked_or_safepoint(Heap_lock); | |
843 if (Universe::is_fully_initialized()) { | |
844 guarantee(SafepointSynchronize::is_at_safepoint(), | |
845 "Required for objects to be initialized"); | |
846 } | |
847 } // else make a concession at vm start-up | |
848 } | |
849 | |
850 // Apply the given closure to each object in the space | |
851 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) { | |
852 assert_lock_strong(freelistLock()); | |
853 NOT_PRODUCT(verify_objects_initialized()); | |
854 HeapWord *cur, *limit; | |
855 size_t curSize; | |
856 for (cur = bottom(), limit = end(); cur < limit; | |
857 cur += curSize) { | |
858 curSize = block_size(cur); | |
859 if (block_is_obj(cur)) { | |
860 blk->do_object(oop(cur)); | |
861 } | |
862 } | |
863 } | |
864 | |
517 | 865 // Apply the given closure to each live object in the space. |
866 // The use of CompactibleFreeListSpace |
867 // by the ConcurrentMarkSweepGeneration for concurrent GCs means that |
868 // objects in the space may hold references to objects that are no longer |
869 // valid. For example, an object may reference another object |
870 // that has already been swept up (collected). This method uses |
871 // obj_is_alive() to determine whether it is safe to apply the closure to |
872 // an object. See obj_is_alive() for details on how liveness of an |
873 // object is decided. |
874 |
875 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) { |
876 assert_lock_strong(freelistLock()); |
877 NOT_PRODUCT(verify_objects_initialized()); |
878 HeapWord *cur, *limit; |
879 size_t curSize; |
880 for (cur = bottom(), limit = end(); cur < limit; |
881 cur += curSize) { |
882 curSize = block_size(cur); |
883 if (block_is_obj(cur) && obj_is_alive(cur)) { |
884 blk->do_object(oop(cur)); |
885 } |
886 } |
887 } |
888 |
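A minimal (hypothetical) client of safe_object_iterate(), of the kind a heap walker such as jmap depends on; the closure name is invented, but do_object() is the ObjectClosure contract used above:

    // Hypothetical closure: counts the live objects it is shown.
    class CountLiveClosure : public ObjectClosure {
     public:
      size_t _live;
      CountLiveClosure() : _live(0) {}
      void do_object(oop obj) { _live++; } // invoked only for live objects here
    };
    // With the free list lock held:
    //   CountLiveClosure cnt;
    //   sp->safe_object_iterate(&cnt);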
0 | 889 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, |
890 UpwardsObjectClosure* cl) { | |
1145 | 891 assert_locked(freelistLock()); |
0 | 892 NOT_PRODUCT(verify_objects_initialized()); |
893 Space::object_iterate_mem(mr, cl); | |
894 } | |
895 | |
896 // Callers of this iterator beware: The closure application should | |
897 // be robust in the face of uninitialized objects and should (always) | |
898 // return a correct size so that the next addr + size below gives us a | |
899 // valid block boundary. [See for instance, | |
900 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
901 // in ConcurrentMarkSweepGeneration.cpp.] | |
902 HeapWord* | |
903 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) { | |
904 assert_lock_strong(freelistLock()); | |
905 HeapWord *addr, *last; | |
906 size_t size; | |
907 for (addr = bottom(), last = end(); | |
908 addr < last; addr += size) { | |
909 FreeChunk* fc = (FreeChunk*)addr; | |
910 if (fc->isFree()) { | |
911 // Since we hold the free list lock, which protects direct | |
912 // allocation in this generation by mutators, a free object | |
913 // will remain free throughout this iteration code. | |
914 size = fc->size(); | |
915 } else { | |
916 // Note that the object need not necessarily be initialized, | |
917 // because (for instance) the free list lock does NOT protect | |
918 // object initialization. The closure application below must | |
919 // therefore be correct in the face of uninitialized objects. | |
920 size = cl->do_object_careful(oop(addr)); | |
921 if (size == 0) { | |
922 // An unparsable object found. Signal early termination. | |
923 return addr; | |
924 } | |
925 } | |
926 } | |
927 return NULL; | |
928 } | |
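The "return 0 to signal early termination" contract above can be sketched as follows. This is illustrative only: the closure name is invented, the full ObjectClosureCareful interface also requires the MemRegion variant used by object_iterate_careful_m() below, and in this space a block size must be the adjusted object size (cf. adjustObjectSize()):

    // Hypothetical careful closure: counts parsable objects, bails on others.
    class ParsableOnlyClosure : public ObjectClosureCareful {
      size_t _parsable;
     public:
      ParsableOnlyClosure() : _parsable(0) {}
      size_t do_object_careful(oop obj) {
        if (!obj->is_parsable()) {
          return 0;               // unparsable: caller terminates the walk early
        }
        _parsable++;              // safe to examine the object here
        return CompactibleFreeListSpace::adjustObjectSize(obj->size());
      }
    };

Returning anything other than the true (adjusted) block size would break the addr + size stepping in the loop above.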
929 | |
930 // Callers of this iterator beware: The closure application should | |
931 // be robust in the face of uninitialized objects and should (always) | |
932 // return a correct size so that the next addr + size below gives us a | |
933 // valid block boundary. [See for instance, | |
934 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
935 // in ConcurrentMarkSweepGeneration.cpp.] | |
936 HeapWord* | |
937 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr, | |
938 ObjectClosureCareful* cl) { | |
939 assert_lock_strong(freelistLock()); | |
940 // Can't use used_region() below because it may not necessarily | |
941 // be the same as [bottom(),end()); although we could | |
942 // use [used_region().start(),round_to(used_region().end(),CardSize)), | |
943 // that appears too cumbersome, so we just do the simpler check | |
944 // in the assertion below. | |
945 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr), | |
946 "mr should be non-empty and within used space"); | |
947 HeapWord *addr, *end; | |
948 size_t size; | |
949 for (addr = block_start_careful(mr.start()), end = mr.end(); | |
950 addr < end; addr += size) { | |
951 FreeChunk* fc = (FreeChunk*)addr; | |
952 if (fc->isFree()) { | |
953 // Since we hold the free list lock, which protects direct | |
954 // allocation in this generation by mutators, a free object | |
955 // will remain free throughout this iteration code. | |
956 size = fc->size(); | |
957 } else { | |
958 // Note that the object need not necessarily be initialized, | |
959 // because (for instance) the free list lock does NOT protect | |
960 // object initialization. The closure application below must | |
961 // therefore be correct in the face of uninitialized objects. | |
962 size = cl->do_object_careful_m(oop(addr), mr); | |
963 if (size == 0) { | |
964 // An unparsable object found. Signal early termination. | |
965 return addr; | |
966 } | |
967 } | |
968 } | |
969 return NULL; | |
970 } | |
971 | |
972 | |
342 | 973 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const { |
0 | 974 NOT_PRODUCT(verify_objects_initialized()); |
975 return _bt.block_start(p); | |
976 } | |
977 | |
978 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const { | |
979 return _bt.block_start_careful(p); | |
980 } | |
981 | |
982 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const { | |
983 NOT_PRODUCT(verify_objects_initialized()); | |
984 // This must be volatile, or else there is a danger that the compiler | |
985 // will compile the code below into a sometimes-infinite loop, by keeping | |
986 // the value read the first time in a register. | |
987 while (true) { | |
988 // We must do this until we get a consistent view of the object. | |
187 | 989 if (FreeChunk::indicatesFreeChunk(p)) { |
990 volatile FreeChunk* fc = (volatile FreeChunk*)p; | |
991 size_t res = fc->size(); | |
992 // If the object is still a free chunk, return the size, else it | |
993 // has been allocated so try again. | |
994 if (FreeChunk::indicatesFreeChunk(p)) { | |
0 | 995 assert(res != 0, "Block size should not be 0"); |
996 return res; | |
997 } | |
187 | 998 } else { |
999 // must read from what 'p' points to in each loop. | |
1000 klassOop k = ((volatile oopDesc*)p)->klass_or_null(); | |
1001 if (k != NULL) { | |
1716 | 1002 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop"); |
187 | 1003 oop o = (oop)p; |
1004 assert(o->is_parsable(), "Should be parsable"); | |
1005 assert(o->is_oop(true /* ignore mark word */), "Should be an oop."); | |
1006 size_t res = o->size_given_klass(k->klass_part()); | |
1007 res = adjustObjectSize(res); | |
1008 assert(res != 0, "Block size should not be 0"); | |
1009 return res; | |
1010 } | |
0 | 1011 } |
1012 } | |
1013 } | |
1014 | |
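The read-check-reread idiom above deserves a note: between the two indicatesFreeChunk() tests a mutator may allocate the chunk and overwrite its size field, so the first read of fc->size() may be garbage. The second test detects that race, the stale size is discarded, and the loop retries on the object path once klass_or_null() publishes a klass. Schematically (hypothetical helper names, illustration only):

    // Generic double-checked read of a racily discriminated field.
    for (;;) {
      if (is_free(p)) {             // 1st check: currently looks free
        size_t sz = free_size(p);   // dependent read; may race with allocation
        if (is_free(p)) {           // 2nd check: still free?
          return sz;                // both checks agree: sz is trustworthy
        }
        // else: allocated under us; retry via the object path
      } else if (has_klass(p)) {    // object fully initialized?
        return object_size(p);      // size can be read from the klass
      }                             // else: spin until initialization completes
    }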
1015 // A variant of the above that uses the Printezis bits for | |
1016 // unparsable but allocated objects. This avoids any possible | |
1017 // stalls waiting for mutators to initialize objects, and is | |
1018 // thus potentially faster than the variant above. However, | |
1019 // this variant may return a zero size for a block that is | |
1020 // under mutation and for which a consistent size cannot be | |
1021 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits(). | |
1022 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p, | |
1023 const CMSCollector* c) | |
1024 const { | |
1025 assert(MemRegion(bottom(), end()).contains(p), "p not in space"); | |
1026 // This must be volatile, or else there is a danger that the compiler | |
1027 // will compile the code below into a sometimes-infinite loop, by keeping | |
1028 // the value read the first time in a register. | |
1029 DEBUG_ONLY(uint loops = 0;) | |
1030 while (true) { | |
1031 // We must do this until we get a consistent view of the object. | |
187 | 1032 if (FreeChunk::indicatesFreeChunk(p)) { |
1033 volatile FreeChunk* fc = (volatile FreeChunk*)p; | |
1034 size_t res = fc->size(); | |
1035 if (FreeChunk::indicatesFreeChunk(p)) { | |
0 | 1036 assert(res != 0, "Block size should not be 0"); |
1037 assert(loops == 0, "Should be 0"); | |
1038 return res; | |
1039 } | |
1040 } else { | |
187 | 1041 // must read from what 'p' points to in each loop. |
1042 klassOop k = ((volatile oopDesc*)p)->klass_or_null(); | |
2226 | 1043 // We trust the size of any object that has a non-NULL |
1044 // klass and (for those in the perm gen) is parsable |
1045 // -- irrespective of its conc_safe-ty. |
1046 if (k != NULL && ((oopDesc*)p)->is_parsable()) { |
187 | 1047 assert(k->is_oop(), "Should really be klass oop."); |
1048 oop o = (oop)p; | |
1049 assert(o->is_oop(), "Should be an oop"); | |
1050 size_t res = o->size_given_klass(k->klass_part()); | |
1051 res = adjustObjectSize(res); | |
1052 assert(res != 0, "Block size should not be 0"); | |
1053 return res; | |
1054 } else { | |
2226 | 1055 // May return 0 if P-bits not present. |
187 | 1056 return c->block_size_if_printezis_bits(p); |
1057 } | |
0 | 1058 } |
1059 assert(loops == 0, "Can loop at most once"); | |
1060 DEBUG_ONLY(loops++;) | |
1061 } | |
1062 } | |
1063 | |
1064 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const { | |
1065 NOT_PRODUCT(verify_objects_initialized()); | |
1066 assert(MemRegion(bottom(), end()).contains(p), "p not in space"); | |
1067 FreeChunk* fc = (FreeChunk*)p; | |
1068 if (fc->isFree()) { | |
1069 return fc->size(); | |
1070 } else { | |
1071 // Ignore mark word because this may be a recently promoted | |
1072 // object whose mark word is used to chain together grey | |
1073 // objects (the last one would have a null value). | |
1074 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1075 return adjustObjectSize(oop(p)->size()); | |
1076 } | |
1077 } | |
1078 | |
1079 // This implementation assumes that the property of "being an object" is | |
1080 // stable. But being a free chunk may not be (because of parallel | |
1081 // promotion.) | |
1082 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const { | |
1083 FreeChunk* fc = (FreeChunk*)p; | |
1084 assert(is_in_reserved(p), "Should be in space"); | |
1085 // When doing a mark-sweep-compact of the CMS generation, this | |
1086 // assertion may fail because prepare_for_compaction() uses | |
1087 // space that is garbage to maintain information on ranges of | |
1088 // live objects so that these live ranges can be moved as a whole. | |
1089 // Comment out this assertion until that problem can be solved |
1090 // (i.e., the block start calculation may look at objects at |
1091 // addresses below "p" in finding the object that contains "p", |
1092 // and those objects (if garbage) may have been modified to hold |
1093 // live range information). |
1833 | 1094 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p, |
1095 // "Should be a block boundary"); |
187 | 1096 if (FreeChunk::indicatesFreeChunk(p)) return false; |
1097 klassOop k = oop(p)->klass_or_null(); | |
0 | 1098 if (k != NULL) { |
1099 // Ignore mark word because it may have been used to | |
1100 // chain together promoted objects (the last one | |
1101 // would have a null value). | |
1102 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1103 return true; | |
1104 } else { | |
1105 return false; // Was not an object at the start of collection. | |
1106 } | |
1107 } | |
1108 | |
1109 // Check if the object is alive. This fact is checked either by consulting | |
1110 // the main marking bitmap in the sweeping phase or, if it's a permanent | |
1111 // generation and we're not in the sweeping phase, by checking the | |
1112 // perm_gen_verify_bit_map where we store the "deadness" information if | |
1113 // we did not sweep the perm gen in the most recent previous GC cycle. | |
1114 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const { | |
1959 | 1115 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(), |
1116 "Else races are possible"); |
1951 | 1117 assert(block_is_obj(p), "The address should point to an object"); |
0 | 1118 |
1119 // If we're sweeping, we use object liveness information from the main bit map | |
1120 // for both perm gen and old gen. | |
1121 // We don't need to lock the bitmap (live_map or dead_map below), because | |
1122 // EITHER we are in the middle of the sweeping phase, and the | |
1123 // main marking bit map (live_map below) is locked, | |
1124 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below) | |
1125 // is stable, because it's mutated only in the sweeping phase. | |
1951 | 1126 // NOTE: This method is also used by jmap where, if class unloading is |
1127 // off, the results can return "false" for legitimate perm objects, |
1128 // when we are not in the midst of a sweeping phase, which can result |
1129 // in jmap not reporting certain perm gen objects. This will be moot |
1130 // if/when the perm gen goes away in the future. |
0 | 1131 if (_collector->abstract_state() == CMSCollector::Sweeping) { |
1132 CMSBitMap* live_map = _collector->markBitMap(); | |
1951 | 1133 return live_map->par_isMarked((HeapWord*) p); |
0 | 1134 } else { |
1135 // If we're not currently sweeping and we haven't swept the perm gen in | |
1136 // the previous concurrent cycle then we may have dead but unswept objects | |
1137 // in the perm gen. In this case, we use the "deadness" information | |
1138 // that we had saved in perm_gen_verify_bit_map at the last sweep. | |
1139 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) { | |
1140 if (_collector->verifying()) { | |
1141 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map(); | |
1142 // Object is marked in the dead_map bitmap at the previous sweep | |
1143 // when we know that it's dead; if the bitmap is not allocated then | |
1144 // the object is alive. | |
1145 return (dead_map->sizeInBits() == 0) // bit_map was never allocated |
1146 || !dead_map->par_isMarked((HeapWord*) p); | |
1147 } else { | |
1148 return false; // We can't say for sure if it's live, so we say that it's dead. | |
1149 } | |
1150 } | |
1151 } | |
1152 return true; | |
1153 } | |
1154 | |
1155 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const { | |
1156 FreeChunk* fc = (FreeChunk*)p; | |
1157 assert(is_in_reserved(p), "Should be in space"); | |
1158 assert(_bt.block_start(p) == p, "Should be a block boundary"); | |
1159 if (!fc->isFree()) { | |
1160 // Ignore mark word because it may have been used to | |
1161 // chain together promoted objects (the last one | |
1162 // would have a null value). | |
1163 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1164 return true; | |
1165 } | |
1166 return false; | |
1167 } | |
1168 | |
1169 // "MT-safe but not guaranteed MT-precise" (TM); you may get an | |
1170 // approximate answer if you don't hold the freelistLock when you call this. |
1171 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const { | |
1172 size_t size = 0; | |
1173 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
1174 debug_only( | |
1175 // We may be calling here without the lock in which case we | |
1176 // won't do this modest sanity check. | |
1177 if (freelistLock()->owned_by_self()) { | |
1178 size_t total_list_size = 0; | |
1179 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; | |
1180 fc = fc->next()) { | |
1181 total_list_size += i; | |
1182 } | |
1183 assert(total_list_size == i * _indexedFreeList[i].count(), | |
1184 "Count in list is incorrect"); | |
1185 } | |
1186 ) | |
1187 size += i * _indexedFreeList[i].count(); | |
1188 } | |
1189 return size; | |
1190 } | |
1191 | |
1192 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) { | |
1193 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
1194 return allocate(size); | |
1195 } | |
1196 | |
1197 HeapWord* | |
1198 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) { | |
1199 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size); | |
1200 } | |
1201 | |
1202 HeapWord* CompactibleFreeListSpace::allocate(size_t size) { | |
1203 assert_lock_strong(freelistLock()); | |
1204 HeapWord* res = NULL; | |
1205 assert(size == adjustObjectSize(size), | |
1206 "use adjustObjectSize() before calling into allocate()"); | |
1207 | |
1208 if (_adaptive_freelists) { | |
1209 res = allocate_adaptive_freelists(size); | |
1210 } else { // non-adaptive free lists | |
1211 res = allocate_non_adaptive_freelists(size); | |
1212 } | |
1213 | |
1214 if (res != NULL) { | |
1215 // check that res does lie in this space! | |
1216 assert(is_in_reserved(res), "Not in this space!"); | |
1217 assert(is_aligned((void*)res), "alignment check"); | |
1218 | |
1219 FreeChunk* fc = (FreeChunk*)res; | |
1220 fc->markNotFree(); | |
1221 assert(!fc->isFree(), "shouldn't be marked free"); | |
187 | 1222 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized"); |
0 | 1223 // Verify that the block offset table shows this to |
1224 // be a single block, but not one which is unallocated. | |
1225 _bt.verify_single_block(res, size); | |
1226 _bt.verify_not_unallocated(res, size); | |
1227 // mangle a just allocated object with a distinct pattern. | |
1228 debug_only(fc->mangleAllocated(size)); | |
1229 } | |
1230 | |
1231 return res; | |
1232 } | |
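As the assert above insists, callers pre-adjust the requested size. A minimal (hypothetical) call sequence, mirroring the locking done by par_allocate() above:

    size_t adj_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
    MutexLockerEx x(sp->freelistLock(), Mutex::_no_safepoint_check_flag);
    HeapWord* res = sp->allocate(adj_sz);   // may still return NULL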
1233 | |
1234 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) { | |
1235 HeapWord* res = NULL; | |
1236 // try to use linear allocation for smaller blocks |
1237 if (size < _smallLinearAllocBlock._allocation_size_limit) { | |
1238 // if successful, the following also adjusts block offset table | |
1239 res = getChunkFromSmallLinearAllocBlock(size); | |
1240 } | |
1241 // Else triage to indexed lists for smaller sizes | |
1242 if (res == NULL) { | |
1243 if (size < SmallForDictionary) { | |
1244 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1245 } else { | |
1246 // else get it from the big dictionary; if even this doesn't | |
1247 // work we are out of luck. | |
1248 res = (HeapWord*)getChunkFromDictionaryExact(size); | |
1249 } | |
1250 } | |
1251 | |
1252 return res; | |
1253 } | |
1254 | |
1255 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) { | |
1256 assert_lock_strong(freelistLock()); | |
1257 HeapWord* res = NULL; | |
1258 assert(size == adjustObjectSize(size), | |
1259 "use adjustObjectSize() before calling into allocate()"); | |
1260 | |
1261 // Strategy | |
1262 // if small | |
1263 // exact size from small object indexed list |
1264 // small or large linear allocation block (linAB) as appropriate | |
1265 // take from lists of greater sized chunks | |
1266 // else | |
1267 // dictionary | |
1268 // small or large linear allocation block if it has the space | |
1269 // Try allocating exact size from indexTable first | |
1270 if (size < IndexSetSize) { | |
1271 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1272 if(res != NULL) { | |
1273 assert(res != (HeapWord*)_indexedFreeList[size].head(), | |
1274 "Not removed from free list"); | |
1275 // no block offset table adjustment is necessary on blocks in | |
1276 // the indexed lists. | |
1277 | |
1278 // Try allocating from the small LinAB | |
1279 } else if (size < _smallLinearAllocBlock._allocation_size_limit && | |
1280 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) { | |
1281 // if successful, the above also adjusts block offset table | |
1282 // Note that this call will refill the LinAB to | |
1283 // satisfy the request. This is different from |
1284 // evm. |
1285 // Don't record chunk off a LinAB? smallSplitBirth(size); | |
1286 } else { | |
1287 // Raid the exact free lists larger than size, even if they are not | |
1288 // overpopulated. | |
1289 res = (HeapWord*) getChunkFromGreater(size); | |
1290 } | |
1291 } else { | |
1292 // Big objects get allocated directly from the dictionary. | |
1293 res = (HeapWord*) getChunkFromDictionaryExact(size); | |
1294 if (res == NULL) { | |
1295 // Try hard not to fail since an allocation failure will likely | |
1296 // trigger a synchronous GC. Try to get the space from the | |
1297 // allocation blocks. | |
1298 res = getChunkFromSmallLinearAllocBlockRemainder(size); | |
1299 } | |
1300 } | |
1301 | |
1302 return res; | |
1303 } | |
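A concrete (hypothetical) trace of the strategy above: a request of, say, 24 words (below IndexSetSize) first tries the exact list _indexedFreeList[24]; failing that, the small linAB, provided 24 is under its _allocation_size_limit; failing that, getChunkFromGreater(24) raids the larger exact lists and, ultimately, the dictionary. A request at or above IndexSetSize goes straight to the dictionary, falling back on the small linAB's remainder only to stave off an allocation failure.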
1304 | |
1305 // A worst-case estimate of the space required (in HeapWords) to expand the heap | |
1306 // when promoting obj. | |
1307 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const { | |
1308 // Depending on the object size, expansion may require refilling either a | |
1309 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize | |
1310 // is added because the dictionary may over-allocate to avoid fragmentation. | |
1311 size_t space = obj_size; | |
1312 if (!_adaptive_freelists) { | |
1313 space = MAX2(space, _smallLinearAllocBlock._refillSize); | |
1314 } | |
1315 space += _promoInfo.refillSize() + 2 * MinChunkSize; | |
1316 return space; | |
1317 } | |
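With hypothetical numbers: promoting a 10-word object with adaptive free lists enabled, a _promoInfo refill size of 128 words, and MinChunkSize of 4 words gives 10 + 128 + 2*4 = 146 words. With adaptive free lists disabled and a linAB refill size of 256 words, the first term becomes MAX2(10, 256) = 256, for 256 + 128 + 8 = 392 words.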
1318 | |
1319 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) { | |
1320 FreeChunk* ret; | |
1321 | |
1322 assert(numWords >= MinChunkSize, "Size is less than minimum"); | |
1323 assert(linearAllocationWouldFail() || bestFitFirst(), | |
1324 "Should not be here"); | |
1325 | |
1326 size_t i; | |
1327 size_t currSize = numWords + MinChunkSize; | |
1328 assert(currSize % MinObjAlignment == 0, "currSize should be aligned"); | |
1329 for (i = currSize; i < IndexSetSize; i += IndexSetStride) { | |
1330 FreeList* fl = &_indexedFreeList[i]; | |
1331 if (fl->head()) { | |
1332 ret = getFromListGreater(fl, numWords); | |
1333 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); | |
1334 return ret; | |
1335 } | |
1336 } | |
1337 | |
1338 currSize = MAX2((size_t)SmallForDictionary, | |
1339 (size_t)(numWords + MinChunkSize)); | |
1340 | |
1341 /* Try to get a chunk that satisfies request, while avoiding | |
1342 fragmentation that can't be handled. */ | |
1343 { | |
1344 ret = dictionary()->getChunk(currSize); | |
1345 if (ret != NULL) { | |
1346 assert(ret->size() - numWords >= MinChunkSize, | |
1347 "Chunk is too small"); | |
1348 _bt.allocated((HeapWord*)ret, ret->size()); | |
1349 /* Carve returned chunk. */ | |
1350 (void) splitChunkAndReturnRemainder(ret, numWords); | |
1351 /* Label this as no longer a free chunk. */ | |
1352 assert(ret->isFree(), "This chunk should be free"); | |
1353 ret->linkPrev(NULL); | |
1354 } | |
1355 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); | |
1356 return ret; | |
1357 } | |
1358 ShouldNotReachHere(); | |
1359 } | |
1360 | |
1361 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) | |
1362 const { | |
1363 assert(fc->size() < IndexSetSize, "Size of chunk is too large"); | |
1364 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc); | |
1365 } | |
1366 | |
1367 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const { | |
1368 if (fc->size() >= IndexSetSize) { | |
1369 return dictionary()->verifyChunkInFreeLists(fc); | |
1370 } else { | |
1371 return verifyChunkInIndexedFreeLists(fc); | |
1372 } | |
1373 } | |
1374 | |
1375 #ifndef PRODUCT | |
1376 void CompactibleFreeListSpace::assert_locked() const { | |
1377 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock()); | |
1378 } | |
1145 | 1379 |
1380 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const { |
1381 CMSLockVerifier::assert_locked(lock); |
1382 } |
0 | 1383 #endif |
1384 | |
1385 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) { | |
1386 // In the parallel case, the main thread holds the free list lock | |
1387 // on behalf of the parallel threads. |
1388 FreeChunk* fc; | |
1389 { | |
1390 // If GC is parallel, this might be called by several threads. | |
1391 // This should be rare enough that the locking overhead won't affect | |
1392 // the sequential code. | |
1393 MutexLockerEx x(parDictionaryAllocLock(), | |
1394 Mutex::_no_safepoint_check_flag); | |
1395 fc = getChunkFromDictionary(size); | |
1396 } | |
1397 if (fc != NULL) { | |
1398 fc->dontCoalesce(); | |
1399 assert(fc->isFree(), "Should be free, but not coalescable"); | |
1400 // Verify that the block offset table shows this to | |
1401 // be a single block, but not one which is unallocated. | |
1402 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1403 _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); | |
1404 } | |
1405 return fc; | |
1406 } | |
1407 | |
113 | 1408 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) { |
0 | 1409 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1410 assert_locked(); | |
1411 | |
1412 // if we are tracking promotions, then first ensure space for | |
1413 // promotion (including spooling space for saving header if necessary). | |
1414 // then allocate and copy, then track promoted info if needed. | |
1415 // When tracking (see PromotionInfo::track()), the mark word may | |
1416 // be displaced and in this case restoration of the mark word | |
1417 // occurs in the (oop_since_save_marks_)iterate phase. | |
1418 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) { | |
1419 return NULL; | |
1420 } | |
1421 // Call the allocate(size_t, bool) form directly to avoid the | |
1422 // additional call through the allocate(size_t) form. Having | |
1423 // the compile inline the call is problematic because allocate(size_t) | |
1424 // is a virtual method. | |
1425 HeapWord* res = allocate(adjustObjectSize(obj_size)); | |
1426 if (res != NULL) { | |
1427 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size); | |
1428 // if we should be tracking promotions, do so. | |
1429 if (_promoInfo.tracking()) { | |
1430 _promoInfo.track((PromotedObject*)res); | |
1431 } | |
1432 } | |
1433 return oop(res); | |
1434 } | |
1435 | |
1436 HeapWord* | |
1437 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) { | |
1438 assert_locked(); | |
1439 assert(size >= MinChunkSize, "minimum chunk size"); | |
1440 assert(size < _smallLinearAllocBlock._allocation_size_limit, | |
1441 "maximum from smallLinearAllocBlock"); | |
1442 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size); | |
1443 } | |
1444 | |
1445 HeapWord* | |
1446 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk, | |
1447 size_t size) { | |
1448 assert_locked(); | |
1449 assert(size >= MinChunkSize, "too small"); | |
1450 HeapWord* res = NULL; | |
1451 // Try to do linear allocation from blk, first making sure that it is non-empty. |
1452 if (blk->_word_size == 0) { | |
1453 // We have probably been unable to fill this either in the prologue or | |
1454 // when it was exhausted at the last linear allocation. Bail out until | |
1455 // next time. | |
1456 assert(blk->_ptr == NULL, "consistency check"); | |
1457 return NULL; | |
1458 } | |
1459 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check"); | |
1460 res = getChunkFromLinearAllocBlockRemainder(blk, size); | |
1461 if (res != NULL) return res; | |
1462 | |
1463 // about to exhaust this linear allocation block | |
1464 if (blk->_word_size == size) { // exactly satisfied | |
1465 res = blk->_ptr; | |
1466 _bt.allocated(res, blk->_word_size); | |
1467 } else if (size + MinChunkSize <= blk->_refillSize) { | |
1145 | 1468 size_t sz = blk->_word_size; |
0 | 1469 // Update _unallocated_block if the size is such that chunk would be |
1470 // returned to the indexed free list. All other chunks in the indexed | |
1471 // free lists are allocated from the dictionary so that _unallocated_block | |
1472 // has already been adjusted for them. Do it here so that the cost |
1473 // is incurred for all chunks added back to the indexed free lists. |
1145 | 1474 if (sz < SmallForDictionary) { |
1475 _bt.allocated(blk->_ptr, sz); |
0 | 1476 } |
1477 // Return the chunk that isn't big enough, and then refill below. | |
1145 | 1478 addChunkToFreeLists(blk->_ptr, sz); |
1479 splitBirth(sz); |
0 | 1480 // Don't keep statistics on adding back chunk from a LinAB. |
1481 } else { | |
1482 // A refilled block would not satisfy the request. | |
1483 return NULL; | |
1484 } | |
1485 | |
1486 blk->_ptr = NULL; blk->_word_size = 0; | |
1487 refillLinearAllocBlock(blk); | |
1488 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize, | |
1489 "block was replenished"); | |
1490 if (res != NULL) { | |
1491 splitBirth(size); | |
1492 repairLinearAllocBlock(blk); | |
1493 } else if (blk->_ptr != NULL) { | |
1494 res = blk->_ptr; | |
1495 size_t blk_size = blk->_word_size; | |
1496 blk->_word_size -= size; | |
1497 blk->_ptr += size; | |
1498 splitBirth(size); | |
1499 repairLinearAllocBlock(blk); | |
1500 // Update BOT last so that other (parallel) GC threads see a consistent | |
1501 // view of the BOT and free blocks. | |
1502 // Above must occur before BOT is updated below. | |
1716 | 1503 OrderAccess::storestore(); |
0 | 1504 _bt.split_block(res, blk_size, size); // adjust block offset table |
1505 } | |
1506 return res; | |
1507 } | |
1508 | |
1509 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder( | |
1510 LinearAllocBlock* blk, | |
1511 size_t size) { | |
1512 assert_locked(); | |
1513 assert(size >= MinChunkSize, "too small"); | |
1514 | |
1515 HeapWord* res = NULL; | |
1516 // This is the common case. Keep it simple. | |
1517 if (blk->_word_size >= size + MinChunkSize) { | |
1518 assert(blk->_ptr != NULL, "consistency check"); | |
1519 res = blk->_ptr; | |
1520 // Note that the BOT is up-to-date for the linAB before allocation. It | |
1521 // indicates the start of the linAB. The split_block() updates the | |
1522 // BOT for the linAB after the allocation (indicates the start of the | |
1523 // next chunk to be allocated). | |
1524 size_t blk_size = blk->_word_size; | |
1525 blk->_word_size -= size; | |
1526 blk->_ptr += size; | |
1527 splitBirth(size); | |
1528 repairLinearAllocBlock(blk); | |
1529 // Update BOT last so that other (parallel) GC threads see a consistent | |
1530 // view of the BOT and free blocks. | |
1531 // Above must occur before BOT is updated below. | |
1716 | 1532 OrderAccess::storestore(); |
0 | 1533 _bt.split_block(res, blk_size, size); // adjust block offset table |
1534 _bt.allocated(res, size); | |
1535 } | |
1536 return res; | |
1537 } | |
1538 | |
1539 FreeChunk* | |
1540 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) { | |
1541 assert_locked(); | |
1542 assert(size < SmallForDictionary, "just checking"); | |
1543 FreeChunk* res; | |
1544 res = _indexedFreeList[size].getChunkAtHead(); | |
1545 if (res == NULL) { | |
1546 res = getChunkFromIndexedFreeListHelper(size); | |
1547 } | |
1548 _bt.verify_not_unallocated((HeapWord*) res, size); | |
1145 | 1549 assert(res == NULL || res->size() == size, "Incorrect block size"); |
0 | 1550 return res; |
1551 } | |
1552 | |
1553 FreeChunk* | |
1145 | 1554 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size, |
1555 bool replenish) { |
0 | 1556 assert_locked(); |
1557 FreeChunk* fc = NULL; | |
1558 if (size < SmallForDictionary) { | |
1559 assert(_indexedFreeList[size].head() == NULL || | |
1560 _indexedFreeList[size].surplus() <= 0, | |
1561 "List for this size should be empty or under populated"); | |
1562 // Try best fit in exact lists before replenishing the list | |
1563 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) { | |
1564 // Replenish list. | |
1565 // | |
1566 // Things tried that failed. | |
1567 // Tried allocating out of the two LinAB's first before | |
1568 // replenishing lists. | |
1569 // Tried small linAB of size 256 (size in indexed list) | |
1570 // and replenishing indexed lists from the small linAB. | |
1571 // | |
1572 FreeChunk* newFc = NULL; | |
1145 | 1573 const size_t replenish_size = CMSIndexedFreeListReplenish * size; |
0 | 1574 if (replenish_size < SmallForDictionary) { |
1575 // Do not replenish from an underpopulated size. | |
1576 if (_indexedFreeList[replenish_size].surplus() > 0 && | |
1577 _indexedFreeList[replenish_size].head() != NULL) { | |
1145 | 1578 newFc = _indexedFreeList[replenish_size].getChunkAtHead(); |
1579 } else if (bestFitFirst()) { |
0 | 1580 newFc = bestFitSmall(replenish_size); |
1581 } | |
1582 } | |
1145 | 1583 if (newFc == NULL && replenish_size > size) { |
0 | 1584 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant"); |
1145 | 1585 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false); |
0 | 1586 } |
1145 | 1587 // Note: The stats update regarding the split-death of the block obtained |
1588 // above is recorded below, precisely when we know we are actually |
1589 // going to split it into more than one piece. |
0 | 1590 if (newFc != NULL) { |
1145 | 1591 if (replenish || CMSReplenishIntermediate) { |
1592 // Replenish this list and return one block to caller. |
1593 size_t i; |
1594 FreeChunk *curFc, *nextFc; |
1595 size_t num_blk = newFc->size() / size; |
1596 assert(num_blk >= 1, "Smaller than requested?"); |
1597 assert(newFc->size() % size == 0, "Should be integral multiple of request"); |
1598 if (num_blk > 1) { |
1599 // we are sure we will be splitting the block just obtained |
1600 // into multiple pieces; record the split-death of the original |
1601 splitDeath(replenish_size); |
1602 } |
1603 // carve up and link blocks 0, ..., num_blk - 2 |
1604 // The last chunk is not added to the lists but is returned as the |
1605 // free chunk. |
1606 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), |
1607 i = 0; |
1608 i < (num_blk - 1); |
1609 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), |
1610 i++) { |
1611 curFc->setSize(size); |
1612 // Don't record this as a return in order to try and |
1613 // determine the "returns" from a GC. |
1614 _bt.verify_not_unallocated((HeapWord*) fc, size); |
1615 _indexedFreeList[size].returnChunkAtTail(curFc, false); |
1616 _bt.mark_block((HeapWord*)curFc, size); |
1617 splitBirth(size); |
1618 // Don't record the initial population of the indexed list |
1619 // as a split birth. |
1620 } |
1621 |
1622 // check that the arithmetic was OK above |
1623 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size, |
1624 "inconsistency in carving newFc"); |
0 | 1625 curFc->setSize(size); |
1626 _bt.mark_block((HeapWord*)curFc, size); | |
1627 splitBirth(size); | |
1145 | 1628 fc = curFc; |
1629 } else { |
1630 // Return entire block to caller |
1631 fc = newFc; |
0 | 1632 } |
1633 } | |
1634 } | |
1635 } else { | |
1636 // Get a free chunk from the free chunk dictionary to be returned to | |
1637 // replenish the indexed free list. | |
1638 fc = getChunkFromDictionaryExact(size); | |
1639 } | |
1145 | 1640 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk"); |
0 | 1641 return fc; |
1642 } | |
1643 | |
1644 FreeChunk* | |
1645 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) { | |
1646 assert_locked(); | |
1647 FreeChunk* fc = _dictionary->getChunk(size); | |
1648 if (fc == NULL) { | |
1649 return NULL; | |
1650 } | |
1651 _bt.allocated((HeapWord*)fc, fc->size()); | |
1652 if (fc->size() >= size + MinChunkSize) { | |
1653 fc = splitChunkAndReturnRemainder(fc, size); | |
1654 } | |
1655 assert(fc->size() >= size, "chunk too small"); | |
1656 assert(fc->size() < size + MinChunkSize, "chunk too big"); | |
1657 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1658 return fc; | |
1659 } | |
1660 | |
1661 FreeChunk* | |
1662 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) { | |
1663 assert_locked(); | |
1664 FreeChunk* fc = _dictionary->getChunk(size); | |
1665 if (fc == NULL) { | |
1666 return fc; | |
1667 } | |
1668 _bt.allocated((HeapWord*)fc, fc->size()); | |
1669 if (fc->size() == size) { | |
1670 _bt.verify_single_block((HeapWord*)fc, size); | |
1671 return fc; | |
1672 } | |
1673 assert(fc->size() > size, "getChunk() guarantee"); | |
1674 if (fc->size() < size + MinChunkSize) { | |
1675 // Return the chunk to the dictionary and go get a bigger one. | |
1676 returnChunkToDictionary(fc); | |
1677 fc = _dictionary->getChunk(size + MinChunkSize); | |
1678 if (fc == NULL) { | |
1679 return NULL; | |
1680 } | |
1681 _bt.allocated((HeapWord*)fc, fc->size()); | |
1682 } | |
1683 assert(fc->size() >= size + MinChunkSize, "tautology"); | |
1684 fc = splitChunkAndReturnRemainder(fc, size); | |
1685 assert(fc->size() == size, "chunk is wrong size"); | |
1686 _bt.verify_single_block((HeapWord*)fc, size); | |
1687 return fc; | |
1688 } | |
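A worked (hypothetical) trace of the too-small-remainder case above: with MinChunkSize at 4 words, a request for 300 words that the dictionary answers with a 302-word chunk cannot simply be split, because the 2-word remainder would be under MinChunkSize. The chunk is therefore returned to the dictionary and one of at least 304 words is requested instead, which guarantees that splitChunkAndReturnRemainder() leaves a remainder of at least MinChunkSize.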
1689 | |
1690 void | |
1691 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) { | |
1692 assert_locked(); | |
1693 | |
1694 size_t size = chunk->size(); | |
1695 _bt.verify_single_block((HeapWord*)chunk, size); | |
1696 // adjust _unallocated_block downward, as necessary | |
1697 _bt.freed((HeapWord*)chunk, size); | |
1698 _dictionary->returnChunk(chunk); | |
1145 | 1699 #ifndef PRODUCT |
1700 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
1701 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats(); |
1702 } |
1703 #endif // PRODUCT |
0 | 1704 } |
1705 | |
1706 void | |
1707 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) { | |
1708 assert_locked(); | |
1709 size_t size = fc->size(); | |
1710 _bt.verify_single_block((HeapWord*) fc, size); | |
1711 _bt.verify_not_unallocated((HeapWord*) fc, size); | |
1712 if (_adaptive_freelists) { | |
1713 _indexedFreeList[size].returnChunkAtTail(fc); | |
1714 } else { | |
1715 _indexedFreeList[size].returnChunkAtHead(fc); | |
1716 } | |
1145 | 1717 #ifndef PRODUCT |
1718 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
1719 _indexedFreeList[size].verify_stats(); |
1720 } |
1721 #endif // PRODUCT |
0 | 1722 } |
1723 | |
1724 // Add chunk to end of last block -- if it's the largest | |
1725 // block -- and update BOT and census data. We would | |
1726 // of course have preferred to coalesce it with the | |
1727 // last block, but it's currently less expensive to find the | |
1728 // largest block than it is to find the last. | |
1729 void | |
1730 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats( | |
1731 HeapWord* chunk, size_t size) { | |
1732 // check that the chunk does lie in this space! | |
1733 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1734 // One of the parallel gc task threads may be here | |
1735 // whilst others are allocating. | |
1736 Mutex* lock = NULL; | |
1737 if (ParallelGCThreads != 0) { | |
1738 lock = &_parDictionaryAllocLock; | |
1739 } | |
1740 FreeChunk* ec; | |
1741 { | |
1742 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1743 ec = dictionary()->findLargestDict(); // get largest block | |
1744 if (ec != NULL && ec->end() == chunk) { | |
1745 // It's a coterminal block - we can coalesce. | |
1746 size_t old_size = ec->size(); | |
1747 coalDeath(old_size); | |
1748 removeChunkFromDictionary(ec); | |
1749 size += old_size; | |
1750 } else { | |
1751 ec = (FreeChunk*)chunk; | |
1752 } | |
1753 } | |
1754 ec->setSize(size); | |
1755 debug_only(ec->mangleFreed(size)); | |
1756 if (size < SmallForDictionary) { | |
1757 lock = _indexedFreeListParLocks[size]; | |
1758 } | |
1759 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1760 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true); | |
1761 // record the birth under the lock since the recording involves | |
1762 // manipulation of the list on which the chunk lives and | |
1763 // if the chunk is allocated and is the last on the list, | |
1764 // the list can go away. | |
1765 coalBirth(size); | |
1766 } | |
1767 | |
1768 void | |
1769 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk, | |
1770 size_t size) { | |
1771 // check that the chunk does lie in this space! | |
1772 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1773 assert_locked(); | |
1774 _bt.verify_single_block(chunk, size); | |
1775 | |
1776 FreeChunk* fc = (FreeChunk*) chunk; | |
1777 fc->setSize(size); | |
1778 debug_only(fc->mangleFreed(size)); | |
1779 if (size < SmallForDictionary) { | |
1780 returnChunkToFreeList(fc); | |
1781 } else { | |
1782 returnChunkToDictionary(fc); | |
1783 } | |
1784 } | |
1785 | |
1786 void | |
1787 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk, | |
1788 size_t size, bool coalesced) { | |
1789 assert_locked(); | |
1790 assert(chunk != NULL, "null chunk"); | |
1791 if (coalesced) { | |
1792 // repair BOT | |
1793 _bt.single_block(chunk, size); | |
1794 } | |
1795 addChunkToFreeLists(chunk, size); | |
1796 } | |
1797 | |
1798 // We _must_ find the purported chunk on our free lists; | |
1799 // we assert if we don't. | |
1800 void | |
1801 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) { | |
1802 size_t size = fc->size(); | |
1803 assert_locked(); | |
1804 debug_only(verifyFreeLists()); | |
1805 if (size < SmallForDictionary) { | |
1806 removeChunkFromIndexedFreeList(fc); | |
1807 } else { | |
1808 removeChunkFromDictionary(fc); | |
1809 } | |
1810 _bt.verify_single_block((HeapWord*)fc, size); | |
1811 debug_only(verifyFreeLists()); | |
1812 } | |
1813 | |
1814 void | |
1815 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) { | |
1816 size_t size = fc->size(); | |
1817 assert_locked(); | |
1818 assert(fc != NULL, "null chunk"); | |
1819 _bt.verify_single_block((HeapWord*)fc, size); | |
1820 _dictionary->removeChunk(fc); | |
1821 // adjust _unallocated_block upward, as necessary | |
1822 _bt.allocated((HeapWord*)fc, size); | |
1823 } | |
1824 | |
1825 void | |
1826 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) { | |
1827 assert_locked(); | |
1828 size_t size = fc->size(); | |
1829 _bt.verify_single_block((HeapWord*)fc, size); | |
1830 NOT_PRODUCT( | |
1831 if (FLSVerifyIndexTable) { | |
1832 verifyIndexedFreeList(size); | |
1833 } | |
1834 ) | |
1835 _indexedFreeList[size].removeChunk(fc); | |
1836 debug_only(fc->clearNext()); | |
1837 debug_only(fc->clearPrev()); | |
1838 NOT_PRODUCT( | |
1839 if (FLSVerifyIndexTable) { | |
1840 verifyIndexedFreeList(size); | |
1841 } | |
1842 ) | |
1843 } | |
1844 | |
1845 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) { | |
1846 /* A hint is the next larger size that has a surplus. | |
1847 Start search at a size large enough to guarantee that | |
1848 the excess is >= MIN_CHUNK. */ | |
1849 size_t start = align_object_size(numWords + MinChunkSize); | |
1850 if (start < IndexSetSize) { | |
1851 FreeList* it = _indexedFreeList; | |
1852 size_t hint = _indexedFreeList[start].hint(); | |
1853 while (hint < IndexSetSize) { | |
1854 assert(hint % MinObjAlignment == 0, "hint should be aligned"); | |
1855 FreeList *fl = &_indexedFreeList[hint]; | |
1856 if (fl->surplus() > 0 && fl->head() != NULL) { | |
1857 // Found a list with surplus, reset original hint | |
1858 // and split out a free chunk which is returned. | |
1859 _indexedFreeList[start].set_hint(hint); | |
1860 FreeChunk* res = getFromListGreater(fl, numWords); | |
1861 assert(res == NULL || res->isFree(), | |
1862 "Should be returning a free chunk"); | |
1863 return res; | |
1864 } | |
1865 hint = fl->hint(); /* keep looking */ | |
1866 } | |
1867 /* None found. */ | |
1868 it[start].set_hint(IndexSetSize); | |
1869 } | |
1870 return NULL; | |
1871 } | |
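// Illustrative sketch, not part of the VM source: the hint chain above lets
// bestFitSmall() hop between list sizes that recently had a surplus instead
// of scanning every slot. Plain arrays stand in for _indexedFreeList[i].hint()
// and surplus(); IndexSetStride alignment is ignored for brevity.
static size_t example_follow_hints(const size_t* hints, const long* surplus,
                                   size_t start, size_t set_size) {
  for (size_t h = hints[start]; h < set_size; h = hints[h]) {
    if (surplus[h] > 0) return h;   // first larger size with spare chunks
  }
  return set_size;                  // none found; caller resets the hint
}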
1872 | |
1873 /* Requires fl->size >= numWords + MinChunkSize */ | |
1874 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl, | |
1875 size_t numWords) { | |
1876 FreeChunk *curr = fl->head(); |
1877 assert(curr != NULL, "List is empty"); |
1878 size_t oldNumWords = curr->size(); |
1879 assert(numWords >= MinChunkSize, "Word size is too small"); |
1880 assert(oldNumWords >= numWords + MinChunkSize, |
1881 "Size of chunks in the list is too small"); |
1882 | |
1883 fl->removeChunk(curr); | |
1884 // recorded indirectly by splitChunkAndReturnRemainder - | |
1885 // smallSplit(oldNumWords, numWords); | |
1886 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords); | |
1887 // Does anything have to be done for the remainder in terms of | |
1888 // fixing the card table? | |
1889 assert(new_chunk == NULL || new_chunk->isFree(), | |
1890 "Should be returning a free chunk"); | |
1891 return new_chunk; | |
1892 } | |
1893 | |
1894 FreeChunk* | |
1895 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk, | |
1896 size_t new_size) { | |
1897 assert_locked(); | |
1898 size_t size = chunk->size(); | |
1899 assert(size > new_size, "Split from a smaller block?"); | |
1900 assert(is_aligned(chunk), "alignment problem"); | |
1901 assert(size == adjustObjectSize(size), "alignment problem"); | |
1902 size_t rem_size = size - new_size; | |
1903 assert(rem_size == adjustObjectSize(rem_size), "alignment problem"); | |
1904 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum"); | |
1905 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size); | |
1906 assert(is_aligned(ffc), "alignment problem"); | |
1907 ffc->setSize(rem_size); | |
1908 ffc->linkNext(NULL); | |
1909 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads. | |
1910 // Above must occur before BOT is updated below. | |
1911 // adjust block offset table | |
1912 OrderAccess::storestore(); |
1913 assert(chunk->isFree() && ffc->isFree(), "Error"); |
0 | 1914 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size); |
1915 if (rem_size < SmallForDictionary) { | |
1916 bool is_par = (SharedHeap::heap()->n_par_threads() > 0); | |
1917 if (is_par) _indexedFreeListParLocks[rem_size]->lock(); | |
1918 returnChunkToFreeList(ffc); | |
1919 split(size, rem_size); | |
1920 if (is_par) _indexedFreeListParLocks[rem_size]->unlock(); | |
1921 } else { | |
1922 returnChunkToDictionary(ffc); | |
1923 split(size, rem_size); |
1924 } | |
1925 chunk->setSize(new_size); | |
1926 return chunk; | |
1927 } | |
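// Illustrative sketch, not part of the VM source: the split above carves the
// remainder off the *end* of the chunk, so the returned prefix keeps its
// original start address. Sizes are in heap words; the struct is hypothetical.
struct ExampleSplit { size_t prefix_start, prefix_size, rem_start, rem_size; };
static ExampleSplit example_split(size_t start, size_t size, size_t new_size) {
  ExampleSplit s;
  s.prefix_start = start;             // surviving chunk keeps the old address
  s.prefix_size  = new_size;
  s.rem_start    = start + new_size;  // remainder begins new_size words in
  s.rem_size     = size - new_size;   // must stay >= MinChunkSize (asserted above)
  return s;
}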
1928 | |
1929 void | |
1930 CompactibleFreeListSpace::sweep_completed() { | |
1931 // Now that space is probably plentiful, refill linear | |
1932 // allocation blocks as needed. | |
1933 refillLinearAllocBlocksIfNeeded(); | |
1934 } | |
1935 | |
1936 void | |
1937 CompactibleFreeListSpace::gc_prologue() { | |
1938 assert_locked(); | |
1939 if (PrintFLSStatistics != 0) { | |
1940 gclog_or_tty->print("Before GC:\n"); | |
1941 reportFreeListStatistics(); | |
1942 } | |
1943 refillLinearAllocBlocksIfNeeded(); | |
1944 } | |
1945 | |
1946 void | |
1947 CompactibleFreeListSpace::gc_epilogue() { | |
1948 assert_locked(); | |
1949 if (PrintGCDetails && Verbose && !_adaptive_freelists) { | |
1950 if (_smallLinearAllocBlock._word_size == 0) | |
1951 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure"); | |
1952 } | |
1953 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); | |
1954 _promoInfo.stopTrackingPromotions(); | |
1955 repairLinearAllocationBlocks(); | |
1956 // Print Space's stats | |
1957 if (PrintFLSStatistics != 0) { | |
1958 gclog_or_tty->print("After GC:\n"); | |
1959 reportFreeListStatistics(); | |
1960 } | |
1961 } | |
1962 | |
1963 // Iteration support, mostly delegated from a CMS generation | |
1964 | |
1965 void CompactibleFreeListSpace::save_marks() { | |
1966 // mark the "end" of the used space at the time of this call; | |
1967 // note, however, that promoted objects from this point | |
1968 // on are tracked in the _promoInfo below. | |
1969 set_saved_mark_word(unallocated_block()); |
0 | 1970 // inform allocator that promotions should be tracked. |
1971 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); | |
1972 _promoInfo.startTrackingPromotions(); | |
1973 } | |
1974 | |
1975 bool CompactibleFreeListSpace::no_allocs_since_save_marks() { | |
1976 assert(_promoInfo.tracking(), "No preceding save_marks?"); | |
1977 assert(SharedHeap::heap()->n_par_threads() == 0, |
1978 "Shouldn't be called if using parallel gc."); |
0 | 1979 return _promoInfo.noPromotions(); |
1980 } | |
1981 | |
1982 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
1983 \ | |
1984 void CompactibleFreeListSpace:: \ | |
1985 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ | |
1986 assert(SharedHeap::heap()->n_par_threads() == 0, \ | |
1987 "Shouldn't be called (yet) during parallel part of gc."); \ | |
1988 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \ | |
1989 /* \ | |
1990 * This also restores any displaced headers and removes the elements from \ | |
1991 * the iteration set as they are processed, so that we have a clean slate \ | |
1992 * at the end of the iteration. Note, thus, that if new objects are \ | |
1993 * promoted as a result of the iteration they are iterated over as well. \ | |
1994 */ \ | |
1995 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \ | |
1996 } | |
1997 | |
1998 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN) | |
1999 | |
2000 | |
2001 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) { | |
2002 // ugghh... how would one do this efficiently for a non-contiguous space? | |
2003 guarantee(false, "NYI"); | |
2004 } | |
2005 | |
2006 bool CompactibleFreeListSpace::linearAllocationWouldFail() const { |
0 | 2007 return _smallLinearAllocBlock._word_size == 0; |
2008 } | |
2009 | |
2010 void CompactibleFreeListSpace::repairLinearAllocationBlocks() { | |
2011 // Fix up linear allocation blocks to look like free blocks | |
2012 repairLinearAllocBlock(&_smallLinearAllocBlock); | |
2013 } | |
2014 | |
2015 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) { | |
2016 assert_locked(); | |
2017 if (blk->_ptr != NULL) { | |
2018 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize, | |
2019 "Minimum block size requirement"); | |
2020 FreeChunk* fc = (FreeChunk*)(blk->_ptr); | |
2021 fc->setSize(blk->_word_size); | |
2022 fc->linkPrev(NULL); // mark as free | |
2023 fc->dontCoalesce(); | |
2024 assert(fc->isFree(), "just marked it free"); | |
2025 assert(fc->cantCoalesce(), "just marked it uncoalescable"); | |
2026 } | |
2027 } | |
2028 | |
2029 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() { | |
2030 assert_locked(); | |
2031 if (_smallLinearAllocBlock._ptr == NULL) { | |
2032 assert(_smallLinearAllocBlock._word_size == 0, | |
2033 "Size of linAB should be zero if the ptr is NULL"); | |
2034 // Reset the linAB refill and allocation size limit. | |
2035 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc); | |
2036 } | |
2037 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock); | |
2038 } | |
2039 | |
2040 void | |
2041 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) { | |
2042 assert_locked(); | |
2043 assert((blk->_ptr == NULL && blk->_word_size == 0) || | |
2044 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize), | |
2045 "blk invariant"); | |
2046 if (blk->_ptr == NULL) { | |
2047 refillLinearAllocBlock(blk); | |
2048 } | |
2049 if (PrintMiscellaneous && Verbose) { | |
2050 if (blk->_word_size == 0) { | |
2051 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure"); | |
2052 } | |
2053 } | |
2054 } | |
2055 | |
2056 void | |
2057 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) { | |
2058 assert_locked(); | |
2059 assert(blk->_word_size == 0 && blk->_ptr == NULL, | |
2060 "linear allocation block should be empty"); | |
2061 FreeChunk* fc; | |
2062 if (blk->_refillSize < SmallForDictionary && | |
2063 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) { | |
2064 // A linAB's strategy might be to use small sizes to reduce | |
2065 // fragmentation but still get the benefits of allocation from a | |
2066 // linAB. | |
2067 } else { | |
2068 fc = getChunkFromDictionary(blk->_refillSize); | |
2069 } | |
2070 if (fc != NULL) { | |
2071 blk->_ptr = (HeapWord*)fc; | |
2072 blk->_word_size = fc->size(); | |
2073 fc->dontCoalesce(); // to prevent sweeper from sweeping us up | |
2074 } | |
2075 } | |
2076 | |
2077 // Support for concurrent collection policy decisions. |
2078 bool CompactibleFreeListSpace::should_concurrent_collect() const { |
2079 // In the future we might want to add in fragmentation stats -- |
2080 // including erosion of the "mountain" into this decision as well. |
2081 return !adaptive_freelists() && linearAllocationWouldFail(); |
2082 } |
2083 |
0 | 2084 // Support for compaction |
2085 | |
2086 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { | |
2087 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); | |
2088 // prepare_for_compaction() uses the space between live objects |
2089 // so that a later phase can skip dead space quickly. Consequently, |
2090 // verification of the free lists does not work after this point. |
2091 } | |
2092 | |
2093 #define obj_size(q) adjustObjectSize(oop(q)->size()) | |
2094 #define adjust_obj_size(s) adjustObjectSize(s) | |
2095 | |
2096 void CompactibleFreeListSpace::adjust_pointers() { | |
2097 // In other versions of adjust_pointers(), a bail out | |
2098 // based on the amount of live data in the generation | |
2099 // (i.e., if 0, bail out) may be used. | |
2100 // Cannot test used() == 0 here because the free lists have already | |
2101 // been mangled by the compaction. | |
2102 | |
2103 SCAN_AND_ADJUST_POINTERS(adjust_obj_size); | |
2104 // See note about verification in prepare_for_compaction(). | |
2105 } | |
2106 | |
2107 void CompactibleFreeListSpace::compact() { | |
2108 SCAN_AND_COMPACT(obj_size); | |
2109 } | |
2110 | |
2111 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] | |
2112 // where fbs is free block sizes | |
2113 double CompactibleFreeListSpace::flsFrag() const { | |
2114 size_t itabFree = totalSizeInIndexedFreeLists(); | |
2115 double frag = 0.0; | |
2116 size_t i; | |
2117 | |
2118 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2119 double sz = i; | |
2120 frag += _indexedFreeList[i].count() * (sz * sz); | |
2121 } | |
2122 | |
2123 double totFree = itabFree + | |
2124 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())); | |
2125 if (totFree > 0) { | |
2126 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / | |
2127 (totFree * totFree)); | |
2128 frag = (double)1.0 - frag; | |
2129 } else { | |
2130 assert(frag == 0.0, "Follows from totFree == 0"); | |
2131 } | |
2132 return frag; | |
2133 } | |
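// Illustrative sketch, not part of the VM source: the metric above evaluated
// for a hypothetical census. One block holding all free space yields 0;
// many small equal blocks push the value toward 1.
static double example_fls_frag() {
  const double sizes[] = { 8.0, 8.0, 16.0, 256.0 };  // hypothetical free block sizes, in words
  double sum = 0.0, sum_sq = 0.0;
  for (int i = 0; i < 4; i++) {
    sum    += sizes[i];
    sum_sq += sizes[i] * sizes[i];
  }
  return 1.0 - sum_sq / (sum * sum);                 // ~0.21 for these sizes
}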
2134 | |
2135 void CompactibleFreeListSpace::beginSweepFLCensus( | |
2136 float inter_sweep_current, | |
2137 float inter_sweep_estimate, |
2138 float intra_sweep_estimate) { |
0 | 2139 assert_locked(); |
2140 size_t i; | |
2141 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2142 FreeList* fl = &_indexedFreeList[i]; | |
2143 if (PrintFLSStatistics > 1) { |
2144 gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i); |
2145 } |
2146 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate); |
2147 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent)); |
0 | 2148 fl->set_beforeSweep(fl->count()); |
2149 fl->set_bfrSurp(fl->surplus()); |
2150 } |
2151 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent, |
0 | 2152 inter_sweep_current, |
2153 inter_sweep_estimate, |
2154 intra_sweep_estimate); |
0 | 2155 } |
2156 | |
2157 void CompactibleFreeListSpace::setFLSurplus() { | |
2158 assert_locked(); | |
2159 size_t i; | |
2160 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2161 FreeList *fl = &_indexedFreeList[i]; | |
2162 fl->set_surplus(fl->count() - | |
2163 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent)); |
0 | 2164 } |
2165 } | |
2166 | |
2167 void CompactibleFreeListSpace::setFLHints() { | |
2168 assert_locked(); | |
2169 size_t i; | |
2170 size_t h = IndexSetSize; | |
2171 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { | |
2172 FreeList *fl = &_indexedFreeList[i]; | |
2173 fl->set_hint(h); | |
2174 if (fl->surplus() > 0) { | |
2175 h = i; | |
2176 } | |
2177 } | |
2178 } | |
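// Illustrative sketch, not part of the VM source: the loop above walks the
// index set from largest to smallest, so each slot's hint becomes the nearest
// *larger* size that currently has a surplus. Plain arrays stand in for the
// FreeList table; IndexSetStride is ignored for brevity.
static void example_set_hints(const long* surplus, size_t* hint, size_t set_size) {
  size_t h = set_size;              // sentinel: no larger size has a surplus
  for (size_t i = set_size - 1; i != 0; i--) {
    hint[i] = h;                    // nearest larger surplus seen so far
    if (surplus[i] > 0) h = i;      // this size now serves the smaller slots
  }
}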
2179 | |
2180 void CompactibleFreeListSpace::clearFLCensus() { | |
2181 assert_locked(); | |
2182 size_t i; |
2183 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2184 FreeList *fl = &_indexedFreeList[i]; | |
2185 fl->set_prevSweep(fl->count()); | |
2186 fl->set_coalBirths(0); | |
2187 fl->set_coalDeaths(0); | |
2188 fl->set_splitBirths(0); | |
2189 fl->set_splitDeaths(0); | |
2190 } | |
2191 } | |
2192 | |
2193 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) { |
2194 if (PrintFLSStatistics > 0) { |
2195 HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict(); |
2196 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT, |
2197 largestAddr); |
2198 } |
0 | 2199 setFLSurplus(); |
2200 setFLHints(); | |
2201 if (PrintGC && PrintFLSCensus > 0) { | |
2202 printFLCensus(sweep_count); |
0 | 2203 } |
2204 clearFLCensus(); | |
2205 assert_locked(); | |
2206 _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent); |
0 | 2207 } |
2208 | |
2209 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) { | |
2210 if (size < SmallForDictionary) { | |
2211 FreeList *fl = &_indexedFreeList[size]; | |
2212 return (fl->coalDesired() < 0) || | |
2213 ((int)fl->count() > fl->coalDesired()); | |
2214 } else { | |
2215 return dictionary()->coalDictOverPopulated(size); | |
2216 } | |
2217 } | |
2218 | |
2219 void CompactibleFreeListSpace::smallCoalBirth(size_t size) { | |
2220 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2221 FreeList *fl = &_indexedFreeList[size]; | |
2222 fl->increment_coalBirths(); | |
2223 fl->increment_surplus(); | |
2224 } | |
2225 | |
2226 void CompactibleFreeListSpace::smallCoalDeath(size_t size) { | |
2227 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2228 FreeList *fl = &_indexedFreeList[size]; | |
2229 fl->increment_coalDeaths(); | |
2230 fl->decrement_surplus(); | |
2231 } | |
2232 | |
2233 void CompactibleFreeListSpace::coalBirth(size_t size) { | |
2234 if (size < SmallForDictionary) { | |
2235 smallCoalBirth(size); | |
2236 } else { | |
2237 dictionary()->dictCensusUpdate(size, | |
2238 false /* split */, | |
2239 true /* birth */); | |
2240 } | |
2241 } | |
2242 | |
2243 void CompactibleFreeListSpace::coalDeath(size_t size) { | |
2244 if(size < SmallForDictionary) { | |
2245 smallCoalDeath(size); | |
2246 } else { | |
2247 dictionary()->dictCensusUpdate(size, | |
2248 false /* split */, | |
2249 false /* birth */); | |
2250 } | |
2251 } | |
2252 | |
2253 void CompactibleFreeListSpace::smallSplitBirth(size_t size) { | |
2254 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2255 FreeList *fl = &_indexedFreeList[size]; | |
2256 fl->increment_splitBirths(); | |
2257 fl->increment_surplus(); | |
2258 } | |
2259 | |
2260 void CompactibleFreeListSpace::smallSplitDeath(size_t size) { | |
2261 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2262 FreeList *fl = &_indexedFreeList[size]; | |
2263 fl->increment_splitDeaths(); | |
2264 fl->decrement_surplus(); | |
2265 } | |
2266 | |
2267 void CompactibleFreeListSpace::splitBirth(size_t size) { | |
2268 if (size < SmallForDictionary) { | |
2269 smallSplitBirth(size); | |
2270 } else { | |
2271 dictionary()->dictCensusUpdate(size, | |
2272 true /* split */, | |
2273 true /* birth */); | |
2274 } | |
2275 } | |
2276 | |
2277 void CompactibleFreeListSpace::splitDeath(size_t size) { | |
2278 if (size < SmallForDictionary) { | |
2279 smallSplitDeath(size); | |
2280 } else { | |
2281 dictionary()->dictCensusUpdate(size, | |
2282 true /* split */, | |
2283 false /* birth */); | |
2284 } | |
2285 } | |
2286 | |
2287 void CompactibleFreeListSpace::split(size_t from, size_t to1) { | |
2288 size_t to2 = from - to1; | |
2289 splitDeath(from); | |
2290 splitBirth(to1); | |
2291 splitBirth(to2); | |
2292 } | |
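// Illustrative sketch, not part of the VM source: a split is recorded as one
// death at the original size and one birth at each fragment, so the census
// conserves free words: from == to1 + (from - to1). The tally struct is
// hypothetical; e.g. split(100, 60) yields fragments of 60 and 40 words.
struct ExampleSplitTally { size_t death_size; size_t birth_sizes[2]; };
static ExampleSplitTally example_split_tally(size_t from, size_t to1) {
  ExampleSplitTally t = { from, { to1, from - to1 } };
  return t;
}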
2293 | |
2294 void CompactibleFreeListSpace::print() const { | |
2295 print_on(tty); |
0 | 2296 } |
2297 | |
2298 void CompactibleFreeListSpace::prepare_for_verify() { | |
2299 assert_locked(); | |
2300 repairLinearAllocationBlocks(); | |
2301 // Verify that the SpoolBlocks look like free blocks of | |
2302 // appropriate sizes... To be done ... | |
2303 } | |
2304 | |
2305 class VerifyAllBlksClosure: public BlkClosure { | |
2306 private: |
0 | 2307 const CompactibleFreeListSpace* _sp; |
2308 const MemRegion _span; | |
2309 HeapWord* _last_addr; |
2310 size_t _last_size; |
2311 bool _last_was_obj; |
2312 bool _last_was_live; |
0 | 2313 |
2314 public: | |
2315 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp, | |
2316 MemRegion span) : _sp(sp), _span(span), |
2317 _last_addr(NULL), _last_size(0), |
2318 _last_was_obj(false), _last_was_live(false) { } |
0 | 2319 |
2320 virtual size_t do_blk(HeapWord* addr) { |
0 | 2321 size_t res; |
2322 bool was_obj = false; |
2323 bool was_live = false; |
0 | 2324 if (_sp->block_is_obj(addr)) { |
2325 was_obj = true; |
0 | 2326 oop p = oop(addr); |
2327 guarantee(p->is_oop(), "Should be an oop"); | |
2328 res = _sp->adjustObjectSize(p->size()); | |
2329 if (_sp->obj_is_alive(addr)) { | |
2330 was_live = true; |
0 | 2331 p->verify(); |
2332 } | |
2333 } else { | |
2334 FreeChunk* fc = (FreeChunk*)addr; | |
2335 res = fc->size(); | |
2336 if (FLSVerifyLists && !fc->cantCoalesce()) { | |
2337 guarantee(_sp->verifyChunkInFreeLists(fc), | |
2338 "Chunk should be on a free list"); | |
2339 } | |
2340 } | |
2341 if (res == 0) { |
2342 gclog_or_tty->print_cr("Livelock: no rank reduction!"); |
2343 gclog_or_tty->print_cr( |
2344 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n" |
2345 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n", |
2346 addr, res, was_obj ?"true":"false", was_live ?"true":"false", |
2347 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false"); |
2348 _sp->print_on(gclog_or_tty); |
2349 guarantee(false, "Seppuku!"); |
2350 } |
2351 _last_addr = addr; |
2352 _last_size = res; |
2353 _last_was_obj = was_obj; |
2354 _last_was_live = was_live; |
0 | 2355 return res; |
2356 } | |
2357 }; | |
2358 | |
2359 class VerifyAllOopsClosure: public OopClosure { | |
2360 private: |
0 | 2361 const CMSCollector* _collector; |
2362 const CompactibleFreeListSpace* _sp; | |
2363 const MemRegion _span; | |
2364 const bool _past_remark; | |
2365 const CMSBitMap* _bit_map; | |
2366 | |
2367 protected: |
2368 void do_oop(void* p, oop obj) { |
2369 if (_span.contains(obj)) { // the interior oop points into CMS heap |
2370 if (!_span.contains(p)) { // reference from outside CMS heap |
2371 // Should be a valid object; the first disjunct below allows |
2372 // us to sidestep an assertion in block_is_obj() that insists |
2373 // that p be in _sp. Note that several generations (and spaces) |
2374 // are spanned by _span (CMS heap) above. |
2375 guarantee(!_sp->is_in_reserved(obj) || |
2376 _sp->block_is_obj((HeapWord*)obj), |
2377 "Should be an object"); |
2378 guarantee(obj->is_oop(), "Should be an oop"); |
2379 obj->verify(); |
2380 if (_past_remark) { |
2381 // Remark has been completed, the object should be marked |
2382 _bit_map->isMarked((HeapWord*)obj); |
2383 } |
2384 } else { // reference within CMS heap |
2385 if (_past_remark) { |
2386 // Remark has been completed -- so the referent should have |
2387 // been marked, if referring object is. |
2388 if (_bit_map->isMarked(_collector->block_start(p))) { |
2389 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?"); |
2390 } |
2391 } |
2392 } |
2393 } else if (_sp->is_in_reserved(p)) { |
2394 // the reference is from FLS, and points out of FLS |
2395 guarantee(obj->is_oop(), "Should be an oop"); |
2396 obj->verify(); |
2397 } |
2398 } |
2399 |
2400 template <class T> void do_oop_work(T* p) { |
2401 T heap_oop = oopDesc::load_heap_oop(p); |
2402 if (!oopDesc::is_null(heap_oop)) { |
2403 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
2404 do_oop(p, obj); |
2405 } |
2406 } |
2407 |
0 | 2408 public: |
2409 VerifyAllOopsClosure(const CMSCollector* collector, | |
2410 const CompactibleFreeListSpace* sp, MemRegion span, | |
2411 bool past_remark, CMSBitMap* bit_map) : | |
2412 OopClosure(), _collector(collector), _sp(sp), _span(span), | |
2413 _past_remark(past_remark), _bit_map(bit_map) { } | |
2414 | |
2415 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); } |
2416 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); } |
0 | 2417 }; |
2418 | |
2419 void CompactibleFreeListSpace::verify(bool ignored) const { | |
2420 assert_lock_strong(&_freelistLock); | |
2421 verify_objects_initialized(); | |
2422 MemRegion span = _collector->_span; | |
2423 bool past_remark = (_collector->abstract_state() == | |
2424 CMSCollector::Sweeping); | |
2425 | |
2426 ResourceMark rm; | |
2427 HandleMark hm; | |
2428 | |
2429 // Check integrity of CFL data structures | |
2430 _promoInfo.verify(); | |
2431 _dictionary->verify(); | |
2432 if (FLSVerifyIndexTable) { | |
2433 verifyIndexedFreeLists(); | |
2434 } | |
2435 // Check integrity of all objects and free blocks in space | |
2436 { | |
2437 VerifyAllBlksClosure cl(this, span); | |
2438 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const | |
2439 } | |
2440 // Check that all references in the heap to FLS | |
2441 // are to valid objects in FLS or that references in | |
2442 // FLS are to valid objects elsewhere in the heap | |
2443 if (FLSVerifyAllHeapReferences) | |
2444 { | |
2445 VerifyAllOopsClosure cl(_collector, this, span, past_remark, | |
2446 _collector->markBitMap()); | |
2447 CollectedHeap* ch = Universe::heap(); | |
2448 ch->oop_iterate(&cl); // all oops in generations | |
2449 ch->permanent_oop_iterate(&cl); // all oops in perm gen | |
2450 } | |
2451 | |
2452 if (VerifyObjectStartArray) { | |
2453 // Verify the block offset table | |
2454 _bt.verify(); | |
2455 } | |
2456 } | |
2457 | |
2458 #ifndef PRODUCT | |
2459 void CompactibleFreeListSpace::verifyFreeLists() const { | |
2460 if (FLSVerifyLists) { | |
2461 _dictionary->verify(); | |
2462 verifyIndexedFreeLists(); | |
2463 } else { | |
2464 if (FLSVerifyDictionary) { | |
2465 _dictionary->verify(); | |
2466 } | |
2467 if (FLSVerifyIndexTable) { | |
2468 verifyIndexedFreeLists(); | |
2469 } | |
2470 } | |
2471 } | |
2472 #endif | |
2473 | |
2474 void CompactibleFreeListSpace::verifyIndexedFreeLists() const { | |
2475 size_t i = 0; | |
2476 for (; i < MinChunkSize; i++) { | |
2477 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL"); | |
2478 } | |
2479 for (; i < IndexSetSize; i++) { | |
2480 verifyIndexedFreeList(i); | |
2481 } | |
2482 } | |
2483 | |
2484 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { | |
2485 FreeChunk* fc = _indexedFreeList[size].head(); |
2486 FreeChunk* tail = _indexedFreeList[size].tail(); |
2487 size_t num = _indexedFreeList[size].count(); |
2488 size_t n = 0; |
2489 guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty"); |
2490 for (; fc != NULL; fc = fc->next(), n++) { |
0 | 2491 guarantee(fc->size() == size, "Size inconsistency"); |
2492 guarantee(fc->isFree(), "!free?"); |
2493 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list"); |
2494 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail"); |
0 | 2495 } |
2496 guarantee(n == num, "Incorrect count"); |
0 | 2497 } |
2498 | |
2499 #ifndef PRODUCT | |
2500 void CompactibleFreeListSpace::checkFreeListConsistency() const { | |
2501 assert(_dictionary->minSize() <= IndexSetSize, | |
2502 "Some sizes can't be allocated without recourse to" | |
2503 " linear allocation buffers"); | |
2504 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk), | |
2505 "else MIN_TREE_CHUNK_SIZE is wrong"); | |
2506 assert((IndexSetStride == 2 && IndexSetStart == 2) || | |
2507 (IndexSetStride == 1 && IndexSetStart == 1), "just checking"); | |
2508 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0), | |
2509 "Some for-loops may be incorrectly initialized"); | |
2510 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1), | |
2511 "For-loops that iterate over IndexSet with stride 2 may be wrong"); | |
2512 } | |
2513 #endif | |
2514 | |
2515 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const { |
0 | 2516 assert_lock_strong(&_freelistLock); |
2517 FreeList total; |
2518 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count); |
2519 FreeList::print_labels_on(gclog_or_tty, "size"); |
0 | 2520 size_t totalFree = 0; |
2521 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2522 const FreeList *fl = &_indexedFreeList[i]; | |
2523 totalFree += fl->count() * fl->size(); |
2524 if (i % (40*IndexSetStride) == 0) { |
2525 FreeList::print_labels_on(gclog_or_tty, "size"); |
2526 } |
2527 fl->print_on(gclog_or_tty); |
2528 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() ); |
2529 total.set_surplus( total.surplus() + fl->surplus() ); |
2530 total.set_desired( total.desired() + fl->desired() ); |
2531 total.set_prevSweep( total.prevSweep() + fl->prevSweep() ); |
2532 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep()); |
2533 total.set_count( total.count() + fl->count() ); |
2534 total.set_coalBirths( total.coalBirths() + fl->coalBirths() ); |
2535 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() ); |
2536 total.set_splitBirths(total.splitBirths() + fl->splitBirths()); |
2537 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths()); |
0 | 2538 } |
2539 total.print_on(gclog_or_tty, "TOTAL"); |
2540 gclog_or_tty->print_cr("Total free in indexed lists " |
2541 SIZE_FORMAT " words", totalFree); |
0 | 2542 gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n", |
2543 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/ |
2544 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0), |
2545 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0)); |
0 | 2546 _dictionary->printDictCensus(); |
2547 } | |
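// Illustrative sketch, not part of the VM source: the growth and deficit
// figures printed above, as plain formulas over hypothetical census totals.
static double example_growth(double split_births, double coal_births,
                             double split_deaths, double coal_deaths,
                             double prev_sweep) {
  // net births this sweep, normalized by the population after the last sweep
  return (split_births + coal_births - split_deaths - coal_deaths) /
         (prev_sweep != 0.0 ? prev_sweep : 1.0);
}
static double example_deficit(double desired, double count) {
  // fraction of the desired inventory still missing (negative means surplus)
  return (desired - count) / (desired != 0.0 ? desired : 1.0);
}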
2548 | |
2549 /////////////////////////////////////////////////////////////////////////// |
2550 // CFLS_LAB |
2551 /////////////////////////////////////////////////////////////////////////// |
2552 |
2553 #define VECTOR_257(x) \ |
2554 /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \ |
2555 {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
2556    x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
2557    x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
2558    x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
2559    x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
2560    x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
2561    x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
2562    x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
2563    x } |
2564 |
2565 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_ |
2566 // OldPLABSize, whose static default is different; if overridden at the |
2567 // command-line, this will get reinitialized via a call to |
2568 // modify_initialization() below. |
2569 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] = |
2570 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim)); |
2571 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0); |
2572 int CFLS_LAB::_global_num_workers[] = VECTOR_257(0); |
0 | 2573 |
2574 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) : | |
2575 _cfls(cfls) | |
2576 { | |
2577 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above"); |
0 | 2578 for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
2579 i < CompactibleFreeListSpace::IndexSetSize; | |
2580 i += CompactibleFreeListSpace::IndexSetStride) { | |
2581 _indexedFreeList[i].set_size(i); | |
2582 _num_blocks[i] = 0; |
2583 } |
2584 } |
2585 |
2586 static bool _CFLS_LAB_modified = false; |
2587 |
2588 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) { |
2589 assert(!_CFLS_LAB_modified, "Call only once"); |
2590 _CFLS_LAB_modified = true; |
2591 for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
2592 i < CompactibleFreeListSpace::IndexSetSize; |
2593 i += CompactibleFreeListSpace::IndexSetStride) { |
2594 _blocks_to_claim[i].modify(n, wt, true /* force */); |
0 | 2595 } |
2596 } | |
2597 | |
2598 HeapWord* CFLS_LAB::alloc(size_t word_sz) { | |
2599 FreeChunk* res; | |
2600 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error"); |
0 | 2601 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) { |
2602 // This locking manages sync with other large object allocations. | |
2603 MutexLockerEx x(_cfls->parDictionaryAllocLock(), | |
2604 Mutex::_no_safepoint_check_flag); | |
2605 res = _cfls->getChunkFromDictionaryExact(word_sz); | |
2606 if (res == NULL) return NULL; | |
2607 } else { | |
2608 FreeList* fl = &_indexedFreeList[word_sz]; | |
2609 if (fl->count() == 0) { | |
2610 // Attempt to refill this local free list. | |
2611 get_from_global_pool(word_sz, fl); |
0 | 2612 // If it didn't work, give up. |
2613 if (fl->count() == 0) return NULL; | |
2614 } | |
2615 res = fl->getChunkAtHead(); | |
2616 assert(res != NULL, "Why was count non-zero?"); | |
2617 } | |
2618 res->markNotFree(); | |
2619 assert(!res->isFree(), "shouldn't be marked free"); | |
187 | 2620 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized"); |
0 | 2621 // mangle a just allocated object with a distinct pattern. |
2622 debug_only(res->mangleAllocated(word_sz)); | |
2623 return (HeapWord*)res; | |
2624 } | |
2625 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2626 // Get a chunk of blocks of the right size and update related |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2627 // book-keeping stats |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2628 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
  // Get the #blocks we want to claim
  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  assert(n_blks > 0, "Error");
  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  // In some cases, when the application has a phase change,
  // there may be a sudden and sharp shift in the object survival
  // profile, and updating the counts at the end of a scavenge
  // may not be quick enough, giving rise to large scavenge pauses
  // during these phase changes. It is beneficial to detect such
  // changes on-the-fly during a scavenge and avoid such a phase-change
  // pothole. The following code is a heuristic attempt to do that.
  // It is protected by a product flag until we have gained
  // enough experience with this heuristic and fine-tuned its behaviour.
  // WARNING: This might increase fragmentation if we overreact to
  // small spikes, so some kind of historical smoothing based on
  // previous experience with the greater reactivity might be useful.
  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  // default.
  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
    size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
    n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
    n_blks = MIN2(n_blks, CMSOldPLABMax);
  }
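  // Illustrative arithmetic for the adjustment above (hypothetical values,
  // not taken from this source): with CMSOldPLABToleranceFactor = 4,
  // CMSOldPLABNumRefills = 4 and a smoothed average of n_blks = 50, a worker
  // that has already consumed _num_blocks[word_sz] = 1600 blocks this
  // scavenge computes multiple = 1600/(4*4*50) = 2; with
  // CMSOldPLABReactivityFactor = 2 the claim becomes 50 + 2*2*50 = 250,
  // subject to the CMSOldPLABMax clamp. Demand well beyond the tolerated
  // level thus grows the refill size within the same scavenge.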
  assert(n_blks > 0, "Error");
  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  // Update stats table entry for this block size
  _num_blocks[word_sz] += fl->count();
}

void CFLS_LAB::compute_desired_plab_size() {
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
           "Counter inconsistency");
    if (_global_num_workers[i] > 0) {
      // Need to smooth wrt historical average
      if (ResizeOldPLAB) {
        _blocks_to_claim[i].sample(
          MAX2((size_t)CMSOldPLABMin,
          MIN2((size_t)CMSOldPLABMax,
               _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
      }
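      // The sampled value above is this scavenge's per-worker, per-refill
      // demand for size class i, clamped to [CMSOldPLABMin, CMSOldPLABMax].
      // Hypothetical example: 8 workers that together used 6400 blocks, at
      // CMSOldPLABNumRefills = 4 refills each, sample 6400/(8*4) = 200 as
      // the next desired claim.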
      // Reset counters for next round
      _global_num_workers[i] = 0;
      _global_num_blocks[i] = 0;
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
      }
    }
  }
}

void CFLS_LAB::retire(int tid) {
  // We run this single threaded with the world stopped;
  // so no need for locks and such.
#define CFLS_LAB_PARALLEL_ACCESS 0
  NOT_PRODUCT(Thread* t = Thread::current();)
  assert(Thread::current()->is_VM_thread(), "Error");
  assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
         "Will access uninitialized slot below");
#if CFLS_LAB_PARALLEL_ACCESS
  for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
       i > 0;
       i -= CompactibleFreeListSpace::IndexSetStride) {
#else // CFLS_LAB_PARALLEL_ACCESS
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
#endif // !CFLS_LAB_PARALLEL_ACCESS
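    // Note: retirement currently runs in the VM thread at a safepoint (see
    // the assert above), so the forward, lock-free walk is sufficient. The
    // disabled CFLS_LAB_PARALLEL_ACCESS variant above records what truly
    // concurrent retirement would use instead: a reverse walk over the
    // index set together with the per-size-class locks taken below.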
    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
           "Can't retire more than what we obtained");
    if (_num_blocks[i] > 0) {
      size_t num_retire = _indexedFreeList[i].count();
      assert(_num_blocks[i] > num_retire, "Should have used at least one");
      {
#if CFLS_LAB_PARALLEL_ACCESS
        MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
                        Mutex::_no_safepoint_check_flag);
#endif // CFLS_LAB_PARALLEL_ACCESS
        // Update globals stats for num_blocks used
        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
        _global_num_workers[i]++;
        assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
        if (num_retire > 0) {
          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
          // Reset this list.
          _indexedFreeList[i] = FreeList();
          _indexedFreeList[i].set_size(i);
        }
      }
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
                               tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
      }
      // Reset stats for next round
      _num_blocks[i] = 0;
    }
  }
}
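
// Taken together (a summary, not new behavior): get_from_global_pool()
// tallies each worker's claims in _num_blocks, retire() folds the blocks
// actually used into _global_num_blocks/_global_num_workers and returns
// unused chunks to the shared lists, and compute_desired_plab_size() turns
// those totals into the next scavenge's smoothed per-refill claim.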

void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  // then try getting a big chunk and splitting it.
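  // Illustrative walk (hypothetical sizes): for word_sz = 8 the loop below
  // probes the indexed lists at cur_sz = 8, 16, 24, ... while cur_sz stays
  // below IndexSetSize; a hit at cur_sz = k*word_sz yields chunks that are
  // each split k ways. With CMSSplitIndexedFreeListBlocks disabled, only
  // k == 1 (the exact size) is tried before falling through to the
  // dictionary path further below.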
  {
    bool found;
    int k;
    size_t cur_sz;
    for (k = 1, cur_sz = k * word_sz, found = false;
         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
         (CMSSplitIndexedFreeListBlocks || k <= 1);
         k++, cur_sz = k * word_sz) {
      FreeList fl_for_cur_sz;  // Empty.
      fl_for_cur_sz.set_size(cur_sz);
      {
        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                        Mutex::_no_safepoint_check_flag);
        FreeList* gfl = &_indexedFreeList[cur_sz];
        if (gfl->count() != 0) {
          // nn is the number of chunks of size cur_sz that
          // we'd need to split k-ways each, in order to create
          // "n" chunks of size word_sz each.
          const size_t nn = MAX2(n/k, (size_t)1);
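          // Hypothetical numbers: to build n = 100 blocks of word_sz from
          // the cur_sz = 3*word_sz list (k = 3), we request
          // nn = MAX2(100/3, 1) = 33 chunks; split three ways each they
          // yield 99 blocks, acceptable since n is a target, not a promise.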
          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
          found = true;
          if (k > 1) {
            // Update split death stats for the cur_sz-size blocks list:
            // we increment the split death count by the number of blocks
            // we just took from the cur_sz-size blocks list and which
            // we will be splitting below.
            ssize_t deaths = gfl->splitDeaths() +
                             fl_for_cur_sz.count();
            gfl->set_splitDeaths(deaths);
          }
        }
      }
      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
      if (found) {
        if (k == 1) {
          fl->prepend(&fl_for_cur_sz);
        } else {
          // Divide each block on fl_for_cur_sz up k ways.
          FreeChunk* fc;
          while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
            // Must do this in reverse order, so that anybody attempting to
            // access the main chunk sees it as a single free block until we
            // change it.
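            // Concretely (hypothetical k = 3): the sub-blocks at offsets
            // 2*word_sz, then word_sz, then 0 are carved off right to left,
            // so a concurrent block walker starting at fc always sees a
            // well-formed free block covering the not-yet-split prefix; the
            // storestore() below likewise publishes each sub-block's header
            // before its BOT entry changes.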
            size_t fc_size = fc->size();
            assert(fc->isFree(), "Error");
            for (int i = k-1; i >= 0; i--) {
              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              assert((i != 0) ||
                     ((fc == ffc) && ffc->isFree() &&
                      (ffc->size() == k*word_sz) && (fc_size == word_sz)),
                     "Counting error");
              ffc->setSize(word_sz);
              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
              ffc->linkNext(NULL);
              // Above must occur before BOT is updated below.
              OrderAccess::storestore();
              // splitting from the right, fc_size == i * word_sz
              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
              fc_size -= word_sz;
              assert(fc_size == i*word_sz, "Error");
              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
              _bt.verify_single_block((HeapWord*)fc, fc_size);
              _bt.verify_single_block((HeapWord*)ffc, word_sz);
              // Push this on "fl".
              fl->returnChunkAtHead(ffc);
            }
            // TRAP
            assert(fl->tail()->next() == NULL, "List invariant.");
          }
        }
        // Update birth stats for this block size.
        size_t num = fl->count();
        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                        Mutex::_no_safepoint_check_flag);
        ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
        _indexedFreeList[word_sz].set_splitBirths(births);
        return;
      }
    }
  }
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    while (n > 0) {
      fc = dictionary()->getChunk(MAX2(n * word_sz,
                                       _dictionary->minSize()),
                                  FreeBlockDictionary::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       false /*birth*/);
        break;
      } else {
        n--;
      }
    }
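    // The loop above backs off on failure: if no dictionary chunk of at
    // least n*word_sz words (and at least _dictionary->minSize()) is
    // available, it retries with n-1, n-2, ..., so a fragmented dictionary
    // still yields a smaller batch rather than nothing.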
    if (fc == NULL) return;
    // Otherwise, split up that block.
    assert((ssize_t)n >= 1, "Control point invariant");
    assert(fc->isFree(), "Error: should be a free block");
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    const size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    assert((ssize_t)n >= 1, "Control point invariant");
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
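    // Hypothetical sizes: fc->size() = 103 and word_sz = 10 allow n = 10
    // blocks with rem = 3; if MinChunkSize were 4, that tail could not
    // stand alone as a free chunk, so we drop to n = 9 and fold the tail
    // into a viable remainder of rem = 13.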
    // Note that at this point we may have n == 0.
    assert((ssize_t)n >= 0, "Control point invariant");

    // If n is 0, the chunk fc that was found is not large
    // enough to leave a viable remainder.  We are unable to
    // allocate even one block.  Return fc to the
    // dictionary and return, leaving "fl" empty.
    if (n == 0) {
      returnChunkToDictionary(fc);
      assert(fl->count() == 0, "We never allocated any blocks");
      return;
    }

    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a concurrent allocation
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->setSize(rem);
      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
      rem_fc->linkNext(NULL);
      // Above must occur before BOT is updated below.
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      OrderAccess::storestore();
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      assert(fc->isFree(), "Error");
      fc->setSize(prefix_size);
      if (rem >= IndexSetSize) {
        returnChunkToDictionary(rem_fc);
        dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
    smallSplitBirth(rem);
  }
  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->setSize(word_sz);
    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
    ffc->linkNext(NULL);
    // Above must occur before BOT is updated below.
    OrderAccess::storestore();
    // splitting from the right, fc_size == i * word_sz after this step
    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->returnChunkAtHead(ffc);
  }
  // First chunk
  assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
  // The blocks above should show their new sizes before the first block below
  fc->setSize(word_sz);
  fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
  fc->linkNext(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->returnChunkAtHead(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
    _indexedFreeList[word_sz].set_splitBirths(births);
    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    // _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
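  // Ceiling division: e.g. a used region of 1000 words with
  // task_size = 300 gives n_tasks = (1000 + 299)/300 = 4, the last task
  // covering the partial tail (numbers purely illustrative).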
  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  assert(n_tasks == 0 ||
         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
          (used_region().start() + n_tasks*task_size >= used_region().end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                                         CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
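  // Illustrative: with e.g. a 512-byte card, a "low" pointer into the
  // middle of a card is rounded down to the previous card boundary, so the
  // span (and hence every task boundary computed below) starts on a card
  // boundary and block_offset_careful() remains safe at span edges.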
  assert(span.is_empty() ||
         ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
         "span should start at a card boundary");
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}